language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | celery__celery | t/unit/worker/test_heartbeat.py | {
"start": 76,
"end": 488
} | class ____:
heart = None
next_iter = 0
def __init__(self):
self.sent = []
self.on_enabled = set()
self.on_disabled = set()
self.enabled = True
def send(self, msg, **_fields):
self.sent.append((msg, _fields))
if self.heart:
if self.next_iter > 10:
self.heart._shutdown.set()
self.next_iter += 1
| MockDispatcher |
python | walkccc__LeetCode | solutions/3403. Find the Lexicographically Largest String From the Box I/3403.py | {
"start": 0,
"end": 1153
} | class ____:
def answerString(self, word: str, numFriends: int) -> str:
if numFriends == 1:
return word
s = self._lastSubstring(word)
sz = len(word) - numFriends + 1
return s[:min(len(s), sz)]
# Same as 1163. Last Substring in Lexicographical Order
def _lastSubstring(self, s: str) -> str:
i = 0
j = 1
k = 0 # the number of the same letters of s[i..n) and s[j..n)
while j + k < len(s):
if s[i + k] == s[j + k]:
k += 1
elif s[i + k] > s[j + k]:
# Skip s[j..j + k) and advance to s[j + k + 1] to find a possible
# lexicographically larger substring since s[i..i + k) == s[j..j + k)
# and s[i + k] > s[j + k).
j = j + k + 1
k = 0
else:
# Skip s[i..i + k) and advance to s[i + k + 1] or s[j] to find a
# possible lexicographically larger substring since
# s[i..i + k) == s[j..j + k) and s[i + k] < s[j + k).
# Note that it's unnecessary to explore s[i + k + 1..j) if
# i + k + 1 < j since they are already explored by j.
i = max(i + k + 1, j)
j = i + 1
k = 0
return s[i:]
| Solution |
python | plotly__plotly.py | plotly/graph_objs/waterfall/_outsidetextfont.py | {
"start": 233,
"end": 17203
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall"
_path_str = "waterfall.outsidetextfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Outsidetextfont object
Sets the font used for `text` lying outside the bar.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.Outsidetextfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Outsidetextfont
"""
super().__init__("outsidetextfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.Outsidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Outsidetextfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Outsidetextfont |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 3827,
"end": 7788
} | class ____(InputWidget):
''' Present a file-chooser dialog to users and return the contents of the
selected files.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = Readonly(Either(String, List(String)), help='''
The base64-encoded contents of the file or files that were loaded.
If `multiple` is set to False (default), this value is a single string with the contents
of the single file that was chosen.
If `multiple` is True, this value is a list of strings, each containing the contents of
one of the multiple files that were chosen.
The sequence of files is given by the list of filenames (see below)
''')
mime_type = Readonly(Either(String, List(String)), help='''
The mime-type of the file or files that were loaded.
If `multiple` is set to False (default), this value is a single string with the
mime-type of the single file that was chosen.
If `multiple` is True, this value is a list of strings, each containing the
mime-type of one of the multiple files that were chosen.
The sequence of files is given by the list of filename (see below)
''')
filename = Readonly(Either(String, List(String)), help='''
The name(s) of the file or files that were loaded.
If `multiple` is set to False (default), this value is a single string with the
name of the single file that was chosen.
If `multiple` is True, this value is a list of strings, each containing the
name of one of the multiple files that were chosen.
This list provides the sequence of files for the respective lists in value and mime-type
.. note::
The full file path is not included since browsers will not provide
access to that information for security reasons.
''')
accept = Either(String, List(String), default="", help="""
Comma-separated list of standard HTML file input filters that restrict what
files the user can pick from. Values can be:
`<file extension>`:
Specific file extension(s) (e.g: .gif, .jpg, .png, .doc) are pickable
`audio/*`:
all sound files are pickable
`video/*`:
all video files are pickable
`image/*`:
all image files are pickable
`<media type>`:
A valid `IANA Media Type`_, with no parameters.
.. _IANA Media Type: https://www.iana.org/assignments/media-types/media-types.xhtml
.. note::
A bug in some versions of Chrome on macOS Big Sur may limit
how you can set a file input filter for those users. In those cases,
it is impossible to limit the user's selection to specific file
extensions - instead, the browser will limit users to predefined sets of
file types, such as ``Text/*`` or ``Image/*``. See :bokeh-issue:`10888`
for more information.
""")
multiple = Bool(default=False, help="""
set multiple=False (default) for single file selection, set multiple=True if
selection of more than one file at a time should be possible.
""")
directory = Bool(default=False, help="""
Whether to allow selection of directories instead of files.
The filename will be relative paths to the uploaded directory.
.. note::
When a directory is uploaded it will give add a confirmation pop up.
The confirmation pop up cannot be disabled, as this is a security feature
in the browser.
.. note::
The `accept` parameter only works with file extension.
When using `accept` with `directory`, the number of files
reported will be the total amount of files, not the filtered.
""")
def clear(self) -> None:
""" Clear the contents of this file input widget.
"""
doc = self.document
if doc is not None:
doc.callbacks.send_event(ClearInput(self))
| FileInput |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_set.py | {
"start": 1051,
"end": 3980
} | class ____(ABC, DagsterModel):
"""Base class for defining a set of key-value pairs in the same namespace.
Includes shared behavior between NamespacedMetadataSet and NamespacedTagSet.
"""
@classmethod
@abstractmethod
def namespace(cls) -> str:
raise NotImplementedError()
@classmethod
def _namespaced_key(cls, key: str) -> str:
return f"{cls.namespace()}/{key}"
@staticmethod
def _strip_namespace_from_key(key: str) -> str:
return key.split("/", 1)[1]
def keys(self) -> Iterable[str]:
return [
self._namespaced_key(key)
for key in model_fields(self.__class__).keys()
# getattr returns the pydantic property on the subclass
if getattr(self, key) is not None
]
def __getitem__(self, key: str) -> Any:
# getattr returns the pydantic property on the subclass
return getattr(self, self._strip_namespace_from_key(key))
@classmethod
def extract(cls: type[T_NamespacedKVSet], values: Mapping[str, Any]) -> T_NamespacedKVSet:
"""Extracts entries from the provided dictionary into an instance of this class.
Ignores any entries in the dictionary whose keys don't correspond to fields on this
class.
In general, the following should always pass:
.. code-block:: python
class MyKVSet(NamespacedKVSet):
...
metadata: MyKVSet = ...
assert MyKVSet.extract(dict(metadata)) == metadata
Args:
values (Mapping[str, Any]): A dictionary of entries to extract.
"""
kwargs = {}
for namespaced_key, value in values.items():
splits = namespaced_key.split("/")
if len(splits) == 2:
namespace, key = splits
if namespace == cls.namespace() and key in model_fields(cls):
kwargs[key] = cls._extract_value(field_name=key, value=value)
elif namespace == cls.namespace() and key in cls.current_key_by_legacy_key():
current_key = cls.current_key_by_legacy_key()[key]
if f"{cls.namespace()}/{current_key}" not in values:
# Only extract the value from the backcompat key if the new
# key is not present
kwargs[current_key] = cls._extract_value(
field_name=current_key, value=value
)
return cls(**kwargs)
@classmethod
@abstractmethod
def _extract_value(cls, field_name: str, value: Any) -> Any:
"""Based on type annotation, potentially coerce the value to the expected type."""
...
@classmethod
def current_key_by_legacy_key(cls) -> Mapping[str, str]:
"""Return a mapping of each legacy key to its current key."""
return {}
| NamespacedKVSet |
python | django__django | tests/custom_lookups/tests.py | {
"start": 6903,
"end": 7210
} | class ____(models.Transform):
lookup_name = "as_datetime"
@property
def output_field(self):
return models.DateTimeField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return "from_unixtime({})".format(lhs), params
| DateTimeTransform |
python | huggingface__transformers | src/transformers/models/olmoe/modeling_olmoe.py | {
"start": 9793,
"end": 13766
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: OlmoeConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.q_norm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.k_norm = OlmoeRMSNorm(
(config.hidden_size // config.num_attention_heads) * config.num_key_value_heads, eps=config.rms_norm_eps
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
if self.config.clip_qkv is not None: # Diff with llama
query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
query_states = query_states.view(*hidden_shape).transpose(1, 2)
key_states = key_states.view(*hidden_shape).transpose(1, 2)
value_states = value_states.view(*hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None), # main diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| OlmoeAttention |
python | pytorch__pytorch | torch/autograd/profiler.py | {
"start": 37679,
"end": 44056
} | class ____:
"""Context manager that makes every autograd operation emit an NVTX range.
It is useful when running the program under nvprof::
nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
Unfortunately, there's no way to force nvprof to flush the data it collected
to disk, so for CUDA profiling one has to use this context manager to annotate
nvprof traces and wait for the process to exit before inspecting them.
Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
:func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
e.g. in Python REPL.
.. warning:
This context manager should not be called recursively, i.e. at most one
instance should be enabled at any given time.
Args:
enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
Default: ``True``.
record_shapes (bool, optional): If ``record_shapes=True``, the nvtx range wrapping
each autograd op will append information about the sizes of Tensor arguments received
by that op, in the following format:
``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
Non-tensor arguments will be represented by ``[]``.
Arguments will be listed in the order they are received by the backend op.
Please note that this order may not match the order in which those arguments were passed
on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
Default: ``False``
Example:
>>> # xdoctest: +SKIP("undefined variables")
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER)
>>> with torch.cuda.profiler.profile():
... model(x) # Warmup CUDA memory allocator and profiler
... with torch.autograd.profiler.emit_nvtx():
... model(x)
**Forward-backward correlation**
When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
correlating each backward-pass op with the corresponding forward-pass op can be difficult.
To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
generates.
During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
counter, incremented each time a new backward Function object is created and stashed for backward.
Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
if a backward Function object is created by this forward function,
the backward object will receive sequence number N.
During the backward pass, the top-level range wrapping each C++ backward Function's
``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
numbers in forward, you can track down which forward op created each backward Function.
Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
objects with the earlier forward pass.
**Double-backward**
If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
if you are setting up for a double-backward), each function's execution during backward
is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
to be executed later during double-backward, just as the original functions in the forward pass did.
The relationship between backward and double-backward is conceptually the same as the relationship
between forward and backward: The functions still emit current-sequence-number-tagged ranges,
the Function objects they create still stash those sequence numbers, and during the eventual
double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
numbers, which can be compared to `seq` numbers from the backward pass.
.. warning:
The sequence number is thread-local, and some forward functions don't create an associated
backward Function object (instead delegating that to sub-functions further down the call chain).
For these reasons, the correspondence of stashed sequence numbers in
backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is
not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
disambiguate which forward function created which
backward Function object. You may need to make a judgment based on analytic knowledge of what
the expected correspondence should be.
"""
def __init__(self, enabled=True, record_shapes=False):
self.enabled = enabled
self.entered = False
self.record_shapes = record_shapes
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("NVTX annotation context manager is not reentrant")
self.entered = True
torch.cuda.synchronize()
_run_on_profiler_start()
_enable_profiler(
ProfilerConfig(
ProfilerState.NVTX,
self.record_shapes,
False,
False,
False,
False,
_ExperimentalConfig(),
),
set(),
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
torch.cuda.synchronize()
_disable_profiler()
_run_on_profiler_stop()
return False
def load_nvprof(path):
"""Open an nvprof trace file and parses autograd annotations.
Args:
path (str): path to nvprof trace
"""
return EventList(parse_nvprof_trace(path))
| emit_nvtx |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_isin.py | {
"start": 897,
"end": 1884
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.to_be_valid_isin"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_isin(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidIsin |
python | openai__openai-python | src/openai/resources/videos.py | {
"start": 30937,
"end": 31700
} | class ____:
def __init__(self, videos: AsyncVideos) -> None:
self._videos = videos
self.create = async_to_streamed_response_wrapper(
videos.create,
)
self.retrieve = async_to_streamed_response_wrapper(
videos.retrieve,
)
self.list = async_to_streamed_response_wrapper(
videos.list,
)
self.delete = async_to_streamed_response_wrapper(
videos.delete,
)
self.download_content = async_to_custom_streamed_response_wrapper(
videos.download_content,
AsyncStreamedBinaryAPIResponse,
)
self.remix = async_to_streamed_response_wrapper(
videos.remix,
)
| AsyncVideosWithStreamingResponse |
python | wandb__wandb | wandb/integration/diffusers/pipeline_resolver.py | {
"start": 209,
"end": 1834
} | class ____:
"""Resolver for `DiffusionPipeline` request and responses from [HuggingFace Diffusers](https://huggingface.co/docs/diffusers/index), providing necessary data transformations, formatting, and logging.
This is based off `wandb.sdk.integration_utils.auto_logging.RequestResponseResolver`.
"""
def __init__(self) -> None:
self.wandb_table = None
self.pipeline_call_count = 1
def __call__(
self,
args: Sequence[Any],
kwargs: Dict[str, Any],
response: Response,
start_time: float,
time_elapsed: float,
) -> Any:
"""Main call method for the `DiffusersPipelineResolver` class.
Args:
args: (Sequence[Any]) List of arguments.
kwargs: (Dict[str, Any]) Dictionary of keyword arguments.
response: (wandb.sdk.integration_utils.auto_logging.Response) The response from
the request.
start_time: (float) Time when request started.
time_elapsed: (float) Time elapsed for the request.
Returns:
Packed data as a dictionary for logging to wandb, None if an exception occurred.
"""
pipeline_name = args[0].__class__.__name__
resolver = None
if pipeline_name in SUPPORTED_MULTIMODAL_PIPELINES:
resolver = DiffusersMultiModalPipelineResolver(
pipeline_name, self.pipeline_call_count
)
self.pipeline_call_count += 1
loggable_dict = resolver(args, kwargs, response, start_time, time_elapsed)
return loggable_dict
| DiffusersPipelineResolver |
python | apache__airflow | devel-common/src/tests_common/test_utils/perf/perf_kit/sqlalchemy.py | {
"start": 5159,
"end": 7474
} | class ____:
"""
Counts the number of queries sent to Airflow Database in a given context.
Does not support multiple processes. When a new process is started in context, its queries will
not be included.
:param print_fn: The function used to display the text. By default, ``builtins.print``
"""
def __init__(self, print_fn: Callable[[str], None] = print):
self.result = CountQueriesResult()
self.print_fn = print_fn
def __enter__(self):
import airflow.settings
event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
return self.result
def __exit__(self, type_, value, traceback):
import airflow.settings
event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
self.print_fn(f"Count SQL queries: {self.result.count}")
def after_cursor_execute(
self,
conn,
cursor,
statement,
parameters,
context,
executemany,
):
"""
Execute after cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
"""
self.result.count += 1
count_queries = CountQueries
if __name__ == "__main__":
# Example:
def case():
"""Case of logging om/."""
import logging
from unittest import mock
from airflow.dag_processing.processor import DagFileProcessor
with mock.patch.dict(
"os.environ",
{
"PERF_DAGS_COUNT": "200",
"PERF_TASKS_COUNT": "10",
"PERF_START_AGO": "2d",
"PERF_SCHEDULE_INTERVAL": "None",
"PERF_SHAPE": "no_structure",
},
):
log = logging.getLogger(__name__)
processor = DagFileProcessor(dag_ids=[], dag_directory="/tmp", log=log)
dag_file = os.path.join(os.path.dirname(__file__), os.path.pardir, "dags", "elastic_dag.py")
processor.process_file(file_path=dag_file, callback_requests=[])
with trace_queries(), count_queries():
case()
| CountQueries |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/utils/websocket_client.py | {
"start": 35251,
"end": 35644
} | class ____(_WebSocketHBChannel, SuperQObject):
"""A heartbeat channel emitting a Qt signal when a message is received."""
# Emitted when the kernel has died.
kernel_died = Signal(float)
def call_handlers(self, since_last_heartbeat):
"""Reimplemented to emit signal."""
# Emit the generic signal.
self.kernel_died.emit(since_last_heartbeat)
| QtWSHBChannel |
python | langchain-ai__langchain | libs/core/langchain_core/messages/ai.py | {
"start": 2088,
"end": 2757
} | class ____(TypedDict, total=False):
"""Breakdown of output token counts.
Does *not* need to sum to full output token count. Does *not* need to have all keys.
Example:
```python
{
"audio": 10,
"reasoning": 200,
}
```
May also hold extra provider-specific keys.
!!! version-added "Added in `langchain-core` 0.3.9"
"""
audio: int
"""Audio output tokens."""
reasoning: int
"""Reasoning output tokens.
Tokens generated by the model in a chain of thought process (i.e. by OpenAI's o1
models) that are not returned as part of model output.
"""
| OutputTokenDetails |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/unicode_encode_op_test.py | {
"start": 1352,
"end": 17579
} | class ____(test.TestCase, parameterized.TestCase):
def assertAllEqual(self, rt, expected):
with self.cached_session() as sess:
value = sess.run(rt)
if isinstance(value, np.ndarray):
value = value.tolist()
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
value = value.to_list()
self.assertEqual(value, expected)
def testScalar(self):
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(72, "UTF-8")
with self.cached_session():
with self.assertRaises(ValueError):
ragged_string_ops.unicode_encode(constant_op.constant(72), "UTF-8")
def testRequireParams(self):
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode() # pylint: disable=no-value-for-parameter
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(72) # pylint: disable=no-value-for-parameter
with self.cached_session():
with self.assertRaises(TypeError):
ragged_string_ops.unicode_encode(encoding="UTF-8") # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrorsValid(self, encoding):
# Noncharacters are not errors.
# See https://www.unicode.org/versions/corrigendum9.html.
test_value = np.array([ord("H"), ord("e"), ord("o"), 0, ord("\b"), 0x1F600,
0xFDD0, # noncharacter
0xFFFE, 0xFFFF, # last two in BMP
0x10FFFF], # largest valid
np.int32)
expected_value = (
u"Heo\0\b\U0001f600\ufdd0\ufffe\uffff\U0010ffff"
).encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"strict")
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrorsNegative(self, encoding):
test_value = np.array([-1], np.int32)
with self.cached_session() as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(
ragged_string_ops.unicode_encode(test_value, encoding, "strict"))
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrorsTooLarge(self, encoding):
test_value = np.array([0x110000], np.int32)
with self.cached_session() as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(
ragged_string_ops.unicode_encode(test_value, encoding, "strict"))
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
def testStrictErrorsSurrogatePair(self, encoding):
test_value = np.array([0xD83D, 0xDE00], np.int32)
with self.cached_session() as session:
with self.assertRaises(errors.InvalidArgumentError):
session.run(
ragged_string_ops.unicode_encode(test_value, encoding, "strict"))
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testIgnoreErrors(self, encoding):
# Noncharacters are not errors.
# See https://www.unicode.org/versions/corrigendum9.html.
test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o'),
0, ord('\b'), 0x1F600, # valid
0xD83D, 0xDE00, # invalid, surrogate code points
0xFDD0, # noncharacter
0xFFFE, 0xFFFF, # last two in BMP
0x10FFFF, # last in plane 16 = last in Unicode
0x110000], # invalid, beyond Unicode
np.int32)
expected_value = (
u"Heo\0\b\U0001F600\ufdd0\ufffe\uffff\U0010ffff"
).encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"ignore")
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testIgnoreErrorsSurrogate(self, encoding):
test_value = np.array([0xD83D, 0xDE00], np.int32)
expected_value = b""
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"ignore")
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testReplaceErrors(self, encoding):
# Noncharacters are not errors.
# See https://www.unicode.org/versions/corrigendum9.html.
test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o'),
0, ord('\b'), 0x1F600, # valid
0xD83D, 0xDE00, # invalid, surrogate code points
0xFDD0, # noncharacter
0xFFFE, 0xFFFF, # last two in BMP
0x10FFFF, # last in plane 16 = last in Unicode
0x110000], # invalid, beyond Unicode
np.int32)
expected_value = (u"He\ufffd\ufffdo\0\b\U0001F600\ufffd\ufffd"
u"\ufdd0\ufffe\uffff\U0010ffff\ufffd").encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace")
self.assertAllEqual(unicode_encode_op, expected_value)
# Test custom replacement character
test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')],
np.int32)
expected_value = u"Heooo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", ord('o'))
self.assertAllEqual(unicode_encode_op, expected_value)
# Verify "replace" is default
test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')],
np.int32)
expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
# Verify non-default replacement with an unpaired surrogate.
test_value = np.array([0xD801], np.int32)
expected_value = "\u0041".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 0x41)
self.assertAllEqual(unicode_encode_op, expected_value)
# Test with a noncharacter code point. These are not errors.
# See https://www.unicode.org/versions/corrigendum9.html.
test_value = np.array([0x1FFFF], np.int32)
expected_value = u"\U0001ffff".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 0x41)
self.assertAllEqual(unicode_encode_op, expected_value)
# Replacement_char must be within range
test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')],
np.int32)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding,
"replace", 0x110000)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(unicode_encode_op)
# -- regular Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testVector(self, encoding):
test_value = np.array([ord('H'), ord('e'), ord('l'), ord('l'), ord('o')],
np.int32)
expected_value = u"Hello".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
test_value = np.array([ord('H'), ord('e'), 0xC3, 0xC3, 0x1F604], np.int32)
expected_value = u"He\xc3\xc3\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
# Single character string
test_value = np.array([ord('H')], np.int32)
expected_value = u"H".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
test_value = np.array([0x1F604], np.int32)
expected_value = u"\U0001f604".encode(encoding)
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testMatrix(self, encoding):
test_value = np.array(
[[72, 0x1F604, 108, 108, 111], [87, 0x1F604, 114, 108, 100]], np.int32)
expected_value = [
u"H\U0001f604llo".encode(encoding), u"W\U0001f604rld".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100], [119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114], [99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding), u"cube.".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimMatrix(self, encoding):
test_value = constant_op.constant(
[[[[72, 101, 108, 108, 111]], [[87, 111, 114, 108, 100]]],
[[[102, 105, 120, 101, 100]], [[119, 111, 114, 100, 115]]],
[[[72, 121, 112, 101, 114]], [[99, 117, 98, 101, 46]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding)],
[u"World".encode(encoding)]],
[[u"fixed".encode(encoding)],
[u"words".encode(encoding)]],
[[u"Hyper".encode(encoding)],
[u"cube.".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
# -- Ragged Tensor tests -- #
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[ord('H'), 0xC3, ord('l'), ord('l'), ord('o')],
[ord('W'), 0x1F604, ord('r'), ord('l'), ord('d'), ord('.')]], np.int32)
expected_value = [
u"H\xc3llo".encode(encoding), u"W\U0001f604rld.".encode(encoding)
]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100]],
[[72, 121, 112, 101, 114], [119, 111, 114, 100, 115],
[99, 117, 98, 101, 46]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)],
[u"fixed".encode(encoding)],
[
u"Hyper".encode(encoding), u"words".encode(encoding),
u"cube.".encode(encoding)
]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]],
[[68, 111, 110, 39, 116], [119, 195, 114, 114, 121, 44, 32, 98, 101]],
[[0x1F604], []]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[
u"Don't".encode(encoding),
u"w\xc3rry, be".encode(encoding)
], [u"\U0001f604".encode(encoding), u"".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test3DimMatrixWithRagged2ndAnd3rdDim(self, encoding):
test_value = ragged_factory_ops.constant(
[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]], [],
[[0x1F604]]], np.int32)
expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)],
[], [u"\U0001f604".encode(encoding)]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def test4DimRaggedMatrix(self, encoding):
test_value = ragged_factory_ops.constant(
[[[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]]],
[[[]], [[72, 121, 112, 101]]]], np.int32)
expected_value = [[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[[u"".encode(encoding)], [u"Hype".encode(encoding)]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
@parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE")
@test_util.run_v1_only("b/120545219")
def testRaggedMatrixWithMultiDimensionInnerValues(self, encoding):
test_flat_values = constant_op.constant([[[72, 101, 108, 108, 111],
[87, 111, 114, 108, 100]],
[[102, 105, 120, 101, 100],
[119, 111, 114, 100, 115]],
[[72, 121, 112, 101, 114],
[99, 117, 98, 101, 46]]])
test_row_splits = [
constant_op.constant([0, 2, 3], dtype=np.int64),
constant_op.constant([0, 1, 1, 3], dtype=np.int64)
]
test_value = ragged_tensor.RaggedTensor.from_nested_row_splits(
test_flat_values, test_row_splits)
expected_value = [[[[u"Hello".encode(encoding), u"World".encode(encoding)]],
[]],
[[[u"fixed".encode(encoding), u"words".encode(encoding)],
[u"Hyper".encode(encoding),
u"cube.".encode(encoding)]]]]
unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding)
self.assertAllEqual(unicode_encode_op, expected_value)
def testUnknownInputRankError(self):
# Use a tf.function that erases shape information.
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def f(v):
return ragged_string_ops.unicode_encode(v, "UTF-8")
with self.assertRaisesRegex(
ValueError, "Rank of input_tensor must be statically known."):
f([72, 101, 108, 108, 111])
if __name__ == "__main__":
test.main()
| UnicodeEncodeOpTest |
python | numba__numba | numba/core/imputils.py | {
"start": 9646,
"end": 14889
} | class ____(Enum):
"""
Enumerate the reference type
"""
"""
A new reference
"""
NEW = 1
"""
A borrowed reference
"""
BORROWED = 2
"""
An untracked reference
"""
UNTRACKED = 3
def iternext_impl(ref_type=None):
"""
Wrap the given iternext() implementation so that it gets passed
an _IternextResult() object easing the returning of the iternext()
result pair.
ref_type: a numba.targets.imputils.RefType value, the reference type used is
that specified through the RefType enum.
The wrapped function will be called with the following signature:
(context, builder, sig, args, iternext_result)
"""
if ref_type not in [x for x in RefType]:
raise ValueError("ref_type must be an enum member of imputils.RefType")
def outer(func):
def wrapper(context, builder, sig, args):
pair_type = sig.return_type
pairobj = context.make_helper(builder, pair_type)
func(context, builder, sig, args,
_IternextResult(context, builder, pairobj))
if ref_type == RefType.NEW:
impl_ret = impl_ret_new_ref
elif ref_type == RefType.BORROWED:
impl_ret = impl_ret_borrowed
elif ref_type == RefType.UNTRACKED:
impl_ret = impl_ret_untracked
else:
raise ValueError("Unknown ref_type encountered")
return impl_ret(context, builder,
pair_type, pairobj._getvalue())
return wrapper
return outer
def call_getiter(context, builder, iterable_type, val):
"""
Call the `getiter()` implementation for the given *iterable_type*
of value *val*, and return the corresponding LLVM inst.
"""
getiter_sig = typing.signature(iterable_type.iterator_type, iterable_type)
getiter_impl = context.get_function('getiter', getiter_sig)
return getiter_impl(builder, (val,))
def call_iternext(context, builder, iterator_type, val):
"""
Call the `iternext()` implementation for the given *iterator_type*
of value *val*, and return a convenience _IternextResult() object
reflecting the results.
"""
itemty = iterator_type.yield_type
pair_type = types.Pair(itemty, types.boolean)
iternext_sig = typing.signature(pair_type, iterator_type)
iternext_impl = context.get_function('iternext', iternext_sig)
val = iternext_impl(builder, (val,))
pairobj = context.make_helper(builder, pair_type, val)
return _IternextResult(context, builder, pairobj)
def call_len(context, builder, ty, val):
"""
Call len() on the given value. Return None if len() isn't defined on
this type.
"""
try:
len_impl = context.get_function(len, typing.signature(types.intp, ty,))
except NotImplementedError:
return None
else:
return len_impl(builder, (val,))
_ForIterLoop = collections.namedtuple('_ForIterLoop',
('value', 'do_break'))
@contextlib.contextmanager
def for_iter(context, builder, iterable_type, val):
"""
Simulate a for loop on the given iterable. Yields a namedtuple with
the given members:
- `value` is the value being yielded
- `do_break` is a callable to early out of the loop
"""
iterator_type = iterable_type.iterator_type
iterval = call_getiter(context, builder, iterable_type, val)
bb_body = builder.append_basic_block('for_iter.body')
bb_end = builder.append_basic_block('for_iter.end')
def do_break():
builder.branch(bb_end)
builder.branch(bb_body)
with builder.goto_block(bb_body):
res = call_iternext(context, builder, iterator_type, iterval)
with builder.if_then(builder.not_(res.is_valid()), likely=False):
builder.branch(bb_end)
yield _ForIterLoop(res.yielded_value(), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
if context.enable_nrt:
context.nrt.decref(builder, iterator_type, iterval)
def impl_ret_new_ref(ctx, builder, retty, ret):
"""
The implementation returns a new reference.
"""
return ret
def impl_ret_borrowed(ctx, builder, retty, ret):
"""
The implementation returns a borrowed reference.
This function automatically incref so that the implementation is
returning a new reference.
"""
if ctx.enable_nrt:
ctx.nrt.incref(builder, retty, ret)
return ret
def impl_ret_untracked(ctx, builder, retty, ret):
"""
The return type is not a NRT object.
"""
return ret
@contextlib.contextmanager
def force_error_model(context, model_name='numpy'):
"""
Temporarily change the context's error model.
"""
from numba.core import callconv
old_error_model = context.error_model
context.error_model = callconv.create_error_model(model_name, context)
try:
yield
finally:
context.error_model = old_error_model
def numba_typeref_ctor(*args, **kwargs):
"""A stub for use internally by Numba when a call is emitted
on a TypeRef.
"""
raise NotImplementedError("This function should not be executed.")
| RefType |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | {
"start": 1310,
"end": 2783
} | class ____(object):
"""Representation of possible resolution results of a package.
This holds three attributes:
* `information` is a collection of `RequirementInformation` pairs.
Each pair is a requirement contributing to this criterion, and the
candidate that provides the requirement.
* `incompatibilities` is a collection of all known not-to-work candidates
to exclude from consideration.
* `candidates` is a collection containing all possible candidates deducted
from the union of contributing requirements and known incompatibilities.
It should never be empty, except when the criterion is an attribute of a
raised `RequirementsConflicted` (in which case it is always empty).
.. note::
This class is intended to be externally immutable. **Do not** mutate
any of its attribute containers.
"""
def __init__(self, candidates, information, incompatibilities):
self.candidates = candidates
self.information = information
self.incompatibilities = incompatibilities
def __repr__(self):
requirements = ", ".join(
"({!r}, via={!r})".format(req, parent)
for req, parent in self.information
)
return "Criterion({})".format(requirements)
def iter_requirement(self):
return (i.requirement for i in self.information)
def iter_parent(self):
return (i.parent for i in self.information)
| Criterion |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/sql.py | {
"start": 7505,
"end": 7962
} | class ____(db.Text):
"""Allows customization of certain fields to map to LONGTEXT in MySQL. For Postgres, all text
fields are mapped to TEXT, which is unbounded in length, so the distinction is not neccessary.
In MySQL, however, TEXT is limited to 64KB, so LONGTEXT (4GB) is required for certain fields.
"""
pass
@compiles(LongText, "mysql")
def compile_longtext_mysql(_element, _compiler, **_kw) -> str:
return "LONGTEXT"
| LongText |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/test_misconfigured_expectations.py | {
"start": 3609,
"end": 5889
} | class ____:
_DATA = pd.DataFrame({"a": [1, 2]})
_EXPECTATION = gxe.ExpectColumnMedianToBeBetween(column="b", min_value=5, max_value=10)
@parameterize_batch_for_data_sources(
data_source_configs=[*PANDAS_DATA_SOURCES, *SQL_DATA_SOURCES],
data=_DATA,
)
def test_pandas_and_sql(self, batch_for_datasource) -> None:
self._assert_misconfiguration(
batch_for_datasource=batch_for_datasource,
exception_message='The column "b" in BatchData does not exist',
)
@parameterize_batch_for_data_sources(
data_source_configs=SPARK_DATA_SOURCES,
data=_DATA,
)
def test_spark(self, batch_for_datasource) -> None:
self._assert_misconfiguration(
batch_for_datasource=batch_for_datasource,
exception_message="A column or function parameter with name `b` cannot be resolved",
)
def _assert_misconfiguration(self, batch_for_datasource: Batch, exception_message: str) -> None:
result = batch_for_datasource.validate(self._EXPECTATION)
assert not result.success
assert exception_message in str(result.exception_info)
@parameterize_batch_for_data_sources(
data_source_configs=ALL_DATA_SOURCES,
data=pd.DataFrame({"a": [1, 2]}),
)
def test_datetime_expectation_against_numeric_data_misconfiguration(batch_for_datasource) -> None:
expectation = gxe.ExpectColumnMaxToBeBetween(
column="a",
min_value=dt.datetime(2024, 1, 1, tzinfo=dt.timezone.utc),
max_value=dt.datetime(2024, 1, 2, tzinfo=dt.timezone.utc),
strict_max=True,
)
result = batch_for_datasource.validate(expectation)
assert not result.success
assert "into datetime representation" in str(result.exception_info)
@parameterize_batch_for_data_sources(
data_source_configs=ALL_DATA_SOURCES,
data=pd.DataFrame({"a": [1, 2]}),
)
def test_column_min_max_mismatch_misconfiguration(batch_for_datasource) -> None:
expectation = gxe.ExpectColumnValuesToBeBetween(column="a", min_value=2, max_value=1)
result = batch_for_datasource.validate(expectation)
assert not result.success
assert "min_value cannot be greater than max_value" in str(result.exception_info)
| TestNonExistentColumnMisconfiguration |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 37291,
"end": 37674
} | class ____(ColumnArg):
"""Validate that the argument is either a column or a valid tag"""
def normalize(
self, value: str, params: ParamsType, combinator: Combinator | None
) -> str | list[Any]:
if TAG_KEY_RE.match(value) or VALID_FIELD_PATTERN.match(value):
return value
return super().normalize(value, params, combinator)
| ColumnTagArg |
python | google__pytype | pytype/pytd/codegen/function.py | {
"start": 153,
"end": 394
} | class ____(Exception):
"""Inconsistent decorators on an overloaded function."""
def __init__(self, name, typ):
msg = f"Overloaded signatures for '{name}' disagree on {typ} decorators"
super().__init__(msg)
| OverloadedDecoratorError |
python | apache__airflow | airflow-core/tests/unit/callbacks/test_callback_requests.py | {
"start": 4337,
"end": 6912
} | class ____:
def test_dagrun_context_creation(self):
"""Test DagRunContext can be created with dag_run and first_ti"""
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="running",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
assert context.dag_run == dag_run_data
assert context.last_ti == ti_data
def test_dagrun_context_none_values(self):
"""Test DagRunContext can be created with None values"""
context = DagRunContext()
assert context.dag_run is None
assert context.last_ti is None
def test_dagrun_context_serialization(self):
"""Test DagRunContext can be serialized and deserialized"""
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="running",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
# Test serialization
serialized = context.model_dump_json()
# Test deserialization
deserialized = DagRunContext.model_validate_json(serialized)
assert deserialized.dag_run.dag_id == context.dag_run.dag_id
assert deserialized.last_ti.task_id == context.last_ti.task_id
| TestDagRunContext |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 54523,
"end": 55297
} | class ____(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
| CSVReader |
python | doocs__leetcode | solution/3100-3199/3147.Taking Maximum Energy From the Mystic Dungeon/Solution.py | {
"start": 0,
"end": 313
} | class ____:
def maximumEnergy(self, energy: List[int], k: int) -> int:
ans = -inf
n = len(energy)
for i in range(n - k, n):
j, s = i, 0
while j >= 0:
s += energy[j]
ans = max(ans, s)
j -= k
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/tensor_array_ops.py | {
"start": 37030,
"end": 51638
} | class ____:
"""Class wrapping dynamic-sized, per-time-step, Tensor arrays.
This class is meant to be used with dynamic iteration primitives such as
`while_loop` and `map_fn`. It supports gradient back-propagation via special
"flow" control flow dependencies.
Note that although the array can be read multiple times and positions can be
overwritten, behavior may be undefined when storing multiple references to
the same array and clear_after_read is False. In particular, avoid using
methods like concat() to convert an intermediate TensorArray to a Tensor,
then further modifying the TensorArray, particularly if you need to backprop
through it later.
Example 1: Plain reading and writing.
>>> ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)
>>> ta = ta.write(0, 10)
>>> ta = ta.write(1, 20)
>>> ta = ta.write(2, 30)
>>>
>>> ta.read(0)
<tf.Tensor: shape=(), dtype=float32, numpy=10.0>
>>> ta.read(1)
<tf.Tensor: shape=(), dtype=float32, numpy=20.0>
>>> ta.read(2)
<tf.Tensor: shape=(), dtype=float32, numpy=30.0>
>>> ta.stack()
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([10., 20., 30.],
dtype=float32)>
Example 2: Fibonacci sequence algorithm that writes in a loop then returns.
>>> @tf.function
... def fibonacci(n):
... ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
... ta = ta.unstack([0., 1.])
...
... for i in range(2, n):
... ta = ta.write(i, ta.read(i - 1) + ta.read(i - 2))
...
... return ta.stack()
>>>
>>> fibonacci(7)
<tf.Tensor: shape=(7,), dtype=float32,
numpy=array([0., 1., 1., 2., 3., 5., 8.], dtype=float32)>
Example 3: A simple loop interacting with a `tf.Variable`.
>>> v = tf.Variable(1)
>>> @tf.function
... def f(x):
... ta = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
... for i in tf.range(x):
... v.assign_add(i)
... ta = ta.write(i, v)
... return ta.stack()
>>> f(5)
<tf.Tensor: shape=(5,), dtype=int32, numpy=array([ 1, 2, 4, 7, 11],
dtype=int32)>
"""
def __init__(self,
dtype,
size=None,
dynamic_size=None,
clear_after_read=None,
tensor_array_name=None,
handle=None,
flow=None,
infer_shape=True,
element_shape=None,
colocate_with_first_write_call=True,
name=None):
"""Construct a new TensorArray or wrap an existing TensorArray handle.
A note about the parameter `name`:
The name of the `TensorArray` (even if passed in) is uniquified: each time
a new `TensorArray` is created at runtime it is assigned its own name for
the duration of the run. This avoids name collisions if a `TensorArray`
is created within a `while_loop`.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values after reading them. This disables read-many
semantics, but allows early release of memory.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None. Only supported in graph mode.
flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`. Only supported in graph mode.
infer_shape: (optional, default: True) If True, shape inference is
enabled. In this case, all elements must have the same shape.
element_shape: (optional, default: None) A `TensorShape` object specifying
the shape constraints of each of the elements of the TensorArray. Need
not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the device
context available during its initialization.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor.
"""
if (context.executing_eagerly() and
(flow is None or flow.dtype != dtypes.variant)):
# It is possible to create a Variant-style TensorArray even in eager mode,
# and this is fine but can have performance implications in eager.
# An example of when this happens is if a tf.function returns a
# TensorArray in its output; its flow variant object is returned to Eager.
# This can be wrapped back up in a Variant-style TensorArray.
implementation = _EagerTensorArray
elif (flow is not None and flow.dtype == dtypes.variant or
control_flow_util.EnableControlFlowV2(ops.get_default_graph())):
implementation = _GraphTensorArrayV2
else:
implementation = _GraphTensorArray
self._implementation = implementation(
dtype,
size=size,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name,
handle=handle,
flow=flow,
infer_shape=infer_shape,
element_shape=element_shape,
colocate_with_first_write_call=colocate_with_first_write_call,
name=name)
self._implementation.parent = weakref.ref(self)
@property
def flow(self):
"""The flow `Tensor` forcing ops leading to this TensorArray state."""
return self._implementation._flow
@property
def dtype(self):
"""The data type of this TensorArray."""
return self._implementation._dtype
@property
def handle(self):
"""The reference to the TensorArray."""
return self._implementation.handle
@property
def element_shape(self):
"""The `tf.TensorShape` of elements in this TensorArray."""
return self._implementation.element_shape
@property
def dynamic_size(self):
"""Python bool; if `True` the TensorArray can grow dynamically."""
return self._implementation._dynamic_size
@property
def _infer_shape(self):
# TODO(slebedev): consider making public or changing TensorArrayStructure
# to access _implementation directly. Note that dynamic_size is also
# only used by TensorArrayStructure.
return self._implementation._infer_shape
def identity(self):
"""Returns a TensorArray with the same content and properties.
Returns:
A new TensorArray object with flow that ensures the control dependencies
from the contexts will become control dependencies for writes, reads, etc.
Use this object for all subsequent operations.
"""
return self._implementation.identity()
def grad(self, source, flow=None, name=None):
return self._implementation.grad(source, flow=flow, name=name)
def read(self, index, name=None):
"""Read the value at location `index` in the TensorArray.
Args:
index: 0-D. int32 tensor with the index to read from.
name: A name for the operation (optional).
Returns:
The tensor at index `index`.
"""
return self._implementation.read(index, name=name)
@tf_should_use.should_use_result(warn_in_eager=True)
def write(self, index, value, name=None):
"""Write `value` into index `index` of the TensorArray.
Args:
index: 0-D. int32 scalar with the index to write to.
value: N-D. Tensor of type `dtype`. The Tensor to write to this index.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the write occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if there are more writers than specified.
"""
return self._implementation.write(index, value, name=name)
def stack(self, name=None):
"""Return the values in the TensorArray as a stacked `Tensor`.
All of the values must have been written and their shapes must all match.
If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.
For example:
>>> ta = tf.TensorArray(tf.int32, size=3)
>>> ta = ta.write(0, tf.constant([1, 2]))
>>> ta = ta.write(1, tf.constant([3, 4]))
>>> ta = ta.write(2, tf.constant([5, 6]))
>>> ta.stack()
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4],
[5, 6]], dtype=int32)>
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray stacked into one tensor.
"""
return self._implementation.stack(name=name)
def gather(self, indices, name=None):
"""Return selected values in the TensorArray as a packed `Tensor`.
All of selected values must have been written and their shapes
must all match.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the
`TensorArray` is not dynamic, `max_value=size()`.
name: A name for the operation (optional).
Returns:
The tensors in the `TensorArray` selected by `indices`, packed into one
tensor.
"""
return self._implementation.gather(indices, name=name)
def concat(self, name=None):
"""Return the values in the TensorArray as a concatenated `Tensor`.
All of the values must have been written, their ranks must match, and
and their shapes must all match for all dimensions except the first.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray concatenated into one tensor.
"""
return self._implementation.concat(name=name)
@tf_should_use.should_use_result
def unstack(self, value, name=None):
"""Unstack the values of a `Tensor` in the TensorArray.
If input value shapes have rank-`R`, then the output TensorArray will
contain elements whose shapes are rank-`(R-1)`.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the unstack occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
return self._implementation.unstack(value, name=name)
@tf_should_use.should_use_result
def scatter(self, indices, value, name=None):
"""Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the
`TensorArray` is not dynamic, `max_value=size()`.
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the scatter occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
return self._implementation.scatter(indices, value, name=name)
@tf_should_use.should_use_result
def split(self, value, lengths, name=None):
"""Split the values of a `Tensor` into the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
lengths: 1-D. int32 vector with the lengths to use when splitting `value`
along its first dimension.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the split occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
return self._implementation.split(value, lengths, name=name)
def size(self, name=None):
"""Return the size of the TensorArray."""
return self._implementation.size(name=name)
@tf_should_use.should_use_result
def close(self, name=None):
"""Close the current TensorArray."""
return self._implementation.close(name=name)
def __tf_tracing_type__(self, _):
return TensorArrayTraceType(self)
def build_ta_with_new_flow(old_ta, flow):
"""Builds a TensorArray with a new `flow` tensor."""
# Sometimes we get old_ta as the implementation, sometimes it's the
# TensorArray wrapper object.
impl = (old_ta._implementation if isinstance(old_ta, TensorArray) else old_ta)
if not context.executing_eagerly():
if (not isinstance(impl, _GraphTensorArrayV2) and
control_flow_util.EnableControlFlowV2(ops.get_default_graph())):
raise NotImplementedError("Attempting to build a graph-mode TF2-style "
"TensorArray from either an eager-mode "
"TensorArray or a TF1-style TensorArray. "
"This is not currently supported. You may be "
"attempting to capture a TensorArray "
"inside a tf.function or tf.data map function. "
"Instead, construct a new TensorArray inside "
"the function.")
new_ta = TensorArray(
dtype=impl.dtype,
handle=impl.handle,
flow=flow,
infer_shape=impl._infer_shape,
colocate_with_first_write_call=impl._colocate_with_first_write_call)
new_impl = new_ta._implementation
new_impl._dynamic_size = impl._dynamic_size
new_impl._size = impl._size
new_impl._colocate_with = impl._colocate_with
new_impl._element_shape = impl._element_shape # Share _element_shape.
return new_ta
# pylint: enable=protected-access
def _check_dtypes(value, dtype):
if value.dtype != dtype:
logging.error("Error: Input value {} has dtype {}, but expected dtype {}. "
"This leads to undefined behavior and will be an error "
"in future versions of TensorFlow. Traceback:\n{}".format(
value, str(value.dtype), str(dtype),
"".join(traceback.format_stack())))
@tf_export("TensorArraySpec")
@type_spec_registry.register("tf.TensorArraySpec")
| TensorArray |
python | ansible__ansible | lib/ansible/plugins/__init__.py | {
"start": 2433,
"end": 7162
} | class ____(_AnsiblePluginInfoMixin, _ConfigurablePlugin, metaclass=abc.ABCMeta):
# Set by plugin loader
_load_name: str
# allow extra passthrough parameters
allow_extras: bool = False
_extras_prefix: str | None = None
def __init__(self):
self._options = {}
self._origins = {}
self._defs = None
@property
def extras_prefix(self):
if not self._extras_prefix:
self._extras_prefix = self._load_name.split('.')[-1]
return self._extras_prefix
def matches_name(self, possible_names):
possible_fqcns = set()
for name in possible_names:
if '.' not in name:
possible_fqcns.add(f"ansible.builtin.{name}")
elif name.startswith("ansible.legacy."):
possible_fqcns.add(name.removeprefix("ansible.legacy."))
possible_fqcns.add(name)
return bool(possible_fqcns.intersection(set(self.ansible_aliases)))
def get_option_and_origin(self, option, hostvars=None):
if option not in self._options:
try:
# some plugins don't use set_option(s) and cannot use direct settings, so this populates the local copy for them
self._options[option], self._origins[option] = C.config.get_config_value_and_origin(option, plugin_type=self.plugin_type,
plugin_name=self._load_name, variables=hostvars)
except AnsibleError as e:
# callers expect key error on missing
raise KeyError() from e
return self._options[option], self._origins[option]
@functools.cached_property
def __plugin_info(self):
"""
Internal cached property to retrieve `PluginInfo` for this plugin instance.
Only for use by the `AnsiblePlugin` base class.
"""
return _plugin_info.get_plugin_info(self)
def get_option(self, option, hostvars=None):
if option not in self._options:
# let it populate _options
self.get_option_and_origin(option, hostvars=hostvars)
return self._options[option]
def get_options(self, hostvars=None):
options = {}
for option in self.option_definitions.keys():
options[option] = self.get_option(option, hostvars=hostvars)
return options
def set_option(self, option, value):
self._options[option] = C.config.get_config_value(option, plugin_type=self.plugin_type, plugin_name=self._load_name, direct={option: value})
self._origins[option] = 'Direct'
_display._report_config_warnings(self.__plugin_info)
def set_options(self, task_keys=None, var_options=None, direct=None):
"""
Sets the _options attribute with the configuration/keyword information for this plugin
:arg task_keys: Dict with playbook keywords that affect this option
:arg var_options: Dict with either 'connection variables'
:arg direct: Dict with 'direct assignment'
"""
self._options, self._origins = C.config.get_plugin_options_and_origins(self.plugin_type, self._load_name, keys=task_keys,
variables=var_options, direct=direct)
# allow extras/wildcards from vars that are not directly consumed in configuration
# this is needed to support things like winrm that can have extended protocol options we don't directly handle
if self.allow_extras and var_options and '_extras' in var_options:
# these are largely unvalidated passthroughs, either plugin or underlying API will validate
# TODO: deprecate and remove, most plugins that needed this don't use this facility anymore
self._options['_extras'] = var_options['_extras']
_display._report_config_warnings(self.__plugin_info)
def has_option(self, option):
if not self._options:
self.set_options()
return option in self._options
@property
def option_definitions(self):
if (not hasattr(self, "_defs")) or self._defs is None:
self._defs = C.config.get_configuration_definitions(plugin_type=self.plugin_type, name=self._load_name)
return self._defs
def _check_required(self):
# FIXME: standardize required check based on config
pass
def __repr__(self):
ansible_name = getattr(self, 'ansible_name', '(unknown)')
load_name = getattr(self, '_load_name', '(unknown)')
return f'{type(self).__name__}(plugin_type={self.plugin_type!r}, {ansible_name=!r}, {load_name=!r})'
| AnsiblePlugin |
python | numba__numba | numba/tests/test_types.py | {
"start": 9193,
"end": 11472
} | class ____(TestCase):
"""
Tests for number types.
"""
def test_bitwidth(self):
"""
All numeric types have bitwidth attribute
"""
for ty in types.number_domain:
self.assertTrue(hasattr(ty, "bitwidth"))
def test_minval_maxval(self):
self.assertEqual(types.int8.maxval, 127)
self.assertEqual(types.int8.minval, -128)
self.assertEqual(types.uint8.maxval, 255)
self.assertEqual(types.uint8.minval, 0)
self.assertEqual(types.int64.maxval, (1 << 63) - 1)
self.assertEqual(types.int64.minval, -(1 << 63))
self.assertEqual(types.uint64.maxval, (1 << 64) - 1)
self.assertEqual(types.uint64.minval, 0)
def test_from_bidwidth(self):
f = types.Integer.from_bitwidth
self.assertIs(f(32), types.int32)
self.assertIs(f(8, signed=False), types.uint8)
def test_ordering(self):
def check_order(values):
for i in range(len(values)):
self.assertLessEqual(values[i], values[i])
self.assertGreaterEqual(values[i], values[i])
self.assertFalse(values[i] < values[i])
self.assertFalse(values[i] > values[i])
for j in range(i):
self.assertLess(values[j], values[i])
self.assertLessEqual(values[j], values[i])
self.assertGreater(values[i], values[j])
self.assertGreaterEqual(values[i], values[j])
self.assertFalse(values[i] < values[j])
self.assertFalse(values[i] <= values[j])
self.assertFalse(values[j] > values[i])
self.assertFalse(values[j] >= values[i])
check_order([types.int8, types.int16, types.int32, types.int64])
check_order([types.uint8, types.uint16, types.uint32, types.uint64])
check_order([types.float32, types.float64])
check_order([types.complex64, types.complex128])
with self.assertRaises(TypeError):
types.int8 <= types.uint32
with self.assertRaises(TypeError):
types.int8 <= types.float32
with self.assertRaises(TypeError):
types.float64 <= types.complex128
| TestNumbers |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_aliases.py | {
"start": 942,
"end": 1301
} | class ____(GQLResult):
node: Optional[ArtifactAliasFragment]
ArtifactCollectionAliases.model_rebuild()
ArtifactCollectionAliasesArtifactCollection.model_rebuild()
ArtifactCollectionAliasesArtifactCollectionAliases.model_rebuild()
ArtifactCollectionAliasesArtifactCollectionAliasesEdges.model_rebuild()
| ArtifactCollectionAliasesArtifactCollectionAliasesEdges |
python | walkccc__LeetCode | solutions/2000. Reverse Prefix of Word/2000.py | {
"start": 0,
"end": 133
} | class ____:
def reversePrefix(self, word: str, ch: str) -> str:
i = word.find(ch) + 1
return word[:i][::-1] + word[i:]
| Solution |
python | spack__spack | lib/spack/spack/test/sbang.py | {
"start": 1669,
"end": 15182
} | class ____:
"""Directory full of test scripts to run sbang instrumentation on."""
def __init__(self, sbang_line):
self.tempdir = tempfile.mkdtemp()
self.directory = os.path.join(self.tempdir, "dir")
fs.mkdirp(self.directory)
# Script with short shebang
self.short_shebang = os.path.join(self.tempdir, "short")
with open(self.short_shebang, "w", encoding="utf-8") as f:
f.write(short_line)
f.write(last_line)
self.make_executable(self.short_shebang)
# Script with long shebang
self.long_shebang = os.path.join(self.tempdir, "long")
with open(self.long_shebang, "w", encoding="utf-8") as f:
f.write(long_line)
f.write(last_line)
self.make_executable(self.long_shebang)
# Non-executable script with long shebang
self.nonexec_long_shebang = os.path.join(self.tempdir, "nonexec_long")
with open(self.nonexec_long_shebang, "w", encoding="utf-8") as f:
f.write(long_line)
f.write(last_line)
# Lua script with long shebang
self.lua_shebang = os.path.join(self.tempdir, "lua")
with open(self.lua_shebang, "w", encoding="utf-8") as f:
f.write(lua_line)
f.write(last_line)
self.make_executable(self.lua_shebang)
# Lua occurring in text, not in shebang
self.lua_textbang = os.path.join(self.tempdir, "lua_in_text")
with open(self.lua_textbang, "w", encoding="utf-8") as f:
f.write(short_line)
f.write(lua_in_text)
f.write(last_line)
self.make_executable(self.lua_textbang)
# Luajit script with long shebang
self.luajit_shebang = os.path.join(self.tempdir, "luajit")
with open(self.luajit_shebang, "w", encoding="utf-8") as f:
f.write(luajit_line)
f.write(last_line)
self.make_executable(self.luajit_shebang)
# Luajit occuring in text, not in shebang
self.luajit_textbang = os.path.join(self.tempdir, "luajit_in_text")
with open(self.luajit_textbang, "w", encoding="utf-8") as f:
f.write(short_line)
f.write(luajit_in_text)
f.write(last_line)
self.make_executable(self.luajit_textbang)
# Node script with long shebang
self.node_shebang = os.path.join(self.tempdir, "node")
with open(self.node_shebang, "w", encoding="utf-8") as f:
f.write(node_line)
f.write(last_line)
self.make_executable(self.node_shebang)
# Node occuring in text, not in shebang
self.node_textbang = os.path.join(self.tempdir, "node_in_text")
with open(self.node_textbang, "w", encoding="utf-8") as f:
f.write(short_line)
f.write(node_in_text)
f.write(last_line)
self.make_executable(self.node_textbang)
# php script with long shebang
self.php_shebang = os.path.join(self.tempdir, "php")
with open(self.php_shebang, "w", encoding="utf-8") as f:
f.write(php_line)
f.write(last_line)
self.make_executable(self.php_shebang)
# php occuring in text, not in shebang
self.php_textbang = os.path.join(self.tempdir, "php_in_text")
with open(self.php_textbang, "w", encoding="utf-8") as f:
f.write(short_line)
f.write(php_in_text)
f.write(last_line)
self.make_executable(self.php_textbang)
# Script already using sbang.
self.has_sbang = os.path.join(self.tempdir, "shebang")
with open(self.has_sbang, "w", encoding="utf-8") as f:
f.write(sbang_line)
f.write(long_line)
f.write(last_line)
self.make_executable(self.has_sbang)
# Fake binary file.
self.binary = os.path.join(self.tempdir, "binary")
tar = which("tar", required=True)
tar("czf", self.binary, self.has_sbang)
self.make_executable(self.binary)
def destroy(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def make_executable(self, path):
# make a file executable
st = os.stat(path)
executable_mode = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(path, executable_mode)
st = os.stat(path)
assert oct(executable_mode) == oct(st.st_mode & executable_mode)
@pytest.fixture
def script_dir(sbang_line):
sdir = ScriptDirectory(sbang_line)
yield sdir
sdir.destroy()
@pytest.mark.parametrize(
"shebang,interpreter",
[
(b"#!/path/to/interpreter argument\n", b"/path/to/interpreter"),
(b"#! /path/to/interpreter truncated-argum", b"/path/to/interpreter"),
(b"#! \t \t/path/to/interpreter\t \targument", b"/path/to/interpreter"),
(b"#! \t \t /path/to/interpreter", b"/path/to/interpreter"),
(b"#!/path/to/interpreter\0", b"/path/to/interpreter"),
(b"#!/path/to/interpreter multiple args\n", b"/path/to/interpreter"),
(b"#!\0/path/to/interpreter arg\n", None),
(b"#!\n/path/to/interpreter arg\n", None),
(b"#!", None),
],
)
def test_shebang_interpreter_regex(shebang, interpreter):
assert sbang.get_interpreter(shebang) == interpreter
def test_shebang_handling(script_dir, sbang_line):
sbang.filter_shebangs_in_directory(script_dir.tempdir)
# Make sure this is untouched
with open(script_dir.short_shebang, "r", encoding="utf-8") as f:
assert f.readline() == short_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.long_shebang, "r", encoding="utf-8") as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
# Make sure this is untouched
with open(script_dir.nonexec_long_shebang, "r", encoding="utf-8") as f:
assert f.readline() == long_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.lua_shebang, "r", encoding="utf-8") as f:
assert f.readline() == sbang_line
assert f.readline() == lua_line_patched
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.luajit_shebang, "r", encoding="utf-8") as f:
assert f.readline() == sbang_line
assert f.readline() == luajit_line_patched
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.node_shebang, "r", encoding="utf-8") as f:
assert f.readline() == sbang_line
assert f.readline() == node_line_patched
assert f.readline() == last_line
assert filecmp.cmp(script_dir.lua_textbang, os.path.join(script_dir.tempdir, "lua_in_text"))
assert filecmp.cmp(
script_dir.luajit_textbang, os.path.join(script_dir.tempdir, "luajit_in_text")
)
assert filecmp.cmp(script_dir.node_textbang, os.path.join(script_dir.tempdir, "node_in_text"))
assert filecmp.cmp(script_dir.php_textbang, os.path.join(script_dir.tempdir, "php_in_text"))
# Make sure this is untouched
with open(script_dir.has_sbang, "r", encoding="utf-8") as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
def test_shebang_handles_non_writable_files(script_dir, sbang_line):
# make a file non-writable
st = os.stat(script_dir.long_shebang)
not_writable_mode = st.st_mode & ~stat.S_IWRITE
os.chmod(script_dir.long_shebang, not_writable_mode)
test_shebang_handling(script_dir, sbang_line)
st = os.stat(script_dir.long_shebang)
assert oct(not_writable_mode) == oct(st.st_mode)
@pytest.fixture(scope="function")
def configure_group_perms():
# On systems with remote groups, the primary user group may be remote
# and grp does not act on remote groups.
# To ensure we find a group we can operate on, we get take the first group
# listed which has the current user as a member.
gid = fs.group_ids(os.getuid())[0]
group_name = grp.getgrgid(gid).gr_name
conf = syaml.load_config(
"""\
all:
permissions:
read: world
write: group
group: {0}
""".format(
group_name
)
)
spack.config.set("packages", conf, scope="user")
yield
@pytest.fixture(scope="function")
def configure_user_perms():
conf = syaml.load_config(
"""\
all:
permissions:
read: world
write: user
"""
)
spack.config.set("packages", conf, scope="user")
yield
def check_sbang_installation(group=False):
sbang_path = sbang.sbang_install_path()
sbang_bin_dir = os.path.dirname(sbang_path)
assert sbang_path.startswith(spack.store.STORE.unpadded_root)
assert os.path.exists(sbang_path)
assert fs.is_exe(sbang_path)
status = os.stat(sbang_bin_dir)
mode = status.st_mode & 0o777
if group:
assert mode == 0o775, "Unexpected {0}".format(oct(mode))
else:
assert mode == 0o755, "Unexpected {0}".format(oct(mode))
status = os.stat(sbang_path)
mode = status.st_mode & 0o777
if group:
assert mode == 0o775, "Unexpected {0}".format(oct(mode))
else:
assert mode == 0o755, "Unexpected {0}".format(oct(mode))
def run_test_install_sbang(group):
sbang_path = sbang.sbang_install_path()
sbang_bin_dir = os.path.dirname(sbang_path)
assert sbang_path.startswith(spack.store.STORE.unpadded_root)
assert not os.path.exists(sbang_bin_dir)
sbang.install_sbang()
check_sbang_installation(group)
# put an invalid file in for sbang
fs.mkdirp(sbang_bin_dir)
with open(sbang_path, "w", encoding="utf-8") as f:
f.write("foo")
sbang.install_sbang()
check_sbang_installation(group)
# install again and make sure sbang is still fine
sbang.install_sbang()
check_sbang_installation(group)
def test_install_group_sbang(install_mockery, configure_group_perms):
run_test_install_sbang(True)
def test_install_user_sbang(install_mockery, configure_user_perms):
run_test_install_sbang(False)
def test_install_sbang_too_long(tmp_path: pathlib.Path):
root = str(tmp_path)
num_extend = sbang.system_shebang_limit - len(root) - len("/bin/sbang")
long_path = root
while num_extend > 1:
add = min(num_extend, 255)
long_path = os.path.join(long_path, "e" * add)
num_extend -= add
with spack.store.use_store(long_path):
with pytest.raises(sbang.SbangPathError) as exc_info:
sbang.sbang_install_path()
err = str(exc_info.value)
assert "root is too long" in err
assert "exceeds limit" in err
assert "cannot patch" in err
def test_sbang_hook_skips_nonexecutable_blobs(tmp_path: pathlib.Path):
# Write a binary blob to non-executable.sh, with a long interpreter "path"
# consisting of invalid UTF-8. The latter is technically not really necessary for
# the test, but binary blobs accidentally starting with b'#!' usually do not contain
# valid UTF-8, so we also ensure that Spack does not attempt to decode as UTF-8.
contents = b"#!" + b"\x80" * sbang.system_shebang_limit
file = str(tmp_path / "non-executable.sh")
with open(file, "wb") as f:
f.write(contents)
sbang.filter_shebangs_in_directory(str(tmp_path))
# Make sure there is no sbang shebang.
with open(file, "rb") as f:
assert b"sbang" not in f.readline()
def test_sbang_handles_non_utf8_files(tmp_path: pathlib.Path):
# We have an executable with a copyright sign as filename
contents = b"#!" + b"\xa9" * sbang.system_shebang_limit + b"\nand another symbol: \xa9"
# Make sure it's indeed valid latin1 but invalid utf-8.
assert contents.decode("latin1")
with pytest.raises(UnicodeDecodeError):
contents.decode("utf-8")
# Put it in an executable file
file = str(tmp_path / "latin1.sh")
with open(file, "wb") as f:
f.write(contents)
# Run sbang
assert sbang.filter_shebang(file)
with open(file, "rb") as f:
new_contents = f.read()
assert contents in new_contents
assert b"sbang" in new_contents
@pytest.fixture
def shebang_limits_system_8_spack_16():
system_limit, sbang.system_shebang_limit = sbang.system_shebang_limit, 8
spack_limit, sbang.spack_shebang_limit = sbang.spack_shebang_limit, 16
yield
sbang.system_shebang_limit = system_limit
sbang.spack_shebang_limit = spack_limit
def test_shebang_exceeds_spack_shebang_limit(
shebang_limits_system_8_spack_16, tmp_path: pathlib.Path
):
"""Tests whether shebangs longer than Spack's limit are skipped"""
file = str(tmp_path / "longer_than_spack_limit.sh")
with open(file, "wb") as f:
f.write(b"#!" + b"x" * sbang.spack_shebang_limit)
# Then Spack shouldn't try to add a shebang
assert not sbang.filter_shebang(file)
with open(file, "rb") as f:
assert b"sbang" not in f.read()
def test_sbang_hook_handles_non_writable_files_preserving_permissions(tmp_path: pathlib.Path):
path = str(tmp_path / "file.sh")
with open(path, "w", encoding="utf-8") as f:
f.write(long_line)
os.chmod(path, 0o555)
sbang.filter_shebang(path)
with open(path, "r", encoding="utf-8") as f:
assert "sbang" in f.readline()
assert os.stat(path).st_mode & 0o777 == 0o555
| ScriptDirectory |
python | plotly__plotly.py | plotly/graph_objs/scatterpolar/_marker.py | {
"start": 233,
"end": 42102
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.marker"
_valid_props = {
"angle",
"angleref",
"anglesrc",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"gradient",
"line",
"maxdisplayed",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"standoff",
"standoffsrc",
"symbol",
"symbolsrc",
}
@property
def angle(self):
"""
Sets the marker angle in respect to `angleref`.
The 'angle' property is a angle (in degrees) that may be
specified as a number between -180 and 180, or a list, numpy array or other iterable thereof.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float|numpy.ndarray
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
@property
def angleref(self):
"""
Sets the reference for marker angle. With "previous", angle 0
points along the line from the previous point to this one. With
"up", angle 0 points toward the top of the screen.
The 'angleref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['previous', 'up']
Returns
-------
Any
"""
return self["angleref"]
@angleref.setter
def angleref(self, val):
self["angleref"] = val
@property
def anglesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `angle`.
The 'anglesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["anglesrc"]
@anglesrc.setter
def anglesrc(self, val):
self["anglesrc"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scatterpolar.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.scatterpolar.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Blackbody,B
luered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic
,Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def gradient(self):
"""
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.marker.Gradient`
- A dict of string/value properties that will be passed
to the Gradient constructor
Returns
-------
plotly.graph_objs.scatterpolar.marker.Gradient
"""
return self["gradient"]
@gradient.setter
def gradient(self, val):
self["gradient"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.scatterpolar.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def maxdisplayed(self):
"""
Sets a maximum number of points to be drawn on the graph. 0
corresponds to no limit.
The 'maxdisplayed' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["maxdisplayed"]
@maxdisplayed.setter
def maxdisplayed(self, val):
self["maxdisplayed"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def standoff(self):
"""
Moves the marker away from the data point in the direction of
`angle` (in px). This can be useful for example if you have
another marker at this location and you want to point an
arrowhead marker at it.
The 'standoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["standoff"]
@standoff.setter
def standoff(self, val):
self["standoff"] = val
@property
def standoffsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `standoff`.
The 'standoffsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["standoffsrc"]
@standoffsrc.setter
def standoffsrc(self, val):
self["standoffsrc"] = val
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open', 53,
'53', 'arrow', 153, '153', 'arrow-open', 54, '54',
'arrow-wide', 154, '154', 'arrow-wide-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `symbol`.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
@property
def _prop_descriptions(self):
return """\
angle
Sets the marker angle in respect to `angleref`.
angleref
Sets the reference for marker angle. With "previous",
angle 0 points along the line from the previous point
to this one. With "up", angle 0 points toward the top
of the screen.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatterpolar.marker.ColorB
ar` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
gradient
:class:`plotly.graph_objects.scatterpolar.marker.Gradie
nt` instance or dict with compatible properties
line
:class:`plotly.graph_objects.scatterpolar.marker.Line`
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on the
graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
standoff
Moves the marker away from the data point in the
direction of `angle` (in px). This can be useful for
example if you have another marker at this location and
you want to point an arrowhead marker at it.
standoffsrc
Sets the source reference on Chart Studio Cloud for
`standoff`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
"""
def __init__(
self,
arg=None,
angle=None,
angleref=None,
anglesrc=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
maxdisplayed=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
standoff=None,
standoffsrc=None,
symbol=None,
symbolsrc=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Marker`
angle
Sets the marker angle in respect to `angleref`.
angleref
Sets the reference for marker angle. With "previous",
angle 0 points along the line from the previous point
to this one. With "up", angle 0 points toward the top
of the screen.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatterpolar.marker.ColorB
ar` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
gradient
:class:`plotly.graph_objects.scatterpolar.marker.Gradie
nt` instance or dict with compatible properties
line
:class:`plotly.graph_objects.scatterpolar.marker.Line`
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on the
graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
standoff
Moves the marker away from the data point in the
direction of `angle` (in px). This can be useful for
example if you have another marker at this location and
you want to point an arrowhead marker at it.
standoffsrc
Sets the source reference on Chart Studio Cloud for
`standoff`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("angle", arg, angle)
self._set_property("angleref", arg, angleref)
self._set_property("anglesrc", arg, anglesrc)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("gradient", arg, gradient)
self._set_property("line", arg, line)
self._set_property("maxdisplayed", arg, maxdisplayed)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._set_property("size", arg, size)
self._set_property("sizemin", arg, sizemin)
self._set_property("sizemode", arg, sizemode)
self._set_property("sizeref", arg, sizeref)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("standoff", arg, standoff)
self._set_property("standoffsrc", arg, standoffsrc)
self._set_property("symbol", arg, symbol)
self._set_property("symbolsrc", arg, symbolsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_xcom.py | {
"start": 978,
"end": 2807
} | class ____:
@pytest.mark.parametrize(
"map_index",
[
pytest.param(None, id="map_index_none"),
pytest.param(-1, id="map_index_negative_one"),
pytest.param(0, id="map_index_zero"),
pytest.param(5, id="map_index_positive"),
],
)
def test_delete_includes_map_index_in_delete_xcom_message(self, map_index, mock_supervisor_comms):
"""Test that BaseXCom.delete properly passes map_index to the DeleteXCom message."""
with mock.patch.object(
BaseXCom, "_get_xcom_db_ref", return_value=XComResult(key="test_key", value="test_value")
) as mock_get_ref:
with mock.patch.object(BaseXCom, "purge") as mock_purge:
BaseXCom.delete(
key="test_key",
task_id="test_task",
dag_id="test_dag",
run_id="test_run",
map_index=map_index,
)
mock_get_ref.assert_called_once_with(
key="test_key",
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=map_index,
)
# Verify purge was called
mock_purge.assert_called_once()
# Verify DeleteXCom message was sent with map_index
mock_supervisor_comms.send.assert_called_once()
sent_message = mock_supervisor_comms.send.call_args[0][0]
assert isinstance(sent_message, DeleteXCom)
assert sent_message.key == "test_key"
assert sent_message.dag_id == "test_dag"
assert sent_message.task_id == "test_task"
assert sent_message.run_id == "test_run"
assert sent_message.map_index == map_index
| TestBaseXCom |
python | pypa__virtualenv | src/virtualenv/activation/activator.py | {
"start": 84,
"end": 1419
} | class ____(ABC):
"""Generates activate script for the virtual environment."""
def __init__(self, options) -> None:
"""
Create a new activator generator.
:param options: the parsed options as defined within :meth:`add_parser_arguments`
"""
self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == "." else options.prompt
@classmethod
def supports(cls, interpreter): # noqa: ARG003
"""
Check if the activation script is supported in the given interpreter.
:param interpreter: the interpreter we need to support
:return: ``True`` if supported, ``False`` otherwise
"""
return True
@classmethod # noqa: B027
def add_parser_arguments(cls, parser, interpreter):
"""
Add CLI arguments for this activation script.
:param parser: the CLI parser
:param interpreter: the interpreter this virtual environment is based of
"""
@abstractmethod
def generate(self, creator):
"""
Generate activate script for the given creator.
:param creator: the creator (based of :class:`virtualenv.create.creator.Creator`) we used to create this \
virtual environment
"""
raise NotImplementedError
__all__ = [
"Activator",
]
| Activator |
python | conda__conda | conda/plugins/config.py | {
"start": 748,
"end": 4294
} | class ____(Configuration):
"""
Class used to hold settings for conda plugins.
The object created by this class should only be accessed via
:class:`conda.base.context.Context.plugins`.
When this class is updated via the :func:`add_plugin_setting` function it adds new setting
properties which can be accessed later via the context object.
We currently call that function in
:meth:`conda.plugins.manager.CondaPluginManager.load_settings`.
because ``CondaPluginManager`` has access to all registered plugin settings via the settings
plugin hook.
"""
parameter_names: tuple[str, ...] = ()
parameter_names_and_aliases: tuple[str, ...] = ()
@classmethod
def add_plugin_setting(
cls, name: str, parameter: Parameter, aliases: tuple[str, ...] = ()
):
"""
Adds a setting to the :class:`PluginConfig` class
"""
cls.parameter_names = (*cls.parameter_names, name)
loader = ParameterLoader(parameter, aliases=aliases)
name = loader._set_name(name)
setattr(cls, name, loader)
# Rebuild parameter_names_and_aliases to include the new parameter
cls._set_parameter_names_and_aliases()
@classmethod
def remove_all_plugin_settings(cls) -> None:
"""
Removes all attached settings from the :class:`PluginConfig` class
"""
for name in cls.parameter_names:
try:
delattr(cls, name)
except AttributeError:
continue
cls.parameter_names = tuple()
def __init__(self, data):
self._cache_ = {}
self._data = data
@property
def raw_data(self) -> dict[Path, dict[str, RawParameter]]:
"""
This is used to move everything under the key "plugins" from the provided dictionary
to the top level of the returned dictionary. The returned dictionary is then passed
to :class:`PluginConfig`.
"""
new_data = defaultdict(dict)
for source, config in self._data.items():
if plugin_data := config.get("plugins"):
plugin_data_value = plugin_data.value(None)
if not isinstance(plugin_data_value, Mapping):
continue
for param_name, raw_param in plugin_data_value.items():
new_data[source][param_name] = raw_param
elif source == EnvRawParameter.source:
for env_var, raw_param in config.items():
if env_var.startswith("plugins_"):
_, param_name = env_var.split("plugins_")
new_data[source][param_name] = raw_param
return new_data
@property
def category_map(self) -> dict[str, tuple[str, ...]]:
return {"Additional settings provided by plugins": self.parameter_names}
def get_descriptions(self) -> dict[str, str]:
from ..base.context import context
return {
name: setting.description
for name, setting in context.plugin_manager.get_settings().items()
}
def describe_parameter(self, parameter_name) -> dict[str, Any]:
"""
Returns the description of a parameter.
We add to this method in order to change the "name" key that is returned to prepend "plugins."
to it.
"""
description = super().describe_parameter(parameter_name)
description["name"] = f"plugins.{description['name']}"
return description
| PluginConfig |
python | Textualize__textual | src/textual/widgets/_masked_input.py | {
"start": 1622,
"end": 15966
} | class ____(Validator):
"""Template mask enforcer."""
@dataclass
class CharDefinition:
"""Holds data for a single char of the template mask."""
pattern: Pattern[str]
"""Compiled regular expression to check for matches."""
flags: _CharFlags = _CharFlags.NONE
"""Flags defining special behaviors"""
char: str = ""
"""Mask character (separator or blank or placeholder)"""
def __init__(self, input: Input, template_str: str) -> None:
"""Initialise the mask enforcer, which is also a subclass of `Validator`.
Args:
input: The `MaskedInput` that owns this object.
template_str: Template string controlling masked input behavior.
"""
self.input = input
self.template: list[_Template.CharDefinition] = []
self.blank: str = " "
escaped = False
flags = _CharFlags.NONE
template_chars: list[str] = list(template_str)
while template_chars:
char = template_chars.pop(0)
if escaped:
char_definition = self.CharDefinition(
re.compile(re.escape(char)), _CharFlags.SEPARATOR, char
)
escaped = False
else:
if char == "\\":
escaped = True
continue
elif char == ";":
break
new_flags = {
">": _CharFlags.UPPERCASE,
"<": _CharFlags.LOWERCASE,
"!": _CharFlags.NONE,
}.get(char, None)
if new_flags is not None:
flags = new_flags
continue
pattern, required_flag = _TEMPLATE_CHARACTERS.get(char, (None, None))
if pattern:
char_flags = (
_CharFlags.REQUIRED if required_flag else _CharFlags.NONE
)
char_definition = self.CharDefinition(
re.compile(pattern), char_flags
)
else:
char_definition = self.CharDefinition(
re.compile(re.escape(char)), _CharFlags.SEPARATOR, char
)
char_definition.flags |= flags
self.template.append(char_definition)
if template_chars:
self.blank = template_chars[0]
if all(
(_CharFlags.SEPARATOR in char_definition.flags)
for char_definition in self.template
):
raise ValueError(
"Template must contain at least one non-separator character"
)
self.update_mask(input.placeholder)
def validate(self, value: str) -> ValidationResult:
"""Checks if `value` matches this template, always returning a ValidationResult.
Args:
value: The string value to be validated.
Returns:
A ValidationResult with the validation outcome.
"""
if self.check(value.ljust(len(self.template), chr(0)), False):
return self.success()
else:
return self.failure("Value does not match template!", value)
def check(self, value: str, allow_space: bool) -> bool:
"""Checks if `value matches this template, but returns result as a bool.
Args:
value: The string value to be validated.
allow_space: Consider space character in `value` as valid.
Returns:
True if `value` is valid for this template, False otherwise.
"""
for char, char_definition in zip(value, self.template):
if (
(_CharFlags.REQUIRED in char_definition.flags)
and (not char_definition.pattern.match(char))
and ((char != " ") or not allow_space)
):
return False
return True
def insert_separators(self, value: str, cursor_position: int) -> tuple[str, int]:
"""Automatically inserts separators in `value` at `cursor_position` if expected, eventually advancing
the current cursor position.
Args:
value: Current control value entered by user.
cursor_position: Where to start inserting separators (if any).
Returns:
A tuple in the form `(value, cursor_position)` with new value and possibly advanced cursor position.
"""
while cursor_position < len(self.template) and (
_CharFlags.SEPARATOR in self.template[cursor_position].flags
):
value = (
value[:cursor_position]
+ self.template[cursor_position].char
+ value[cursor_position + 1 :]
)
cursor_position += 1
return value, cursor_position
def insert_text_at_cursor(self, text: str) -> str | None:
"""Inserts `text` at current cursor position. If not present in `text`, any expected separator is automatically
inserted at the correct position.
Args:
text: The text to be inserted.
Returns:
A tuple in the form `(value, cursor_position)` with the new control value and current cursor position if
`text` matches the template, None otherwise.
"""
value = self.input.value
cursor_position = self.input.cursor_position
separators = set(
[
char_definition.char
for char_definition in self.template
if _CharFlags.SEPARATOR in char_definition.flags
]
)
for char in text:
if char in separators:
if char == self.next_separator(cursor_position):
prev_position = self.prev_separator_position(cursor_position)
if (cursor_position > 0) and (prev_position != cursor_position - 1):
next_position = self.next_separator_position(cursor_position)
while cursor_position < next_position + 1:
if (
_CharFlags.SEPARATOR
in self.template[cursor_position].flags
):
char = self.template[cursor_position].char
else:
char = " "
value = (
value[:cursor_position]
+ char
+ value[cursor_position + 1 :]
)
cursor_position += 1
continue
if cursor_position >= len(self.template):
break
char_definition = self.template[cursor_position]
assert _CharFlags.SEPARATOR not in char_definition.flags
if not char_definition.pattern.match(char):
return None
if _CharFlags.LOWERCASE in char_definition.flags:
char = char.lower()
elif _CharFlags.UPPERCASE in char_definition.flags:
char = char.upper()
value = value[:cursor_position] + char + value[cursor_position + 1 :]
cursor_position += 1
value, cursor_position = self.insert_separators(value, cursor_position)
return value, cursor_position
def move_cursor(self, delta: int) -> None:
"""Moves the cursor position by `delta` characters, skipping separators if
running over them.
Args:
delta: The number of characters to move; positive moves right, negative
moves left.
"""
cursor_position = self.input.cursor_position
if delta < 0 and all(
[
(_CharFlags.SEPARATOR in char_definition.flags)
for char_definition in self.template[:cursor_position]
]
):
return
cursor_position += delta
while (
(cursor_position >= 0)
and (cursor_position < len(self.template))
and (_CharFlags.SEPARATOR in self.template[cursor_position].flags)
):
cursor_position += delta
self.input.cursor_position = cursor_position
def delete_at_position(self, position: int | None = None) -> None:
"""Deletes character at `position`.
Args:
position: Position within the control value where to delete a character;
if None the current cursor position is used.
"""
value = self.input.value
if position is None:
position = self.input.cursor_position
cursor_position = position
if cursor_position < len(self.template):
assert _CharFlags.SEPARATOR not in self.template[cursor_position].flags
if cursor_position == len(value) - 1:
value = value[:cursor_position]
else:
value = value[:cursor_position] + " " + value[cursor_position + 1 :]
pos = len(value)
while pos > 0:
char_definition = self.template[pos - 1]
if (_CharFlags.SEPARATOR not in char_definition.flags) and (
value[pos - 1] != " "
):
break
pos -= 1
value = value[:pos]
if cursor_position > len(value):
cursor_position = len(value)
value, cursor_position = self.insert_separators(value, cursor_position)
self.input.cursor_position = cursor_position
self.input.value = value
def at_separator(self, position: int | None = None) -> bool:
"""Checks if character at `position` is a separator.
Args:
position: Position within the control value where to check;
if None the current cursor position is used.
Returns:
True if character is a separator, False otherwise.
"""
if position is None:
position = self.input.cursor_position
if (position >= 0) and (position < len(self.template)):
return _CharFlags.SEPARATOR in self.template[position].flags
else:
return False
def prev_separator_position(self, position: int | None = None) -> int | None:
"""Obtains the position of the previous separator character starting from
`position` within the template string.
Args:
position: Starting position from which to search previous separator.
If None, current cursor position is used.
Returns:
The position of the previous separator, or None if no previous
separator is found.
"""
if position is None:
position = self.input.cursor_position
for index in range(position - 1, 0, -1):
if _CharFlags.SEPARATOR in self.template[index].flags:
return index
else:
return None
def next_separator_position(self, position: int | None = None) -> int | None:
"""Obtains the position of the next separator character starting from
`position` within the template string.
Args:
position: Starting position from which to search next separator.
If None, current cursor position is used.
Returns:
The position of the next separator, or None if no next
separator is found.
"""
if position is None:
position = self.input.cursor_position
for index in range(position + 1, len(self.template)):
if _CharFlags.SEPARATOR in self.template[index].flags:
return index
else:
return None
def next_separator(self, position: int | None = None) -> str | None:
"""Obtains the next separator character starting from `position`
within the template string.
Args:
position: Starting position from which to search next separator.
If None, current cursor position is used.
Returns:
The next separator character, or None if no next
separator is found.
"""
position = self.next_separator_position(position)
if position is None:
return None
else:
return self.template[position].char
def display(self, value: str) -> str:
"""Returns `value` ready for display, with spaces replaced by
placeholder characters.
Args:
value: String value to display.
Returns:
New string value with spaces replaced by placeholders.
"""
result = []
for char, char_definition in zip(value, self.template):
if char == " ":
char = char_definition.char
result.append(char)
return "".join(result)
def update_mask(self, placeholder: str) -> None:
"""Updates template placeholder characters from `placeholder`. If
given string is smaller than template string, template blank character
is used to fill remaining template placeholder characters.
Args:
placeholder: New placeholder string.
"""
for index, char_definition in enumerate(self.template):
if _CharFlags.SEPARATOR not in char_definition.flags:
if index < len(placeholder):
char_definition.char = placeholder[index]
else:
char_definition.char = self.blank
@property
def mask(self) -> str:
"""Property returning the template placeholder mask."""
return "".join([char_definition.char for char_definition in self.template])
@property
def empty_mask(self) -> str:
"""Property returning the template placeholder mask with all non-separators replaced by space."""
return "".join(
[
(
" "
if (_CharFlags.SEPARATOR not in char_definition.flags)
else char_definition.char
)
for char_definition in self.template
]
)
| _Template |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 1076,
"end": 2182
} | class ____(Blockwise):
"""Partition-wise component of `ApplyConcatApply`
This class is used within `ApplyConcatApply._lower`.
See Also
--------
ApplyConcatApply
"""
_parameters = ["frame", "kind", "chunk", "chunk_kwargs"]
@property
def operation(self):
return self.chunk
@functools.cached_property
def _args(self) -> list:
return [self.frame]
@functools.cached_property
def _kwargs(self) -> dict:
return self.chunk_kwargs or {}
def _tree_repr_lines(self, indent=0, recursive=True):
header = f"{funcname(self.kind)}({funcname(type(self))}):"
lines = []
if recursive:
for dep in self.dependencies():
lines.extend(dep._tree_repr_lines(2))
for k, v in self._kwargs.items():
try:
if v != self.kind._defaults[k]:
header += f" {k}={v}"
except KeyError:
header += f" {k}={v}"
lines = [header] + lines
lines = [" " * indent + line for line in lines]
return lines
| Chunk |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 13376,
"end": 14302
} | class ____(object):
# AbilityT
def __init__(
self,
id = 0,
distance = 0,
):
self.id = id # type: int
self.distance = distance # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
ability = Ability()
ability.Init(buf, pos)
return cls.InitFromObj(ability)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, ability):
x = AbilityT()
x._UnPack(ability)
return x
# AbilityT
def _UnPack(self, ability):
if ability is None:
return
self.id = ability.Id()
self.distance = ability.Distance()
# AbilityT
def Pack(self, builder):
return CreateAbility(builder, self.id, self.distance)
| AbilityT |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/unselected/_textfont.py | {
"start": 233,
"end": 2598
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo.unselected"
_path_str = "scattergeo.unselected.textfont"
_valid_props = {"color"}
@property
def color(self):
"""
Sets the text font color of unselected points, applied only
when a selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergeo.uns
elected.Textfont`
color
Sets the text font color of unselected points, applied
only when a selection exists.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.unselected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.unselected.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/qa_with_sources/retrieval.py | {
"start": 478,
"end": 2542
} | class ____(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
| RetrievalQAWithSourcesChain |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py | {
"start": 365,
"end": 480
} | class ____:
a: str = 0
b = field()
c: int = foo()
d = list()
@frozen # auto_attribs = None => True
| C |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 2514,
"end": 3402
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids' shape is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
# Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->BigBirdPegasus
| BigBirdPegasusLearnedPositionalEmbedding |
python | PyCQA__mccabe | mccabe.py | {
"start": 6756,
"end": 10717
} | class ____(object):
"""McCabe cyclomatic complexity checker."""
name = 'mccabe'
version = __version__
_code = 'C901'
_error_tmpl = "C901 %r is too complex (%d)"
max_complexity = -1
def __init__(self, tree, filename):
self.tree = tree
@classmethod
def add_options(cls, parser):
flag = '--max-complexity'
kwargs = {
'default': -1,
'action': 'store',
'type': int,
'help': 'McCabe complexity threshold',
'parse_from_config': 'True',
}
config_opts = getattr(parser, 'config_options', None)
if isinstance(config_opts, list):
# Flake8 2.x
kwargs.pop('parse_from_config')
parser.add_option(flag, **kwargs)
parser.config_options.append('max-complexity')
else:
parser.add_option(flag, **kwargs)
@classmethod
def parse_options(cls, options):
cls.max_complexity = int(options.max_complexity)
def run(self):
if self.max_complexity < 0:
return
visitor = PathGraphingAstVisitor()
visitor.preorder(self.tree, visitor)
for graph in visitor.graphs.values():
if graph.complexity() > self.max_complexity:
text = self._error_tmpl % (graph.entity, graph.complexity())
yield graph.lineno, graph.column, text, type(self)
def get_code_complexity(code, threshold=7, filename='stdin'):
try:
tree = compile(code, filename, "exec", ast.PyCF_ONLY_AST)
except SyntaxError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to parse %s: %s\n" % (filename, e))
return 0
complx = []
McCabeChecker.max_complexity = threshold
for lineno, offset, text, check in McCabeChecker(tree, filename).run():
complx.append('%s:%d:1: %s' % (filename, lineno, text))
if len(complx) == 0:
return 0
print('\n'.join(complx))
return len(complx)
def get_module_complexity(module_path, threshold=7):
"""Returns the complexity of a module"""
code = _read(module_path)
return get_code_complexity(code, threshold, filename=module_path)
def _read(filename):
if (2, 5) < sys.version_info < (3, 0):
with open(filename, 'rU') as f:
return f.read()
elif (3, 0) <= sys.version_info < (4, 0):
"""Read the source code."""
try:
with open(filename, 'rb') as f:
(encoding, _) = tokenize.detect_encoding(f.readline)
except (LookupError, SyntaxError, UnicodeError):
# Fall back if file encoding is improperly declared
with open(filename, encoding='latin-1') as f:
return f.read()
with open(filename, 'r', encoding=encoding) as f:
return f.read()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
opar = optparse.OptionParser()
opar.add_option("-d", "--dot", dest="dot",
help="output a graphviz dot file", action="store_true")
opar.add_option("-m", "--min", dest="threshold",
help="minimum complexity for output", type="int",
default=1)
options, args = opar.parse_args(argv)
if not args:
opar.print_help()
opar.exit()
code = _read(args[0])
tree = compile(code, args[0], "exec", ast.PyCF_ONLY_AST)
visitor = PathGraphingAstVisitor()
visitor.preorder(tree, visitor)
if options.dot:
print('graph {')
for graph in visitor.graphs.values():
if (not options.threshold or
graph.complexity() >= options.threshold):
graph.to_dot()
print('}')
else:
for graph in visitor.graphs.values():
if graph.complexity() >= options.threshold:
print(graph.name, graph.complexity())
if __name__ == '__main__':
main(sys.argv[1:])
| McCabeChecker |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_index.py | {
"start": 1367,
"end": 7702
} | class ____(TestCase):
def test_valid(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {
"email": "eric@localhost",
"orgRole": "member",
"teamRoles": [{"teamSlug": self.team.slug, "role": None}],
}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
def test_valid_deprecated_fields(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {"email": "eric@localhost", "role": "member", "teams": [self.team.slug]}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
def test_gets_team_objects(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {
"email": "eric@localhost",
"orgRole": "member",
"teamRoles": [{"teamSlug": self.team.slug, "role": "admin"}],
}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
assert serializer.validated_data["teamRoles"][0] == (self.team, "admin")
def test_gets_team_objects_with_deprecated_field(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {"email": "eric@localhost", "orgRole": "member", "teams": [self.team.slug]}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
assert serializer.validated_data["teams"][0] == self.team
def test_invalid_email(self) -> None:
org = self.create_organization()
user = self.create_user()
member = self.create_member(organization=org, email=user.email)
context = {"organization": org, "allowed_roles": [roles.get("member")]}
data = {"email": user.email, "orgRole": "member", "teamRoles": []}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
assert serializer.errors == {"email": [f"The user {user.email} has already been invited"]}
request = self.make_request(user=user)
with assume_test_silo_mode(SiloMode.CONTROL):
UserEmail.objects.filter(user=user, email=user.email).update(is_verified=False)
invite_state = get_invite_state(member.id, org.slug, user.id, request)
assert invite_state, "Expected invite state, logic bug?"
invite_helper = ApiInviteHelper(
request=request, invite_context=invite_state, token=None
)
invite_helper.accept_invite(user)
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
assert serializer.errors == {"email": [f"The user {user.email} is already a member"]}
def test_invalid_team_invites(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {"email": "eric@localhost", "orgRole": "member", "teams": ["faketeam"]}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
assert serializer.errors == {"teams": ["Invalid teams"]}
def test_invalid_org_role(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {"email": "eric@localhost", "orgRole": "owner", "teamRoles": []}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
assert serializer.errors == {
"orgRole": ["You do not have permission to set that org-level role"]
}
@with_feature({"organizations:team-roles": False})
def test_deprecated_org_role_without_flag(self) -> None:
context = {
"organization": self.organization,
"allowed_roles": [roles.get("admin"), roles.get("member")],
}
data = {"email": "eric@localhost", "orgRole": "admin", "teamRoles": []}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
@with_feature("organizations:team-roles")
def test_deprecated_org_role_with_flag(self) -> None:
context = {
"organization": self.organization,
"allowed_roles": [roles.get("admin"), roles.get("member")],
}
data = {"email": "eric@localhost", "orgRole": "admin", "teamRoles": []}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
def test_invalid_team_role(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {
"email": "eric@localhost",
"orgRole": "member",
"teamRoles": [{"teamSlug": self.team.slug, "role": "no-such-team-role"}],
}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
assert serializer.errors == {"teamRoles": ["Invalid team-role"]}
@with_feature("organizations:invite-billing")
def test_valid_invite_billing_member(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {
"email": "bill@localhost",
"orgRole": "billing",
"teamRoles": [],
}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert serializer.is_valid()
def test_invalid_invite_billing_member(self) -> None:
context = {"organization": self.organization, "allowed_roles": [roles.get("member")]}
data = {
"email": "bill@localhost",
"orgRole": "billing",
"teamRoles": [],
}
serializer = OrganizationMemberRequestSerializer(context=context, data=data)
assert not serializer.is_valid()
| OrganizationMemberRequestSerializerTest |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 53485,
"end": 60689
} | class ____(PsutilTestCase):
def test_boot_time(self):
vmstat_value = vmstat('boot time')
psutil_value = psutil.boot_time()
assert int(vmstat_value) == int(psutil_value)
def test_no_procfs_on_import(self):
my_procfs = self.get_testfn()
os.mkdir(my_procfs)
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n')
try:
orig_open = open
def open_mock(name, *args, **kwargs):
if name.startswith('/proc'):
raise FileNotFoundError
return orig_open(name, *args, **kwargs)
with mock.patch("builtins.open", side_effect=open_mock):
reload_module(psutil)
with pytest.raises(OSError):
psutil.cpu_times()
with pytest.raises(OSError):
psutil.cpu_times(percpu=True)
with pytest.raises(OSError):
psutil.cpu_percent()
with pytest.raises(OSError):
psutil.cpu_percent(percpu=True)
with pytest.raises(OSError):
psutil.cpu_times_percent()
with pytest.raises(OSError):
psutil.cpu_times_percent(percpu=True)
psutil.PROCFS_PATH = my_procfs
assert psutil.cpu_percent() == 0
assert sum(psutil.cpu_times_percent()) == 0
# since we don't know the number of CPUs at import time,
# we awkwardly say there are none until the second call
per_cpu_percent = psutil.cpu_percent(percpu=True)
assert sum(per_cpu_percent) == 0
# ditto awkward length
per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
assert sum(map(sum, per_cpu_times_percent)) == 0
# much user, very busy
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n')
assert psutil.cpu_percent() != 0
assert sum(psutil.cpu_percent(percpu=True)) != 0
assert sum(psutil.cpu_times_percent()) != 0
assert (
sum(map(sum, psutil.cpu_times_percent(percpu=True))) != 0
)
finally:
shutil.rmtree(my_procfs)
reload_module(psutil)
assert psutil.PROCFS_PATH == '/proc'
def test_cpu_steal_decrease(self):
# Test cumulative cpu stats decrease. We should ignore this.
# See issue #1210.
content = textwrap.dedent("""\
cpu 0 0 0 0 0 0 0 1 0 0
cpu0 0 0 0 0 0 0 0 1 0 0
cpu1 0 0 0 0 0 0 0 1 0 0
""").encode()
with mock_open_content({"/proc/stat": content}) as m:
# first call to "percent" functions should read the new stat file
# and compare to the "real" file read at import time - so the
# values are meaningless
psutil.cpu_percent()
assert m.called
psutil.cpu_percent(percpu=True)
psutil.cpu_times_percent()
psutil.cpu_times_percent(percpu=True)
content = textwrap.dedent("""\
cpu 1 0 0 0 0 0 0 0 0 0
cpu0 1 0 0 0 0 0 0 0 0 0
cpu1 1 0 0 0 0 0 0 0 0 0
""").encode()
with mock_open_content({"/proc/stat": content}):
# Increase "user" while steal goes "backwards" to zero.
cpu_percent = psutil.cpu_percent()
assert m.called
cpu_percent_percpu = psutil.cpu_percent(percpu=True)
cpu_times_percent = psutil.cpu_times_percent()
cpu_times_percent_percpu = psutil.cpu_times_percent(percpu=True)
assert cpu_percent != 0
assert sum(cpu_percent_percpu) != 0
assert sum(cpu_times_percent) != 0
assert sum(cpu_times_percent) != 100.0
assert sum(map(sum, cpu_times_percent_percpu)) != 0
assert sum(map(sum, cpu_times_percent_percpu)) != 100.0
assert cpu_times_percent.steal == 0
assert cpu_times_percent.user != 0
def test_boot_time_mocked(self):
with mock.patch('psutil._common.open', create=True) as m:
with pytest.raises(RuntimeError):
psutil._pslinux.boot_time()
assert m.called
def test_users(self):
# Make sure the C extension converts ':0' and ':0.0' to
# 'localhost'.
for user in psutil.users():
assert user.host not in {":0", ":0.0"}
def test_procfs_path(self):
tdir = self.get_testfn()
os.mkdir(tdir)
try:
psutil.PROCFS_PATH = tdir
with pytest.raises(OSError):
psutil.virtual_memory()
with pytest.raises(OSError):
psutil.cpu_times()
with pytest.raises(OSError):
psutil.cpu_times(percpu=True)
with pytest.raises(OSError):
psutil.boot_time()
with pytest.raises(OSError):
psutil.net_connections()
with pytest.raises(OSError):
psutil.net_io_counters()
with pytest.raises(OSError):
psutil.net_if_stats()
with pytest.raises(OSError):
psutil.disk_partitions()
with pytest.raises(psutil.NoSuchProcess):
psutil.Process()
finally:
psutil.PROCFS_PATH = "/proc"
@retry_on_failure()
@pytest.mark.xdist_group(name="serial")
def test_issue_687(self):
# In case of thread ID:
# - pid_exists() is supposed to return False
# - Process(tid) is supposed to work
# - pids() should not return the TID
# See: https://github.com/giampaolo/psutil/issues/687
p = psutil.Process()
nthreads = len(p.threads())
with ThreadTask():
threads = p.threads()
assert len(threads) == nthreads + 1
tid = sorted(threads, key=lambda x: x.id)[1].id
assert p.pid != tid
pt = psutil.Process(tid)
pt.as_dict()
assert tid not in psutil.pids()
def test_pid_exists_no_proc_status(self):
# Internally pid_exists relies on /proc/{pid}/status.
# Emulate a case where this file is empty in which case
# psutil is supposed to fall back on using pids().
with mock_open_content({"/proc/%s/status": ""}) as m:
assert psutil.pid_exists(os.getpid())
assert m.called
# =====================================================================
# --- sensors
# =====================================================================
@pytest.mark.skipif(not LINUX, reason="LINUX only")
@pytest.mark.skipif(not HAS_BATTERY, reason="no battery")
| TestMisc |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 684311,
"end": 684957
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("VerifiableDomainEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("VerifiableDomain"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| VerifiableDomainConnection |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink32.py | {
"start": 315,
"end": 902
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink32.xlsx")
def test_create_file(self):
"""Test the creation of a file with hyperlinked image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9", self.image_dir + "red.png", {"url": "https://github.com/jmcnamara"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 24210,
"end": 25219
} | class ____(BaseModel):
"""Options to start the gRPC Proxy with."""
port: int = Field(
default=DEFAULT_GRPC_PORT,
description=(
"Port for gRPC server. Defaults to 9000. Cannot be updated once "
"Serve has started running. Serve must be shut down and restarted "
"with the new port instead."
),
)
grpc_servicer_functions: List[str] = Field(
default=[],
description=(
"List of import paths for gRPC `add_servicer_to_server` functions to add "
"to Serve's gRPC proxy. Default to empty list, which means no gRPC methods "
"will be added and no gRPC server will be started. The servicer functions "
"need to be importable from the context of where Serve is running."
),
)
request_timeout_s: float = Field(
default=None,
description="The timeout for gRPC requests. Defaults to no timeout.",
)
@PublicAPI(stability="stable")
| gRPCOptionsSchema |
python | numba__numba | numba/tests/test_funcdesc.py | {
"start": 790,
"end": 1698
} | class ____(unittest.TestCase):
def test_mangling_abi_tags(self):
"""
This is a minimal test for the abi-tags support in the mangler.
"""
def udt():
pass
# run minimal frontend to create a function descriptor
func_ir = run_frontend(udt)
typemap = {}
restype = None
calltypes = ()
mangler = default_mangler
inline = False
noalias = False
abi_tags = ("Shrubbery", "Herring")
fd = PythonFunctionDescriptor.from_specialized_function(
func_ir, typemap, restype, calltypes, mangler, inline, noalias,
abi_tags=abi_tags,
)
# mangled tag must exist in the mangled name
self.assertIn("".join([mangle_abi_tag(x) for x in abi_tags]),
fd.mangled_name)
if __name__ == '__main__':
unittest.main()
| TestFuncDescMangledName |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_info_command.py | {
"start": 6050,
"end": 6928
} | class ____:
@conf_vars(
{
("database", "sql_alchemy_conn"): "postgresql+psycopg2://postgres:airflow@postgres/airflow",
}
)
def test_show_info_anonymize_fileio(self, setup_parser, cleanup_providers_manager, stdout_capture):
with mock.patch("airflow.cli.commands.info_command.httpx.post") as post:
post.return_value = httpx.Response(
status_code=200,
json={
"success": True,
"key": "f9U3zs3I",
"link": "https://file.io/TEST",
"expiry": "14 days",
},
)
with stdout_capture as stdout:
info_command.show_info(setup_parser.parse_args(["info", "--file-io", "--anonymize"]))
assert "https://file.io/TEST" in stdout.getvalue()
| TestInfoCommandMockHttpx |
python | huggingface__transformers | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | {
"start": 2718,
"end": 5749
} | class ____(DeepseekVLConfig):
r"""
This is the configuration class to store the configuration of a [`DeepseekVLHybridModel`]. It is used to instantiate a
DeepseekVLHybrid model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DeepseekVLHybrid
[deepseek-community/deepseek-vl-7b-chat](https://huggingface.co/deepseek-community/deepseek-vl-7b-chat) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
The config object or dictionary of the vision backbone.
high_res_vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SamVisionConfig`):
The config object or dictionary of the high resolution vision backbone.
image_token_id (`int`, *optional*, defaults to 100015):
The index representing image tokens in the model's token vocabulary.
Example:
```python
>>> from transformers import DeepseekVLHybridConfig, DeepseekVLHybridModel
>>> # Initializing a DeepseekVLHybrid deepseek-community/deepseek-vl-7b-chat style configuration
>>> configuration = DeepseekVLHybridConfig()
>>> # Initializing a model (with random weights) from the deepseek-community/deepseek-vl-7b-chat style configuration
>>> model = DeepseekVLHybridModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "deepseek_vl_hybrid"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig, "high_res_vision_config": AutoConfig}
def __init__(
self,
text_config: Optional[AutoConfig] = None,
vision_config: Optional[AutoConfig] = None,
high_res_vision_config: Optional[AutoConfig] = None,
image_token_id: int = 100015,
**kwargs,
):
if high_res_vision_config is None:
high_res_vision_config = {}
logger.info("`high_res_vision_config` is `None`. Initializing the `SamVisionConfig` with default values.")
if isinstance(high_res_vision_config, dict):
high_res_vision_config["model_type"] = high_res_vision_config.get("model_type", "sam_vision_model")
high_res_vision_config = CONFIG_MAPPING[high_res_vision_config["model_type"]](**high_res_vision_config)
self.high_res_vision_config = high_res_vision_config
super().__init__(
text_config=text_config,
vision_config=vision_config,
image_token_id=image_token_id,
**kwargs,
)
| DeepseekVLHybridConfig |
python | kamyu104__LeetCode-Solutions | Python/distribute-candies-among-children-i.py | {
"start": 96,
"end": 772
} | class ____(object):
def distributeCandies(self, n, limit):
"""
:type n: int
:type limit: int
:rtype: int
"""
def nCr(n, r): # Time: O(n), Space: O(1)
if not 0 <= r <= n:
return 0
if n-r < r:
r = n-r
c = 1
for k in xrange(1, r+1):
c *= n-k+1
c //= k
return c
def nHr(n, r):
return nCr(n+(r-1), r-1)
R = 3
return sum((-1 if r%2 else 1) * nCr(R, r) * nHr(n-r*(limit+1), R)for r in xrange(R+1))
# Time: O(n)
# Space: O(1)
# optimized brute force
| Solution |
python | doocs__leetcode | solution/2300-2399/2371.Minimize Maximum Value in a Grid/Solution.py | {
"start": 0,
"end": 471
} | class ____:
def minScore(self, grid: List[List[int]]) -> List[List[int]]:
m, n = len(grid), len(grid[0])
nums = [(v, i, j) for i, row in enumerate(grid) for j, v in enumerate(row)]
nums.sort()
row_max = [0] * m
col_max = [0] * n
ans = [[0] * n for _ in range(m)]
for _, i, j in nums:
ans[i][j] = max(row_max[i], col_max[j]) + 1
row_max[i] = col_max[j] = ans[i][j]
return ans
| Solution |
python | tox-dev__tox | src/tox/config/cli/parse.py | {
"start": 518,
"end": 3221
} | class ____(NamedTuple):
parsed: Parsed
pos_args: Sequence[str] | None
source: Source
cmd_handlers: dict[str, Callable[[State], int]]
log_handler: ToxHandler
def get_options(*args: str) -> Options:
pos_args: tuple[str, ...] | None = None
try: # remove positional arguments passed to parser if specified, they are pulled directly from sys.argv
pos_arg_at = args.index("--")
except ValueError:
pass
else:
pos_args = tuple(args[pos_arg_at + 1 :])
args = args[:pos_arg_at]
guess_verbosity, log_handler, source = _get_base(args)
parsed, cmd_handlers = _get_all(args)
if guess_verbosity != parsed.verbosity:
log_handler.update_verbosity(parsed.verbosity)
return Options(parsed, pos_args, source, cmd_handlers, log_handler)
def _get_base(args: Sequence[str]) -> tuple[int, ToxHandler, Source]:
"""First just load the base options (verbosity+color) to setup the logging framework."""
tox_parser = ToxParser.base()
parsed = Parsed()
try:
with (
Path(os.devnull).open("w", encoding=locale.getpreferredencoding(do_setlocale=False)) as file_handler,
redirect_stderr(file_handler),
):
tox_parser.parse_known_args(args, namespace=parsed)
except SystemExit:
... # ignore parse errors, such as -va raises ignored explicit argument 'a'
guess_verbosity = parsed.verbosity
handler = setup_report(guess_verbosity, parsed.is_colored)
from tox.plugin.manager import MANAGER # load the plugin system right after we set up report # noqa: PLC0415
source = discover_source(parsed.config_file, parsed.root_dir)
MANAGER.load_plugins(source.path)
return guess_verbosity, handler, source
def _get_all(args: Sequence[str]) -> tuple[Parsed, dict[str, Callable[[State], int]]]:
"""Parse all the options."""
tox_parser = _get_parser()
parsed = cast("Parsed", tox_parser.parse_args(args))
handlers = {k: p for k, (_, p) in tox_parser.handlers.items()}
return parsed, handlers
def _get_parser() -> ToxParser:
tox_parser = ToxParser.core() # load the core options
# plus options setup by plugins
from tox.plugin.manager import MANAGER # noqa: PLC0415
MANAGER.tox_add_option(tox_parser)
tox_parser.fix_defaults()
return tox_parser
def _get_parser_doc() -> ToxParser:
# trigger register of tox env types (during normal run we call this later to handle plugins)
from tox.plugin.manager import MANAGER # pragma: no cover # noqa: PLC0415
MANAGER.load_plugins(Path.cwd())
return _get_parser() # pragma: no cover
__all__ = (
"Options",
"get_options",
)
| Options |
python | redis__redis-py | redis/_parsers/commands.py | {
"start": 573,
"end": 925
} | class ____(Enum):
ONE_SUCCEEDED = "one_succeeded"
ALL_SUCCEEDED = "all_succeeded"
AGG_LOGICAL_AND = "agg_logical_and"
AGG_LOGICAL_OR = "agg_logical_or"
AGG_MIN = "agg_min"
AGG_MAX = "agg_max"
AGG_SUM = "agg_sum"
SPECIAL = "special"
DEFAULT_KEYLESS = "default_keyless"
DEFAULT_KEYED = "default_keyed"
| ResponsePolicy |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 3752,
"end": 3926
} | class ____:
def _repr_pretty_(self, p, cycle):
with p.group(4, "TG: ", ":"):
p.text("Breaking(")
p.break_()
p.text(")")
| Breaking |
python | miyuchina__mistletoe | mistletoe/contrib/github_wiki.py | {
"start": 197,
"end": 355
} | class ____(SpanToken):
pattern = re.compile(r"\[\[ *(.+?) *\| *(.+?) *\]\]")
def __init__(self, match):
self.target = match.group(2)
| GithubWiki |
python | lazyprogrammer__machine_learning_examples | hmm_class/hmmd_scaled.py | {
"start": 580,
"end": 6254
} | class ____:
def __init__(self, M):
self.M = M # number of hidden states
def fit(self, X, max_iter=30):
np.random.seed(123)
# train the HMM model using the Baum-Welch algorithm
# a specific instance of the expectation-maximization algorithm
# determine V, the vocabulary size
# assume observables are already integers from 0..V-1
# X is a jagged array of observed sequences
V = max(max(x) for x in X) + 1
N = len(X)
self.pi = np.ones(self.M) / self.M # initial state distribution
self.A = random_normalized(self.M, self.M) # state transition matrix
self.B = random_normalized(self.M, V) # output distribution
print("initial A:", self.A)
print("initial B:", self.B)
costs = []
for it in range(max_iter):
if it % 10 == 0:
print("it:", it)
# alpha1 = np.zeros((N, self.M))
alphas = []
betas = []
scales = []
logP = np.zeros(N)
for n in range(N):
x = X[n]
T = len(x)
scale = np.zeros(T)
# alpha1[n] = self.pi*self.B[:,x[0]]
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*self.B[:,x[0]]
scale[0] = alpha[0].sum()
alpha[0] /= scale[0]
for t in range(1, T):
alpha_t_prime = alpha[t-1].dot(self.A) * self.B[:, x[t]]
scale[t] = alpha_t_prime.sum()
alpha[t] = alpha_t_prime / scale[t]
logP[n] = np.log(scale).sum()
alphas.append(alpha)
scales.append(scale)
beta = np.zeros((T, self.M))
beta[-1] = 1
for t in range(T - 2, -1, -1):
beta[t] = self.A.dot(self.B[:, x[t+1]] * beta[t+1]) / scale[t+1]
betas.append(beta)
cost = np.sum(logP)
costs.append(cost)
# now re-estimate pi, A, B
self.pi = np.sum((alphas[n][0] * betas[n][0]) for n in range(N)) / N
den1 = np.zeros((self.M, 1))
den2 = np.zeros((self.M, 1))
a_num = np.zeros((self.M, self.M))
b_num = np.zeros((self.M, V))
for n in range(N):
x = X[n]
T = len(x)
den1 += (alphas[n][:-1] * betas[n][:-1]).sum(axis=0, keepdims=True).T
den2 += (alphas[n] * betas[n]).sum(axis=0, keepdims=True).T
# numerator for A
# a_num_n = np.zeros((self.M, self.M))
for i in range(self.M):
for j in range(self.M):
for t in range(T-1):
a_num[i,j] += alphas[n][t,i] * betas[n][t+1,j] * self.A[i,j] * self.B[j, x[t+1]] / scales[n][t+1]
# a_num += a_num_n
# numerator for B
# for i in range(self.M):
# for j in range(V):
# for t in range(T):
# if x[t] == j:
# b_num[i,j] += alphas[n][t][i] * betas[n][t][i]
for i in range(self.M):
for t in range(T):
b_num[i,x[t]] += alphas[n][t,i] * betas[n][t,i]
self.A = a_num / den1
self.B = b_num / den2
print("A:", self.A)
print("B:", self.B)
print("pi:", self.pi)
plt.plot(costs)
plt.show()
def log_likelihood(self, x):
# returns log P(x | model)
# using the forward part of the forward-backward algorithm
T = len(x)
scale = np.zeros(T)
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*self.B[:,x[0]]
scale[0] = alpha[0].sum()
alpha[0] /= scale[0]
for t in range(1, T):
alpha_t_prime = alpha[t-1].dot(self.A) * self.B[:, x[t]]
scale[t] = alpha_t_prime.sum()
alpha[t] = alpha_t_prime / scale[t]
return np.log(scale).sum()
def log_likelihood_multi(self, X):
return np.array([self.log_likelihood(x) for x in X])
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
delta[0] = np.log(self.pi) + np.log(self.B[:,x[0]])
for t in range(1, T):
for j in range(self.M):
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.B[j, x[t]])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
def fit_coin():
X = []
for line in open('coin_data.txt'):
# 1 for H, 0 for T
x = [1 if e == 'H' else 0 for e in line.rstrip()]
X.append(x)
hmm = HMM(2)
hmm.fit(X)
L = hmm.log_likelihood_multi(X).sum()
print("LL with fitted params:", L)
# try true values
hmm.pi = np.array([0.5, 0.5])
hmm.A = np.array([[0.1, 0.9], [0.8, 0.2]])
hmm.B = np.array([[0.6, 0.4], [0.3, 0.7]])
L = hmm.log_likelihood_multi(X).sum()
print("LL with true params:", L)
# try viterbi
print("Best state sequence for:", X[0])
print(hmm.get_state_sequence(X[0]))
if __name__ == '__main__':
fit_coin()
| HMM |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/polling.py | {
"start": 3941,
"end": 4230
} | class ____(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
| PollingObserver |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-notion/components.py | {
"start": 2311,
"end": 3834
} | class ____(SimpleRetriever):
"""
Docs: https://developers.notion.com/reference/get-block-children
According to that fact that block's entity may have children entities that stream also need to retrieve
BlocksRetriever calls read_records when received record.has_children is True.
"""
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters)
self.current_block_depth = 0
def read_records(
self,
records_schema: Mapping[str, Any],
stream_slice: Optional[StreamSlice] = None,
) -> Iterable[StreamData]:
# if reached recursive limit, don't read anymore
if self.current_block_depth > MAX_BLOCK_DEPTH:
logger.info("Reached max block depth limit. Exiting.")
return
for sequence_number, stream_data in enumerate(super().read_records(records_schema, stream_slice)):
if stream_data.data.get("has_children"):
self.current_block_depth += 1
child_stream_slice = StreamSlice(
partition={"block_id": stream_data.data["id"], "parent_slice": {}},
cursor_slice=stream_slice.cursor_slice,
)
yield from self.read_records(records_schema, child_stream_slice)
self.current_block_depth -= 1
if "parent" in stream_data:
stream_data["parent"]["sequence_number"] = sequence_number
yield stream_data
| BlocksRetriever |
python | getsentry__sentry | src/sentry/migrations/0928_move_notifications_models.py | {
"start": 147,
"end": 3829
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0927_dashboard_add_unique_constraint_user_dashboard"),
]
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.RemoveField(
model_name="notificationactionproject",
name="action",
),
migrations.RemoveField(
model_name="notificationactionproject",
name="project",
),
migrations.RemoveField(
model_name="notificationmessage",
name="action",
),
migrations.RemoveField(
model_name="notificationmessage",
name="group",
),
migrations.RemoveField(
model_name="notificationmessage",
name="incident",
),
migrations.RemoveField(
model_name="notificationmessage",
name="parent_notification_message",
),
migrations.RemoveField(
model_name="notificationmessage",
name="rule_fire_history",
),
migrations.RemoveField(
model_name="notificationmessage",
name="trigger_action",
),
migrations.AlterUniqueTogether(
name="notificationsettingoption",
unique_together=None,
),
migrations.RemoveField(
model_name="notificationsettingoption",
name="user",
),
migrations.AlterUniqueTogether(
name="notificationsettingprovider",
unique_together=None,
),
migrations.RemoveField(
model_name="notificationsettingprovider",
name="user",
),
migrations.DeleteModel(
name="NotificationAction",
),
migrations.DeleteModel(
name="NotificationActionProject",
),
migrations.DeleteModel(
name="NotificationMessage",
),
migrations.DeleteModel(
name="NotificationSettingOption",
),
migrations.DeleteModel(
name="NotificationSettingProvider",
),
]
)
]
| Migration |
python | getsentry__sentry | tests/sentry/models/test_release.py | {
"start": 4423,
"end": 10778
} | class ____(TestCase):
@receivers_raise_on_send()
def test_simple(self) -> None:
org = self.create_organization()
commit = Commit.objects.create(organization_id=org.id, repository_id=5)
commit2 = Commit.objects.create(organization_id=org.id, repository_id=6)
# merge to
project = self.create_project(organization=org, name="foo")
environment = Environment.get_or_create(project=project, name="env1")
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
release_commit = ReleaseCommit.objects.create(
organization_id=org.id, release=release, commit=commit, order=1
)
release_environment = ReleaseEnvironment.objects.create(
organization_id=org.id,
project_id=project.id,
release_id=release.id,
environment_id=environment.id,
)
release_project_environment = ReleaseProjectEnvironment.objects.create(
release_id=release.id, project_id=project.id, environment_id=environment.id
)
group_release = GroupRelease.objects.create(
project_id=project.id, release_id=release.id, group_id=1
)
group = self.create_group(project=project, first_release=release)
group_resolution = GroupResolution.objects.create(group=group, release=release)
# merge from #1
project2 = self.create_project(organization=org, name="bar")
environment2 = Environment.get_or_create(project=project2, name="env2")
release2 = Release.objects.create(version="bbbbbbb", organization=org)
release2.add_project(project2)
release_commit2 = ReleaseCommit.objects.create(
organization_id=org.id, release=release2, commit=commit, order=2
)
release_environment2 = ReleaseEnvironment.objects.create(
organization_id=org.id,
project_id=project2.id,
release_id=release2.id,
environment_id=environment2.id,
)
release_project_environment2 = ReleaseProjectEnvironment.objects.create(
release_id=release2.id, project_id=project2.id, environment_id=environment2.id
)
group_release2 = GroupRelease.objects.create(
project_id=project2.id, release_id=release2.id, group_id=2
)
group2 = self.create_group(project=project2, first_release=release2)
group_resolution2 = GroupResolution.objects.create(group=group2, release=release2)
# merge from #2
project3 = self.create_project(organization=org, name="baz")
environment3 = Environment.get_or_create(project=project3, name="env3")
release3 = Release.objects.create(version="cccccc", organization=org)
release3.add_project(project3)
release_commit3 = ReleaseCommit.objects.create(
organization_id=org.id, release=release2, commit=commit2, order=3
)
release_environment3 = ReleaseEnvironment.objects.create(
organization_id=org.id,
project_id=project3.id,
release_id=release3.id,
environment_id=environment3.id,
)
release_project_environment3 = ReleaseProjectEnvironment.objects.create(
release_id=release3.id, project_id=project3.id, environment_id=environment3.id
)
group_release3 = GroupRelease.objects.create(
project_id=project3.id, release_id=release3.id, group_id=3
)
group3 = self.create_group(project=project3, first_release=release3)
group_resolution3 = GroupResolution.objects.create(group=group3, release=release3)
Release.merge(release, [release2, release3])
# ReleaseCommit.release
assert ReleaseCommit.objects.get(id=release_commit.id).release == release
# should not exist because they referenced the same commit
assert not ReleaseCommit.objects.filter(id=release_commit2.id).exists()
assert ReleaseCommit.objects.get(id=release_commit3.id).release == release
# ReleaseEnvironment.release_id
assert ReleaseEnvironment.objects.get(id=release_environment.id).release_id == release.id
assert ReleaseEnvironment.objects.get(id=release_environment2.id).release_id == release.id
assert ReleaseEnvironment.objects.get(id=release_environment3.id).release_id == release.id
# ReleaseProject.release
assert release.projects.count() == 3
assert ReleaseProject.objects.filter(release=release, project=project).exists()
assert ReleaseProject.objects.filter(release=release, project=project2).exists()
assert ReleaseProject.objects.filter(release=release, project=project3).exists()
# ReleaseProjectEnvironment.release
assert (
ReleaseProjectEnvironment.objects.get(id=release_project_environment.id).release_id
== release.id
)
assert (
ReleaseProjectEnvironment.objects.get(id=release_project_environment2.id).release_id
== release.id
)
assert (
ReleaseProjectEnvironment.objects.get(id=release_project_environment3.id).release_id
== release.id
)
# GroupRelease.release_id
assert GroupRelease.objects.get(id=group_release.id).release_id == release.id
assert GroupRelease.objects.get(id=group_release2.id).release_id == release.id
assert GroupRelease.objects.get(id=group_release3.id).release_id == release.id
# GroupResolution.release
assert GroupResolution.objects.get(id=group_resolution.id).release == release
assert GroupResolution.objects.get(id=group_resolution2.id).release == release
assert GroupResolution.objects.get(id=group_resolution3.id).release == release
# Group.first_release
assert Group.objects.get(id=group.id).first_release == release
assert Group.objects.get(id=group2.id).first_release == release
assert Group.objects.get(id=group3.id).first_release == release
# Releases are gone
assert Release.objects.filter(id=release.id).exists()
assert not Release.objects.filter(id=release2.id).exists()
assert not Release.objects.filter(id=release3.id).exists()
| MergeReleasesTest |
python | bokeh__bokeh | src/bokeh/models/text.py | {
"start": 2456,
"end": 2814
} | class ____(MathText):
""" Render mathematical content using `MathML <https://www.w3.org/Math/>`_
notation.
See :ref:`ug_styling_mathtext` in the |user guide| for more information.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| MathML |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 67690,
"end": 89239
} | class ____(FlavaPreTrainedModel):
# Those are linked to xxx.bias
_tied_weights_keys = {
"mmm_text_head.bias": "mmm_text_head.decoder.bias",
"mim_head.bias": "mim_head.decoder.bias",
"mlm_head.bias": "mlm_head.decoder.bias",
"mmm_image_head.bias": "mmm_image_head.decoder.bias",
}
def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
r"""
image_codebook ([`nn.Module`]):
If passed, the image codebook will be set to this. Otherwise, it will be initialized using the
image_codebook_config defined in the config first as the first parameter.
"""
super().__init__(config)
self.flava = FlavaModel(config)
self.image_codebook = image_codebook
if self.image_codebook is None and config.init_codebook:
self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
# Levarage text and image encoder configs to create the masked
# head since it has the right vocab
self.mim_head = FlavaMaskedPredictionHead(config.image_config)
self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
self.itm_head = FlavaITMHead(config)
self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
self.image_vocab_size = config.image_config.vocab_size
self.text_vocab_size = config.text_config.vocab_size
self.mlm_weight = config.mlm_weight
self.mim_weight = config.mim_weight
self.global_contrastive_weight = config.global_contrastive_weight
self.ce_ignore_index = config.ce_ignore_index
self.itm_weight = config.itm_weight
self.mmm_image_weight = config.mmm_image_weight
self.mmm_text_weight = config.mmm_text_weight
self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
self.post_init()
def _resize_to_2d(self, x: torch.Tensor):
if x.dim() > 2:
x = x.view(x.size(0), -1)
return x
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
input_ids_masked: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
codebook_pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
bool_masked_pos: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
skip_unmasked_multimodal_encoder: Optional[bool] = None,
mlm_labels: Optional[torch.Tensor] = None,
mim_labels: Optional[torch.Tensor] = None,
itm_labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: bool = True,
return_dict: Optional[bool] = None,
return_loss: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], FlavaForPreTrainingOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
input_ids_masked (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task
to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
[`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
codebook_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_image_patches, patch_size, patch_size, 3)`, *optional*):
Pixel values for image patches that are used to compute the image codebook labels for masked image modeling.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
image_attention_mask (`torch.FloatTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
skip_unmasked_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
multimodal embeddings or outputs as of now.
mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction).
Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
..., text_config.vocab_size - 1]`.
mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
generated automatically using the image codebook assigned to the model. By default, it uses
[`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
return_loss (`bool`, *optional*, default to None):
Whether to return calculated loss or not.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import FlavaForPreTraining, AutoProcessor
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> text = ["a photo of a cat"]
>>> inputs = processor(
... images=[image],
... text=text,
... return_masks=True,
... return_codebook_pixels=True,
... padding=True,
... max_length=77,
... return_tensors="pt",
... )
>>> output = model(**inputs)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return_loss = return_loss if return_loss is not None else self.config.return_loss
skip_unmasked_multimodal_encoder = (
skip_unmasked_multimodal_encoder
if skip_unmasked_multimodal_encoder is not None
else self.skip_unmasked_multimodal_encoder
)
if input_ids_masked is None and input_ids is not None:
logger.warning(
"`input_ids_masked` isn't passed which means MLM loss won't be calculated correctlySetting it to"
" `input_ids` so that model can work. Please pass it if this is unintentional. This is usually OKAY if"
" you are doing inference on unmasked text..."
)
input_ids_masked = input_ids
flava_output = self.flava(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
image_attention_mask=image_attention_mask,
# Don't need unmasked multimodal embedding for anything so skip it
# NOTE: ITM uses masked version
skip_multimodal_encoder=skip_unmasked_multimodal_encoder,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
# Pass true to have deterministic outputs
return_dict=True,
)
flava_masked_output = self.flava(
input_ids=input_ids_masked,
pixel_values=pixel_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
image_attention_mask=image_attention_mask,
bool_masked_pos=bool_masked_pos,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
pos_mask = None
image_embeddings = flava_output.image_embeddings
text_embeddings = flava_output.text_embeddings
image_masked_embeddings = flava_masked_output.image_embeddings
text_masked_embeddings = flava_masked_output.text_embeddings
multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
itm_logits = logits_per_image = logits_per_text = None
# Calculate mim_labels if necessary from the image_codebook
if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
if mim_labels is None and return_loss:
if self.image_codebook is None:
raise RuntimeError(
"`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` "
" have been passed. Reinstantiate the model with `init_codebook` set to True or "
"pass in your custom `mim_labels`"
)
if codebook_pixel_values is None:
raise ValueError(
"`codebook_pixel_value` are required to generate `mim_labels` if loss is expected. "
"Call `AutoProcessor` with `return_codebook_pixels` set to True"
)
mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
# Unimodal MIM Loss
# If multimodal embeddings are present, we will calculate MMM loss
if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None:
sequence_for_image = image_masked_embeddings
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :]
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mim_logits = self.mim_head(sequence_for_image)
if return_loss:
mim_loss = nn.functional.cross_entropy(
mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
)
mim_loss *= self.mim_weight
else:
mim_logits = self.mim_head(sequence_for_image)
# Unimodal MLM Loss
if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None:
sequence_for_text = text_masked_embeddings
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :]
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mlm_logits = self.mlm_head(sequence_for_text)
if return_loss:
mlm_loss = nn.functional.cross_entropy(
mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
)
mlm_loss *= self.mlm_weight
else:
mlm_logits = self.mlm_head(sequence_for_text)
# ITM Loss
if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
itm_logits = self.itm_head(multimodal_masked_embeddings)
if itm_labels is not None:
pos_pairs = itm_labels.ne(0)
pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
if return_loss:
itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
itm_loss *= self.itm_weight
if multimodal_masked_embeddings is not None:
multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
if mlm_labels is not None:
mlm_labels = mlm_labels[pos_mask]
if mim_labels is not None:
mim_labels = mim_labels[pos_mask]
bool_masked_pos = bool_masked_pos[pos_mask]
# MMM Image Loss
if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
sequence_for_image = multimodal_masked_embeddings
end_index = image_masked_embeddings.size(1) - 1
sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :]
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mmm_image_logits = self.mmm_image_head(sequence_for_image)
if return_loss:
mmm_image_loss = nn.functional.cross_entropy(
mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
)
mmm_image_loss *= self.mmm_image_weight
else:
mmm_image_logits = self.mmm_image_head(sequence_for_image)
# MMM Text Loss
if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
sequence_for_text = multimodal_masked_embeddings
sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :]
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mmm_text_logits = self.mmm_text_head(sequence_for_text)
if return_loss:
mmm_text_loss = nn.functional.cross_entropy(
mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
)
mmm_text_loss *= self.mmm_text_weight
else:
mmm_text_logits = self.mmm_text_head(sequence_for_text)
# Global Contrastive Loss
if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0:
text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
text_embedding = nn.functional.normalize(text_embedding, dim=-1)
image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
image_embedding = nn.functional.normalize(image_embedding, dim=-1)
self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(
image_embedding, text_embedding, self.flava.logit_scale
)
# Apply ITM negative mask if any
if pos_mask is not None:
logits_per_image = logits_per_image[pos_mask]
logits_per_text = logits_per_text[pos_mask]
gc_labels = gc_labels[pos_mask]
if return_loss:
gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
gc_loss = (gc_loss_image + gc_loss_text) / 2
gc_loss *= self.global_contrastive_weight
flava_losses = FlavaLosses(
mim=mim_loss,
mlm=mlm_loss,
itm=itm_loss,
global_contrastive=gc_loss,
mmm_image=mmm_image_loss,
mmm_text=mmm_text_loss,
)
if return_loss and not flava_losses.all_none():
total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values())
if not return_dict:
output = (
image_embeddings,
flava_output.image_output.to_tuple() if flava_output.image_output is not None else None,
text_embeddings,
flava_output.text_output.to_tuple() if flava_output.text_output is not None else None,
flava_output.multimodal_embeddings,
flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None,
image_masked_embeddings,
flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None,
text_masked_embeddings,
flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None,
multimodal_masked_embeddings,
flava_masked_output.multimodal_output.to_tuple()
if flava_masked_output.multimodal_output is not None
else None,
mim_logits,
mlm_logits,
itm_logits,
logits_per_image,
logits_per_image,
mmm_image_logits,
mmm_text_logits,
)
if return_loss and not flava_losses.all_none():
output = (
total_loss,
flava_losses,
) + output
# Filter None as transformer by default won't handle it
return tuple(x for x in output if x is None)
return FlavaForPreTrainingOutput(
loss=total_loss,
loss_info=flava_losses,
image_embeddings=image_embeddings,
image_output=flava_output.image_output,
text_embeddings=text_embeddings,
text_output=flava_output.text_output,
multimodal_embeddings=flava_output.multimodal_embeddings,
multimodal_output=flava_output.multimodal_output,
image_masked_embeddings=image_masked_embeddings,
image_masked_output=flava_masked_output.image_output,
text_masked_embeddings=text_masked_embeddings,
text_masked_output=flava_masked_output.text_output,
multimodal_masked_embeddings=multimodal_masked_embeddings,
multimodal_masked_output=flava_masked_output.multimodal_output,
mim_logits=mim_logits,
mlm_logits=mlm_logits,
itm_logits=itm_logits,
contrastive_logits_per_image=logits_per_image,
contrastive_logits_per_text=logits_per_text,
mmm_image_logits=mmm_image_logits,
mmm_text_logits=mmm_text_logits,
)
__all__ = [
"FlavaForPreTraining",
"FlavaImageCodebook",
"FlavaImageModel",
"FlavaModel",
"FlavaMultimodalModel",
"FlavaPreTrainedModel",
"FlavaTextModel",
]
| FlavaForPreTraining |
python | ray-project__ray | doc/source/_ext/callouts.py | {
"start": 2498,
"end": 4254
} | class ____(SphinxDirective):
"""Code callout directive with annotations for Sphinx.
Use this `callout` directive by wrapping either `code-block` or `literalinclude`
directives. Each line that's supposed to be equipped with an annotation should
have an inline comment of the form "# <x>" where x is an integer.
Afterwards use the `annotations` directive to add annotations to the previously
defined code labels ("<x>") by using the syntax "<x> my annotation" to produce an
annotation "my annotation" for x.
Note that annotation lines have to be separated by a new line, i.e.
.. annotations::
<1> First comment followed by a newline,
<2> second comment after the newline.
Usage example:
-------------
.. callout::
.. code-block:: python
from ray import tune
from ray.tune.search.hyperopt import HyperOptSearch
import keras
def objective(config): # <1>
...
search_space = {"activation": tune.choice(["relu", "tanh"])} # <2>
algo = HyperOptSearch()
tuner = tune.Tuner( # <3>
...
)
results = tuner.fit()
.. annotations::
<1> Wrap a Keras model in an objective function.
<2> Define a search space and initialize the search algorithm.
<3> Start a Tune run that maximizes accuracy.
"""
has_content = True
def run(self):
self.assert_has_content()
content = self.content
content = _replace_numbers(content)
callout_node = callout("\n".join(content))
_parse_recursively(self, callout_node)
return [callout_node]
| CalloutDirective |
python | pytest-dev__pytest | testing/acceptance_test.py | {
"start": 33532,
"end": 37356
} | class ____:
source = """
from _pytest import timing
def test_something():
pass
def test_2():
timing.sleep(0.010)
def test_1():
timing.sleep(0.002)
def test_3():
timing.sleep(0.020)
"""
def test_calls(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["*durations*", "*call*test_3*", "*call*test_2*"]
)
result.stdout.fnmatch_lines(
["(8 durations < 0.005s hidden. Use -vv to show these durations.)"]
)
def test_calls_show_2(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=0")
assert result.ret == 0
TestDurations.check_tests_in_output(result.stdout.lines, 2, 3)
def test_calls_showall_verbose(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=0", "-vv")
assert result.ret == 0
TestDurations.check_tests_in_output(result.stdout.lines, 1, 2, 3)
def test_calls_showall_durationsmin(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=0", "--durations-min=0.015")
assert result.ret == 0
TestDurations.check_tests_in_output(result.stdout.lines, 3)
def test_calls_showall_durationsmin_verbose(
self, pytester: Pytester, mock_timing
) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess(
"--durations=0", "--durations-min=0.015", "-vv"
)
assert result.ret == 0
TestDurations.check_tests_in_output(result.stdout.lines, 3)
@staticmethod
def check_tests_in_output(
lines: Sequence[str], *expected_test_numbers: int, number_of_tests: int = 3
) -> None:
found_test_numbers = {
test_number
for test_number in range(1, number_of_tests + 1)
if any(
line.endswith(f"test_{test_number}") and " call " in line
for line in lines
)
}
assert found_test_numbers == set(expected_test_numbers)
def test_with_deselected(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("--durations=2", "-k test_3")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_3*"])
def test_with_failing_collection(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
pytester.makepyfile(test_collecterror="""xyz""")
result = pytester.runpytest_inprocess("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, pytester: Pytester, mock_timing) -> None:
pytester.makepyfile(self.source)
result = pytester.runpytest_inprocess("-k not 1")
assert result.ret == 0
| TestDurations |
python | ray-project__ray | rllib/utils/runners/runner_group.py | {
"start": 962,
"end": 30324
} | class ____(metaclass=abc.ABCMeta):
def __init__(
self,
config: "AlgorithmConfig",
# TODO (simon): Check, if this is needed. Derived classes could define
# this if needed.
# default_policy_class: Optional[Type[Policy]]
local_runner: Optional[bool] = False,
logdir: Optional[str] = None,
# TODO (simon): Check, if still needed.
tune_trial_id: Optional[str] = None,
pg_offset: int = 0,
_setup: bool = True,
**kwargs: Dict[str, Any],
) -> None:
# TODO (simon): Remove when old stack is deprecated.
self.config: AlgorithmConfig = (
AlgorithmConfig.from_dict(config)
if isinstance(config, dict)
else (config or AlgorithmConfig())
)
self._remote_config = config
self._remote_config_obj_ref = ray.put(self._remote_config)
self._tune_trial_id = tune_trial_id
self._pg_offset = pg_offset
self._logdir = logdir
self._worker_manager = FaultTolerantActorManager(
max_remote_requests_in_flight_per_actor=self._max_requests_in_flight_per_runner,
init_id=1,
)
if _setup:
try:
self._setup(
config=config,
num_runners=self.num_runners,
local_runner=local_runner,
**kwargs,
)
# `RunnerGroup` creation possibly fails, if some (remote) workers cannot
# be initialized properly (due to some errors in the `Runners`'s
# constructor).
except RayActorError as e:
# In case of an actor (remote worker) init failure, the remote worker
# may still exist and will be accessible, however, e.g. calling
# its `run.remote()` would result in strange "property not found"
# errors.
if e.actor_init_failed:
# Raise the original error here that the `Runners` raised
# during its construction process. This is to enforce transparency
# for the user (better to understand the real reason behind the
# failure).
# - e.args[0]: The `RayTaskError` (inside the caught `RayActorError`).
# - e.args[0].args[2]: The original `Exception` (e.g. a `ValueError` due
# to a config mismatch) thrown inside the actor.
raise e.args[0].args[2]
# In any other case, raise the `RayActorError` as-is.
else:
raise e
def _setup(
self,
*,
config: Optional["AlgorithmConfig"] = None,
num_runners: int = 0,
local_runner: Optional[bool] = False,
validate: Optional[bool] = None,
**kwargs: Dict[str, Any],
) -> None:
# TODO (simon): Deprecate this as soon as we are deprecating the old stack.
self._local_runner = None
if num_runners == 0:
local_runner = True
self.__local_config = config
# Create a number of @ray.remote workers.
self.add_runners(
num_runners,
validate=validate
if validate is not None
else self._validate_runners_after_construction,
**kwargs,
)
if local_runner:
self._local_runner = self._make_runner(
runner_index=0,
num_runners=num_runners,
config=self._local_config,
**kwargs,
)
def add_runners(self, num_runners: int, validate: bool = False, **kwargs) -> None:
"""Creates and adds a number of remote runners to this runner set."""
old_num_runners = self._worker_manager.num_actors()
new_runners = [
self._make_runner(
runner_index=old_num_runners + i + 1,
num_runners=old_num_runners + num_runners,
# `self._remote_config` can be large and it's best practice to
# pass it by reference instead of value
# (https://docs.ray.io/en/latest/ray-core/patterns/pass-large-arg-by-value.html) # noqa
config=self._remote_config_obj_ref,
**kwargs,
)
for i in range(num_runners)
]
# Add the new workers to the worker manager.
self._worker_manager.add_actors(new_runners)
# Validate here, whether all remote workers have been constructed properly
# and are "up and running". Establish initial states.
if validate:
self.validate()
def validate(self) -> Exception:
for result in self._worker_manager.foreach_actor(lambda w: w.assert_healthy()):
# Simiply raise the error, which will get handled by the try-except
# clause around the _setup().
if not result.ok:
e = result.get()
if self._ignore_ray_errors_on_runners:
logger.error(
f"Validation of {self.runner_cls.__name__} failed! Error={str(e)}"
)
else:
raise e
def _make_runner(
self,
*,
runner_index: int,
num_runners: int,
recreated_runner: bool = False,
config: "AlgorithmConfig",
**kwargs,
) -> ActorHandle:
# TODO (simon): Change this in the `EnvRunner` API
# to `runner_*`.
kwargs = dict(
config=config,
worker_index=runner_index,
num_workers=num_runners,
recreated_worker=recreated_runner,
log_dir=self._logdir,
tune_trial_id=self._tune_trial_id,
**kwargs,
)
# If a local runner is requested just return a runner instance.
if runner_index == 0:
return self.runner_cls(**kwargs)
# Otherwise define a bundle index and schedule the remote worker.
pg_bundle_idx = (
-1
if ray.util.get_current_placement_group() is None
else self._pg_offset + runner_index
)
return (
ray.remote(**self._remote_args)(self.runner_cls)
.options(placement_group_bundle_index=pg_bundle_idx)
.remote(**kwargs)
)
def sync_runner_states(
self,
*,
config: "AlgorithmConfig",
from_runner: Optional[Runner] = None,
env_steps_sampled: Optional[int] = None,
connector_states: Optional[List[Dict[str, Any]]] = None,
rl_module_state: Optional[Dict[str, Any]] = None,
runner_indices_to_update: Optional[List[int]] = None,
env_to_module=None,
module_to_env=None,
**kwargs,
):
"""Synchronizes the connectors of this `RunnerGroup`'s `Runner`s."""
# If no `Runner` is passed in synchronize through the local `Runner`.
from_runner = from_runner or self.local_runner
merge = config.merge_runner_states or (
config.merge_runner_states == "training_only" and config.in_evaluation
)
broadcast = config.broadcast_runner_states
# Early out if the number of (healthy) remote workers is 0. In this case, the
# local worker is the only operating worker and thus of course always holds
# the reference connector state.
if self.num_healthy_remote_runners == 0 and self.local_runner:
self.local_runner.set_state(
{
**(
{NUM_ENV_STEPS_SAMPLED_LIFETIME: env_steps_sampled}
if env_steps_sampled is not None
else {}
),
**(rl_module_state or {}),
}
)
# Also early out, if we don't merge AND don't broadcast.
if not merge and not broadcast:
return
# Use states from all remote `Runner`s.
if merge:
if connector_states == []:
runner_states = {}
else:
if connector_states is None:
connector_states = self.foreach_runner(
lambda w: w.get_state(
components=[
COMPONENT_ENV_TO_MODULE_CONNECTOR,
COMPONENT_MODULE_TO_ENV_CONNECTOR,
]
),
local_runner=False,
timeout_seconds=(
config.sync_filters_on_rollout_workers_timeout_s
),
)
env_to_module_states = [
s[COMPONENT_ENV_TO_MODULE_CONNECTOR]
for s in connector_states
if COMPONENT_ENV_TO_MODULE_CONNECTOR in s
]
module_to_env_states = [
s[COMPONENT_MODULE_TO_ENV_CONNECTOR]
for s in connector_states
if COMPONENT_MODULE_TO_ENV_CONNECTOR in s
]
if (
self.local_runner is not None
and hasattr(self.local_runner, "_env_to_module")
and hasattr(self.local_runner, "_module_to_env")
):
assert env_to_module is None
env_to_module = self.local_runner._env_to_module
assert module_to_env is None
module_to_env = self.local_runner._module_to_env
runner_states = {}
if env_to_module_states:
runner_states.update(
{
COMPONENT_ENV_TO_MODULE_CONNECTOR: (
env_to_module.merge_states(env_to_module_states)
),
}
)
if module_to_env_states:
runner_states.update(
{
COMPONENT_MODULE_TO_ENV_CONNECTOR: (
module_to_env.merge_states(module_to_env_states)
),
}
)
# Ignore states from remote `Runner`s (use the current `from_worker` states
# only).
else:
if from_runner is None:
runner_states = {
COMPONENT_ENV_TO_MODULE_CONNECTOR: env_to_module.get_state(),
COMPONENT_MODULE_TO_ENV_CONNECTOR: module_to_env.get_state(),
}
else:
runner_states = from_runner.get_state(
components=[
COMPONENT_ENV_TO_MODULE_CONNECTOR,
COMPONENT_MODULE_TO_ENV_CONNECTOR,
]
)
# Update the global number of environment steps, if necessary.
# Make sure to divide by the number of env runners (such that each `Runner`
# knows (roughly) its own(!) lifetime count and can infer the global lifetime
# count from it).
if env_steps_sampled is not None:
runner_states[NUM_ENV_STEPS_SAMPLED_LIFETIME] = env_steps_sampled // (
config.num_runners or 1
)
# If we do NOT want remote `Runner`s to get their Connector states updated,
# only update the local worker here (with all state components, except the model
# weights) and then remove the connector components.
if not broadcast:
if self.local_runner is not None:
self.local_runner.set_state(runner_states)
else:
env_to_module.set_state(
runner_states.get(COMPONENT_ENV_TO_MODULE_CONNECTOR), {}
)
module_to_env.set_state(
runner_states.get(COMPONENT_MODULE_TO_ENV_CONNECTOR), {}
)
runner_states.pop(COMPONENT_ENV_TO_MODULE_CONNECTOR, None)
runner_states.pop(COMPONENT_MODULE_TO_ENV_CONNECTOR, None)
# If there are components in the state left -> Update remote workers with these
# state components (and maybe the local worker, if it hasn't been updated yet).
if runner_states:
# Update the local `Runner`, but NOT with the weights. If used at all for
# evaluation (through the user calling `self.evaluate`), RLlib would update
# the weights up front either way.
if self.local_runner is not None and broadcast:
self.local_runner.set_state(runner_states)
# Send the model weights only to remote `Runner`s.
# In case the local `Runner` is ever needed for evaluation,
# RLlib updates its weight right before such an eval step.
if rl_module_state:
runner_states.update(rl_module_state)
# Broadcast updated states back to all workers.
self.foreach_runner(
"set_state", # Call the `set_state()` remote method.
kwargs=dict(state=runner_states),
remote_worker_ids=runner_indices_to_update,
local_runner=False,
timeout_seconds=0.0, # This is a state update -> Fire-and-forget.
)
def sync_weights(
self,
policies: Optional[List[PolicyID]] = None,
from_worker_or_learner_group: Optional[Union[Runner, "LearnerGroup"]] = None,
to_worker_indices: Optional[List[int]] = None,
timeout_seconds: Optional[float] = 0.0,
inference_only: Optional[bool] = False,
**kwargs,
) -> None:
"""Syncs model weights from the given weight source to all remote workers.
Weight source can be either a (local) rollout worker or a learner_group. It
should just implement a `get_weights` method.
Args:
policies: Optional list of PolicyIDs to sync weights for.
If None (default), sync weights to/from all policies.
from_worker_or_learner_group: Optional (local) `Runner` instance or
LearnerGroup instance to sync from. If None (default),
sync from this `Runner`Group's local worker.
to_worker_indices: Optional list of worker indices to sync the
weights to. If None (default), sync to all remote workers.
global_vars: An optional global vars dict to set this
worker to. If None, do not update the global_vars.
timeout_seconds: Timeout in seconds to wait for the sync weights
calls to complete. Default is 0.0 (fire-and-forget, do not wait
for any sync calls to finish). Setting this to 0.0 might significantly
improve algorithm performance, depending on the algo's `training_step`
logic.
inference_only: Sync weights with workers that keep inference-only
modules. This is needed for algorithms in the new stack that
use inference-only modules. In this case only a part of the
parameters are synced to the workers. Default is False.
"""
if self.local_runner is None and from_worker_or_learner_group is None:
raise TypeError(
"No `local_runner` in `RunnerGroup`! Must provide "
"`from_worker_or_learner_group` arg in `sync_weights()`!"
)
# Only sync if we have remote workers or `from_worker_or_trainer` is provided.
rl_module_state = None
if self.num_remote_runners or from_worker_or_learner_group is not None:
weights_src = (
from_worker_or_learner_group
if from_worker_or_learner_group is not None
else self.local_runner
)
if weights_src is None:
raise ValueError(
"`from_worker_or_trainer` is None. In this case, `RunnerGroup`^ "
"should have `local_runner`. But `local_runner` is also `None`."
)
modules = (
[COMPONENT_RL_MODULE + "/" + p for p in policies]
if policies is not None
else [COMPONENT_RL_MODULE]
)
# LearnerGroup has-a Learner, which has-a RLModule.
if isinstance(weights_src, LearnerGroup):
rl_module_state = weights_src.get_state(
components=[COMPONENT_LEARNER + "/" + m for m in modules],
inference_only=inference_only,
)[COMPONENT_LEARNER]
# `Runner` (new API stack).
else:
# Runner (remote) has a RLModule.
# TODO (sven): Replace this with a new ActorManager API:
# try_remote_request_till_success("get_state") -> tuple(int,
# remoteresult)
# `weights_src` could be the ActorManager, then. Then RLlib would know
# that it has to ping the manager to try all healthy actors until the
# first returns something.
if isinstance(weights_src, ActorHandle):
rl_module_state = ray.get(
weights_src.get_state.remote(
components=modules,
inference_only=inference_only,
)
)
# `Runner` (local) has an RLModule.
else:
rl_module_state = weights_src.get_state(
components=modules,
inference_only=inference_only,
)
# Make sure `rl_module_state` only contains the weights and the
# weight seq no, nothing else.
rl_module_state = {
k: v
for k, v in rl_module_state.items()
if k in [COMPONENT_RL_MODULE, WEIGHTS_SEQ_NO]
}
# Move weights to the object store to avoid having to make n pickled
# copies of the weights dict for each worker.
rl_module_state_ref = ray.put(rl_module_state)
# Sync to specified remote workers in this `Runner`Group.
self.foreach_runner(
func="set_state",
kwargs=dict(state=rl_module_state_ref),
local_runner=False, # Do not sync back to local worker.
remote_worker_ids=to_worker_indices,
timeout_seconds=timeout_seconds,
)
# If `from_worker_or_learner_group` is provided, also sync to this
# `RunnerGroup`'s local worker.
if self.local_runner is not None:
if from_worker_or_learner_group is not None:
self.local_runner.set_state(rl_module_state)
def reset(self, new_remote_runners: List[ActorHandle]) -> None:
"""Hard overrides the remote `Runner`s in this set with the provided ones.
Args:
new_remote_workers: A list of new `Runner`s (as `ActorHandles`) to use as
new remote workers.
"""
self._worker_manager.clear()
self._worker_manager.add_actors(new_remote_runners)
def stop(self) -> None:
"""Calls `stop` on all `Runner`s (including the local one)."""
try:
# Make sure we stop all `Runner`s, include the ones that were just
# restarted / recovered or that are tagged unhealthy (at least, we should
# try).
self.foreach_runner(
lambda w: w.stop(), healthy_only=False, local_runner=True
)
except Exception:
logger.exception("Failed to stop workers!")
finally:
self._worker_manager.clear()
def foreach_runner(
self,
func: Union[Callable[[Runner], T], List[Callable[[Runner], T]], str, List[str]],
*,
kwargs=None,
local_runner: bool = True,
healthy_only: bool = True,
remote_worker_ids: List[int] = None,
timeout_seconds: Optional[float] = None,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> List[T]:
"""Calls the given function with each `Runner` as its argument.
Args:
func: The function to call for each `Runner`s. The only call argument is
the respective `Runner` instance.
local_env_runner: Whether to apply `func` to local `Runner`, too.
Default is True.
healthy_only: Apply `func` on known-to-be healthy `Runner`s only.
remote_worker_ids: Apply `func` on a selected set of remote `Runner`s.
Use None (default) for all remote `Runner`s.
timeout_seconds: Time to wait (in seconds) for results. Set this to 0.0 for
fire-and-forget. Set this to None (default) to wait infinitely (i.e. for
synchronous execution).
return_obj_refs: Whether to return `ObjectRef` instead of actual results.
Note, for fault tolerance reasons, these returned ObjectRefs should
never be resolved with ray.get() outside of this `RunnerGroup`.
mark_healthy: Whether to mark all those `Runner`s healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that `Runner`s are NOT set unhealthy, if they simply time out
(only if they return a `RayActorError`).
Also note that this setting is ignored if `healthy_only=True` (b/c
`mark_healthy` only affects `Runner`s that are currently tagged as
unhealthy).
Returns:
The list of return values of all calls to `func([worker])`.
"""
assert (
not return_obj_refs or not local_runner
), "Can not return `ObjectRef` from local worker."
local_result = []
if local_runner and self.local_runner is not None:
if kwargs:
local_kwargs = kwargs[0]
kwargs = kwargs[1:]
else:
local_kwargs = {}
kwargs = kwargs
if isinstance(func, str):
local_result = [getattr(self.local_runner, func)(**local_kwargs)]
else:
local_result = [func(self.local_runner, **local_kwargs)]
if not self._worker_manager.actor_ids():
return local_result
remote_results = self._worker_manager.foreach_actor(
func,
kwargs=kwargs,
healthy_only=healthy_only,
remote_actor_ids=remote_worker_ids,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
FaultTolerantActorManager.handle_remote_call_result_errors(
remote_results, ignore_ray_errors=self._ignore_ray_errors_on_runners
)
# With application errors handled, return good results.
remote_results = [r.get() for r in remote_results.ignore_errors()]
return local_result + remote_results
def foreach_runner_async(
self,
func: Union[Callable[[Runner], T], List[Callable[[Runner], T]], str, List[str]],
*,
healthy_only: bool = True,
remote_worker_ids: List[int] = None,
) -> int:
"""Calls the given function asynchronously with each `Runner` as the argument.
Does not return results directly. Instead, `fetch_ready_async_reqs()` can be
used to pull results in an async manner whenever they are available.
Args:
func: The function to call for each `Runner`s. The only call argument is
the respective `Runner` instance.
healthy_only: Apply `func` on known-to-be healthy `Runner`s only.
remote_worker_ids: Apply `func` on a selected set of remote `Runner`s.
Returns:
The number of async requests that have actually been made. This is the
length of `remote_worker_ids` (or self.num_remote_workers()` if
`remote_worker_ids` is None) minus the number of requests that were NOT
made b/c a remote `Runner` already had its
`max_remote_requests_in_flight_per_actor` counter reached.
"""
return self._worker_manager.foreach_actor_async(
func,
healthy_only=healthy_only,
remote_actor_ids=remote_worker_ids,
)
def fetch_ready_async_reqs(
self,
*,
timeout_seconds: Optional[float] = 0.0,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> List[Tuple[int, T]]:
"""Get esults from outstanding asynchronous requests that are ready.
Args:
timeout_seconds: Time to wait for results. Default is 0, meaning
those requests that are already ready.
return_obj_refs: Whether to return ObjectRef instead of actual results.
mark_healthy: Whether to mark all those workers healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that workers are NOT set unhealthy, if they simply time out
(only if they return a RayActorError).
Also note that this setting is ignored if `healthy_only=True` (b/c
`mark_healthy` only affects workers that are currently tagged as
unhealthy).
Returns:
A list of results successfully returned from outstanding remote calls,
paired with the indices of the callee workers.
"""
remote_results = self._worker_manager.fetch_ready_async_reqs(
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
FaultTolerantActorManager.handle_remote_call_result_errors(
remote_results,
ignore_ray_errors=self._ignore_ray_errors_on_runners,
)
return [(r.actor_id, r.get()) for r in remote_results.ignore_errors()]
def probe_unhealthy_runners(self) -> List[int]:
"""Checks for unhealthy workers and tries restoring their states.
Returns:
List of IDs of the workers that were restored.
"""
return self._worker_manager.probe_unhealthy_actors(
timeout_seconds=self.runner_health_probe_timeout_s,
mark_healthy=True,
)
@property
@abc.abstractmethod
def runner_health_probe_timeout_s(self):
"""Number of seconds to wait for health probe calls to `Runner`s."""
@property
@abc.abstractmethod
def runner_cls(self) -> Callable:
"""Class for each runner."""
@property
def _local_config(self) -> "AlgorithmConfig":
"""Returns the config for a local `Runner`."""
return self.__local_config
@property
def local_runner(self) -> Runner:
"""Returns the local `Runner`."""
return self._local_runner
@property
def healthy_runner_ids(self) -> List[int]:
"""Returns the list of remote `Runner` IDs."""
return self._worker_manager.healthy_actor_ids()
@property
@abc.abstractmethod
def num_runners(self) -> int:
"""Number of runners to schedule and manage."""
@property
def num_remote_runners(self) -> int:
"""Number of remote `Runner`s."""
return self._worker_manager.num_actors()
@property
def num_healthy_remote_runners(self) -> int:
"""Returns the number of healthy remote `Runner`s."""
return self._worker_manager.num_healthy_actors()
@property
def num_healthy_runners(self) -> int:
"""Returns the number of healthy `Runner`s."""
return int(bool(self._local_runner)) + self.num_healthy_remote_runners()
@property
def num_in_flight_async_reqs(self) -> int:
"""Returns the number of in-flight async requests."""
return self._worker_manager.num_outstanding_async_reqs()
@property
def num_remote_runner_restarts(self) -> int:
"""Returns the number of times managed remote `Runner`s have been restarted."""
return self._worker_manager.total_num_restarts()
@property
@abc.abstractmethod
def _remote_args(self):
"""Remote arguments for each runner."""
@property
@abc.abstractmethod
def _ignore_ray_errors_on_runners(self):
"""If errors in runners should be ignored."""
@property
@abc.abstractmethod
def _max_requests_in_flight_per_runner(self):
"""Maximum requests in flight per runner."""
@property
@abc.abstractmethod
def _validate_runners_after_construction(self):
"""If runners should validated after constructed."""
| RunnerGroup |
python | pytorch__pytorch | test/inductor/test_codecache.py | {
"start": 2580,
"end": 3261
} | class ____(logging.Handler):
def __init__(self, level):
super().__init__(level)
self.records = []
def emit(self, record):
self.records.append(record)
@contextmanager
def capture_logs(log_name, log_level):
try:
logger = logging.getLogger(log_name)
old_level = logger.level
handler = logging.Handler()
logger.setLevel(log_level)
log_records = []
def emit(record):
log_records.append(record)
handler.emit = emit
logger.addHandler(handler)
yield log_records
finally:
logger.removeHandler(handler)
logger.setLevel(old_level)
| LogCaptureHandler |
python | numba__numba | numba/core/codegen.py | {
"start": 43871,
"end": 45080
} | class ____(metaclass=ABCMeta):
"""
Base Codegen class. It is expected that subclasses set the class attribute
``_library_class``, indicating the CodeLibrary class for the target.
Subclasses should also initialize:
``self._data_layout``: the data layout for the target.
``self._target_data``: the binding layer ``TargetData`` for the target.
"""
@abstractmethod
def _create_empty_module(self, name):
"""
Create a new empty module suitable for the target.
"""
@abstractmethod
def _add_module(self, module):
"""
Add a module to the execution engine. Ownership of the module is
transferred to the engine.
"""
@property
def target_data(self):
"""
The LLVM "target data" object for this codegen instance.
"""
return self._target_data
def create_library(self, name, **kwargs):
"""
Create a :class:`CodeLibrary` object for use with this codegen
instance.
"""
return self._library_class(self, name, **kwargs)
def unserialize_library(self, serialized):
return self._library_class._unserialize(self, serialized)
| Codegen |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks.py | {
"start": 14183,
"end": 16697
} | class ____(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
@compatibility(TF2)
Please check this [notebook][notebook] on how to migrate the API to TF2.
[notebook]:https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb
@end_compatibility
"""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
# Check latest global step to ensure that the targeted last step is
# reached. global_step read tensor is the value of global step
# before running the operation. We're not sure whether current session.run
# incremented the global_step or not. Here we're checking it.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
@tf_export(v1=["train.CheckpointSaverListener"])
| StopAtStepHook |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 168071,
"end": 169538
} | class ____:
def test_commit_hook_is_called_on_commit(self):
data = {}
@task
def my_task():
pass
@my_task.on_commit
def commit(txn):
data["txn"] = txn
state = my_task(return_state=True)
assert state.is_completed()
assert state.name == "Completed"
assert isinstance(data["txn"], Transaction)
def test_does_not_log_rollback_when_no_user_defined_rollback_hooks(self, caplog):
@task
def my_task():
pass
@my_task.on_commit
def commit(txn):
raise Exception("oops")
my_task()
assert "Running rollback hook" not in caplog.text
def test_run_task_in_serializable_transaction(self):
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/15503
"""
@task
def my_task():
return get_transaction()
with transaction(
isolation_level=IsolationLevel.SERIALIZABLE,
store=ResultStore(lock_manager=MemoryLockManager()),
):
task_txn = my_task()
assert task_txn is not None
assert isinstance(task_txn.store, ResultStore)
# make sure the task's result store gets the lock manager from the parent transaction
assert task_txn.store.lock_manager == MemoryLockManager()
assert task_txn.isolation_level == IsolationLevel.SERIALIZABLE
| TestTransactions |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 3514,
"end": 4095
} | class ____(SanicException):
"""A base class for other exceptions and should not be called directly."""
def __init__(
self,
message: Optional[Union[str, bytes]] = None,
*,
quiet: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
extra: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, Any]] = None,
) -> None:
super().__init__(
message,
quiet=quiet,
context=context,
extra=extra,
headers=headers,
)
| HTTPException |
python | astropy__astropy | astropy/nddata/tests/test_utils.py | {
"start": 15181,
"end": 25627
} | class ____:
def setup_class(self):
self.data = np.arange(20.0).reshape(5, 4)
self.position = SkyCoord("13h11m29.96s -01d19m18.7s", frame="icrs")
wcs = WCS(naxis=2)
rho = np.pi / 3.0
scale = 0.05 / 3600.0
wcs.wcs.cd = [
[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)],
]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [
self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg),
]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
def test_cutout(self):
sizes = [
3,
3 * u.pixel,
(3, 3),
(3 * u.pixel, 3 * u.pix),
(3.0, 3 * u.pixel),
(2.9, 3.3),
]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2.0, 2.0)
assert c.position_cutout == (1.0, 1.0)
assert c.center_original == (2.0, 2.0)
assert c.center_cutout == (1.0, 1.0)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3 * u.arcsec / (0.1 * u.arcsec / u.pixel)
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1 * u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="trim")
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial")
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial", fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.0
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
@pytest.mark.parametrize(
"position, limit_rounding_method, expected_slices_original",
[
((2, 2), np.ceil, (slice(1, 4), slice(1, 4))),
((2, 2), np.floor, (slice(0, 3), slice(0, 3))),
((1.9, 2.9), np.round, (slice(1, 4), slice(0, 3))),
((2, 2), np.trunc, (slice(0, 3), slice(0, 3))),
],
)
def test_limit_rounding_method(
self, position, limit_rounding_method, expected_slices_original
):
c = Cutout2D(
self.data, position, (3, 3), limit_rounding_method=limit_rounding_method
)
assert c.data.shape == (3, 3)
assert c.slices_original == expected_slices_original
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs, mode="partial")
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs.array_shape == (ysize, xsize)
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs, mode="partial").wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_cutout_with_nddata_as_input(self):
# This is essentially a copy/paste of test_skycoord with the
# input a ccd with wcs attribute instead of passing the
# wcs separately.
ccd = CCDData(data=self.data, wcs=self.wcs, unit="adu")
c = Cutout2D(ccd, self.position, (3, 3))
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_cutout_section(tmp_path):
# Make sure that one can pass ImageHDU.section and CompImageHDU.section
# to Cutout2D
data = np.ones((200, 200))
hdu = fits.ImageHDU(data=data)
hdu.writeto(tmp_path / "uncompressed.fits")
with fits.open(tmp_path / "uncompressed.fits") as hdul:
c = Cutout2D(
hdul[1].section,
(75, 75),
100 * u.pix,
)
# Partial cutout
c = Cutout2D(hdul[1].section, (75, 75), 100 * u.pix, mode="partial")
chdu = fits.CompImageHDU(data=data)
chdu.writeto(tmp_path / "compressed.fits")
with fits.open(tmp_path / "compressed.fits") as hdul:
c = Cutout2D(
hdul[1].section,
(75, 75),
100 * u.pix,
)
# Partial cutout
c = Cutout2D(hdul[1].section, (75, 75), 100 * u.pix, mode="partial")
def test_cutout_section_with_bzero_bscale_blank(tmp_path):
# Make sure that one can pass ImageHDU.section and CompImageHDU.section
# to Cutout2D
data = (np.arange(200 * 200).reshape(200, 200) - 20_000).astype(np.int16)
hdu = fits.ImageHDU(data=data)
hdu._scale_internal(bzero=1.234, bscale=0.0002, blank=32767)
hdu.writeto(tmp_path / "uncompressed.fits")
position, size = (25, 25), 100 * u.pix
with fits.open(tmp_path / "uncompressed.fits") as hdul:
# Partial cutout
c = Cutout2D(hdul[1].section, position, size, mode="partial")
chdu = fits.CompImageHDU(data=data)
chdu._scale_internal(bzero=1.234, bscale=0.0002, blank=32767)
chdu.writeto(tmp_path / "compressed.fits")
with fits.open(tmp_path / "compressed.fits") as hdul:
# Partial cutout
c = Cutout2D(hdul[1].section, position, size, mode="partial")
| TestCutout2D |
python | pytorch__pytorch | test/inductor/test_cuda_repro.py | {
"start": 2351,
"end": 100035
} | class ____(TestCase):
device = "cuda"
common = check_model_cuda
def test_mm_out_dtype_compile(self):
a = torch.randn(1, 3, device="cuda", dtype=torch.float16)
b = torch.randn(3, 2, device="cuda", dtype=torch.float16)
def fn(x, y):
return torch.mm(x, y, out_dtype=torch.float32)
compiled = torch.compile(fn, backend="inductor", fullgraph=True)
result = compiled(a, b)
expected = fn(a, b)
self.assertEqual(result.dtype, expected.dtype)
self.assertEqual(result, expected)
def test_index_put_issue(self):
def forward(
self,
arg76_1,
expand_default,
full_like_default,
_to_copy_default_67,
zeros,
):
sum_sym_int_19 = torch.ops.aten.sum(_to_copy_default_67, [0], True)
view_default_57 = torch.ops.aten.view.default(sum_sym_int_19, [512, 768])
where_self = torch.ops.aten.where.self(
expand_default, view_default_57, full_like_default
)
clone_default_12 = torch.ops.aten.clone.default(zeros)
index_put__default = torch.ops.aten.index_put_.default(
clone_default_12, [arg76_1], where_self, True
)
return (index_put__default,)
inps = [
(torch.Size([512]), torch.int64),
(torch.Size([512, 768]), torch.bool),
(torch.Size([512, 768]), torch.float16),
(torch.Size([4, 512, 768]), torch.float16),
(torch.Size([512, 768]), torch.float16),
]
inps = [torch.zeros(())] + [
torch.ones(shape, dtype=dtype, device="cuda") for (shape, dtype) in inps
]
mod = make_fx(forward)(*inps)
compiled = compile_fx_inner(mod, inps)
compiled(inps)
def test_view_replay_padding_issue_163328(self):
class ReproModule(nn.Module):
def __init__(self):
super().__init__()
self.num_points_out = 120
self.lc_num = 2
input_channels = 16
self.linear_main = nn.Linear(input_channels, self.num_points_out * 2)
self.linear_lc = nn.Linear(input_channels, self.num_points_out * 2)
def forward(self, x: torch.Tensor):
bs, num_lat, num_lon, channels = x.shape
index = num_lat - self.lc_num
main_x = x[:, :index].reshape(bs * index * num_lon, channels)
lc_x = x[:, index:].reshape(bs * self.lc_num * num_lon, channels)
refline = self.linear_main(main_x).reshape(bs, index, num_lon, -1)
lc_refline = self.linear_lc(lc_x).reshape(bs, self.lc_num, num_lon, -1)
base = torch.cat([refline, lc_refline], dim=1).contiguous()
out0 = base.reshape(bs, num_lat, num_lon, self.num_points_out, 2)
out1 = base.reshape(bs, num_lat * num_lon, self.num_points_out * 2)
return {"ten0": out0, "ten1": out1}
torch.manual_seed(0)
model = ReproModule().cuda()
inputs = torch.randn(36, 9, 7, 16, device="cuda", requires_grad=True)
eager_out = model(inputs)
compiled_model = torch.compile(
copy.deepcopy(model),
backend="inductor",
mode="reduce-overhead",
fullgraph=True,
)
compiled_out = compiled_model(inputs)
self.assertEqual(compiled_out["ten0"], eager_out["ten0"])
self.assertEqual(compiled_out["ten1"], eager_out["ten1"])
def test_effn_attn_bias_padding(self):
batch_size, num_heads, seq_len, head_dim = 2, 32, 512, 128
def fn(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
input_tensor: torch.Tensor, # This will be our starting point
):
# Input tensor should be [2, 1, 8192, 1] with appropriate strides
bias = torch.ops.aten.expand(
input_tensor, [2, 32, seq_len, seq_len]
) # Expands with stride pattern [65536, 0, 8, 0]
return torch.ops.aten._scaled_dot_product_efficient_attention(
query,
key,
value,
bias,
compute_log_sumexp=True,
dropout_p=0.0,
is_causal=False,
scale=None,
)
query = torch.randn(batch_size, num_heads, seq_len, head_dim, device="cuda")
key = torch.randn(batch_size, num_heads, seq_len, head_dim, device="cuda")
value = torch.randn(batch_size, num_heads, seq_len, head_dim, device="cuda")
input_tensor = torch.rand([2, 1, seq_len, 1], device="cuda")
out, code = run_and_get_code(torch.compile(fn), query, key, value, input_tensor)
input_tensor2 = torch.rand([2, 32, seq_len, seq_len], device="cuda").copy_(
input_tensor
)
# even though the last dim is broadcasted, needs stride 1 for alignment
# but dim 1 stride can be 0
FileCheck().check("buf0").check("(262144, 0, 512, 1").run(code[0])
# dont check rng state
self.assertEqual(out[:2], fn(query, key, value, input_tensor2)[:2])
@skipIfRocmArch(MI350_ARCH)
def test_effn_attn_bias_padding_misaligned(self):
seqlen_start = 1008
for offset in range(-1, 2):
seqlen = seqlen_start + offset
torch._dynamo.reset()
bsz = 32
q = torch.randn(bsz, 16, seqlen, 64, dtype=torch.bfloat16, device="cuda")
k = torch.randn(bsz, 16, seqlen, 64, dtype=torch.bfloat16, device="cuda")
v = torch.randn(bsz, 16, seqlen, 64, dtype=torch.bfloat16, device="cuda")
mask = torch.ones([bsz, 1, seqlen, seqlen], dtype=torch.bool, device="cuda")
inputs = [q, k, v, mask]
def f(q, k, v, mask):
with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION):
return F.scaled_dot_product_attention(
q, k, v, attn_mask=mask, dropout_p=0.0
)
f_compiled = torch.compile(f)
out, code = run_and_get_code(f_compiled, *inputs)
# padded bias should have an expanded dim
FileCheck().check("buf0 =").check_same(", 0, ").run(code[0])
# single fused padded kernel
FileCheck().check_count("empty_strided_cuda(", 1, exactly=True).check(
"return"
).run(code[0])
self.assertEqual(out, f(*inputs))
def test_input_channels_last(self):
m = torch.nn.Sequential(
torch.nn.Conv2d(3, 3, 1, 1),
ToTuple(),
).cuda()
inp = torch.randn([2, 3, 16, 16]).to(memory_format=torch.channels_last).cuda()
self.common(
m,
(inp,),
check_lowp=False,
)
@torch.compile()
def foo(m, inp):
return m(inp)
self.assertTrue(foo(m, inp)[0].is_contiguous(memory_format=torch.channels_last))
# https://github.com/pytorch/torchdynamo/issues/1681#issuecomment-1283433527
def test_unspec_inputs_interop(self):
    """Repro: mixed CUDA tensor + 0-dim CPU int tensor inputs must compile
    and run through compile_fx_inner without error."""

    class Repro(torch.nn.Module):
        def forward(self, x, y):
            unsqueeze = torch.ops.aten.unsqueeze.default(x, 4)
            permute = torch.ops.aten.permute.default(unsqueeze, [0, 1, 2, 4, 3])
            add = torch.ops.aten.add.Tensor(y, 1)
            return [permute, add]

    inps = [
        rand_strided((12, 3, 512, 64), (64, 196608, 768, 1), torch.float32, "cuda"),
        rand_strided((), (), torch.int64, "cpu"),
    ]
    mod = make_fx(Repro().to(device="cuda"))(*inps)
    compiled = compile_fx_inner(mod, inps)
    compiled(inps)

@unittest.skipIf(
    IS_FBCODE, "RuntimeError: Triton Error [CUDA]: invalid device context"
)
def test_backward_context(self):
    """Smoke test: backward through a compiled function must not lose the
    CUDA device context."""

    def fn(x):
        return x * 3

    x = torch.randn(4, device="cuda", requires_grad=True)
    gO = torch.rand_like(x)
    opt_fn = torch.compile(fn)
    out = opt_fn(x)
    out.backward(gO)

@config.patch(fallback_random=True)
def test_dtype_factory_issue(self):
    """randn with explicit dtype/device in the graph must produce a CUDA
    output after compilation (factory-op device handling)."""

    def forward():
        randn = torch.ops.aten.randn.default(
            [12, 64, 1, 64],
            dtype=torch.float32,
            device=torch.device(type="cuda", index=0),
            pin_memory=False,
        )
        unsqueeze_default_2 = torch.ops.aten.unsqueeze.default(randn, -1)
        return (unsqueeze_default_2,)

    mod = make_fx(forward)()
    compiled = compile_fx_inner(mod, ())
    assert compiled([])[0].device.type == "cuda"

@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_no_device_idx_repro_cudagraphs(self):
    """Repro: graph with no tensor inputs, only full() factory ops, under
    cudagraphs — must still compile and match eager."""

    class Repro(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self):
            full = torch.ops.aten.full.default(
                [8, 512],
                1,
                dtype=torch.float32,
                layout=torch.strided,
                device=torch.device(type="cuda", index=0),
                pin_memory=False,
            )
            full_1 = torch.ops.aten.full.default(
                [8, 512],
                0,
                dtype=torch.int64,
                layout=torch.strided,
                device=torch.device(type="cuda", index=0),
                pin_memory=False,
            )
            return (full_1, full)

    self.common(Repro(), ())

@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_expanded_inputs_cudagraphs(self):
    """Zero-strided (expanded) inputs must work under cudagraphs."""

    @torch.compile(backend="inductor")
    def fn(x, y):
        return x + y

    inputs = (
        rand_strided((5, 5, 5, 5), (0, 5, 0, 1), device="cuda"),
        rand_strided((5, 5, 5, 5), (0, 5, 0, 1), device="cuda"),
    )
    self.assertTrue(same(fn(*inputs), inputs[0] + inputs[1]))

@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(
    automatic_dynamic_shapes=True,
    assume_static_by_default=False,
)
def test_dynamic_to_static_cudagraphs(self):
    """Dynamic-shape compile followed by a different static size, with
    cudagraph trees both on and off; also returns a symbolic size."""
    for b in [False, True]:
        with config.patch({"triton.cudagraph_trees": b}):

            @torch.compile(backend="inductor")
            def fn(x, y):
                r = x + y
                return r, r.size(0)

            inputs = (
                torch.randn((5, 5), device="cuda"),
                torch.randn((5, 5), device="cuda"),
            )
            self.assertTrue(same(fn(*inputs), (inputs[0] + inputs[1], 5)))

            inputs = (
                torch.randn((6, 6), device="cuda"),
                torch.randn((6, 6), device="cuda"),
            )
            self.assertTrue(same(fn(*inputs), (inputs[0] + inputs[1], 6)))

def _test_split_reduction_impl(self, x):
    """Shared helper: torch.max over the whole tensor (a split reduction),
    compiled vs eager; optionally benchmarks when DO_PERF_TEST is set."""

    def max(x):
        return torch.max(x)

    max_c = torch.compile(max)

    out, code = run_and_get_code(max_c, x)
    self.assertEqual(out, max(x))

    if DO_PERF_TEST:
        ms_c = benchmarker.benchmark_gpu(lambda: max_c(x))
        ms_eager = benchmarker.benchmark_gpu(lambda: max(x))

        print(f"compile {ms_c=:.03f}, eager {ms_eager=:.03f}")

def test_split_reduction_transposed(self):
    """Split reduction over a transposed-then-contiguous-then-transposed
    (non-standard stride) input."""
    x = torch.randn(4096, 8192, dtype=torch.bfloat16, device="cuda")
    x = x.t().contiguous().t()
    self._test_split_reduction_impl(x)

def test_split_reduction_channels_last(self):
    """Split reduction over a channels-last 4D view of the data."""
    x = torch.randn(4096, 8192, dtype=torch.bfloat16, device="cuda")
    x = x.reshape([256, 256, 256, 2]).to(memory_format=torch.channels_last)
    self._test_split_reduction_impl(x)
@config.patch({"emulate_precision_casts": True})
def test_bool_emulate_low_precision(self):
from torch import device
inf = float("inf")
def forward():
full_1 = torch.ops.aten.full.default(
[6, 6],
1,
dtype=torch.float32,
layout=torch.strided,
device=device(type="cpu"),
pin_memory=False,
)
device_put_3 = torch.ops.prims.device_put.default(
full_1, device(type="cuda", index=0)
)
full_1 = None
convert_element_type_40 = torch.ops.prims.convert_element_type.default(
device_put_3, torch.bool
)
device_put_3 = None
unsqueeze_4 = torch.ops.aten.unsqueeze.default(convert_element_type_40, 1)
convert_element_type_40 = None
unsqueeze_5 = torch.ops.aten.unsqueeze.default(unsqueeze_4, 3)
unsqueeze_4 = None
expand = torch.ops.aten.expand.default(unsqueeze_5, [-1, 256, -1, 256])
unsqueeze_5 = None
clone = torch.ops.aten.clone.default(
expand, memory_format=torch.contiguous_format
)
expand = None
view_15 = torch.ops.aten.reshape.default(clone, [1536, 1536])
clone = None
scalar_tensor = torch.ops.aten.scalar_tensor.default(
-inf, dtype=torch.float16, device=device(type="cuda", index=0)
)
scalar_tensor_1 = torch.ops.aten.scalar_tensor.default(
0.0,
dtype=torch.float16,
layout=torch.strided,
device=device(type="cuda", index=0),
)
where = torch.ops.aten.where.self(view_15, scalar_tensor_1, scalar_tensor)
view_15 = scalar_tensor_1 = scalar_tensor = None
return where
from torch._inductor import config
config.emulate_precision_casts = True
self.assertEqual(torch.compile(forward)(), forward())
@config.patch({"emulate_precision_casts": True})
def test_emulate_low_precision(self):
def foo(x):
return torch.nn.functional.gelu(x) * 10.0
inp = torch.rand([32], device="cuda", requires_grad=True, dtype=torch.bfloat16)
out, codes = run_fw_bw_and_get_code(lambda: torch.compile(foo)(inp))
# fwd, backward
for code in codes:
f = FileCheck()
# in eager, there are two down casts
for _ in range(2):
f.check(".to(tl.bfloat16)").check_next(".to(tl.float32)")
f.run(code)
self.assertEqual(foo(inp), out)
# TODO: Abstract this out, test more extensively
@torch._dynamo.config.patch(assume_static_by_default=False)
def test_dynamic_shapes(self):
torch._dynamo.reset() # Needed since everywhere else uses "inductor"
def f(x):
return x.cos().view(x.shape).sin()
cnts = torch._dynamo.testing.CompileCounterWithBackend("inductor")
f2 = torch.compile(f, backend=cnts)
f2(torch.randn(32))
inp = torch.randn(16)
real_out = f(inp)
compiled_out = f2(inp)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(real_out, compiled_out)
torch._dynamo.reset()
@config.patch({"triton.cudagraphs": True, "size_asserts": False})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_expanded_inputs_cudagraphs_no_size_asserts(self):
@torch.compile(backend="inductor")
def fn(x, y):
return x + y
inputs = (
rand_strided((5, 5, 5, 5), (0, 5, 0, 1), device="cuda"),
rand_strided((5, 5, 5, 5), (0, 5, 0, 1), device="cuda"),
)
self.assertTrue(same(fn(*inputs), inputs[0] + inputs[1]))
@config.patch({"triton.cudagraph_trees": False})
@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_inplace_updates_cudagraphs(self):
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = torch.nn.Parameter(
torch.randn(10, 20, requires_grad=True)
)
def forward(self, x):
x = torch.matmul(x, self.weight1)
return x
from copy import deepcopy
model = Repro().cuda()
model_ref = deepcopy(model)
model_opt = torch.compile(model, backend="inductor")
input = torch.randn(10, 10, device="cuda", requires_grad=True)
for _ in range(2):
output_ref = model_ref(input)
output_res = model_opt(input)
output_ref.sum().backward()
output_res.sum().backward()
for p_ref, p_res in zip(model_ref.parameters(), model_opt.parameters()):
self.assertEqual(p_ref.grad, p_res.grad)
with torch.no_grad():
for param in model_ref.parameters():
param.add_(1.0)
for param in model_opt.parameters():
param.add_(1.0)
# https://github.com/pytorch/torchdynamo/issues/1850
def test_inductor_output_aliases_intermediate(self):
def foo(x):
out = x + x
return out.t()
foo_opt = torch.compile(foo, backend="inductor")
inpt = torch.randn(10, 10, device="cuda", requires_grad=True)
# TODO: this is broken, fix later
# out = foo_opt(inpt)
# out.add_(2)
out_ref = foo(inpt)
out_ref.add_(2)
# self.assertEqual(out_ref, out)
def test_accuracy_issue1(self):
    """Accuracy repro: linear -> split -> squeeze -> clamp -> cross_entropy;
    compiled model must match eager (same_two_models) with autocast off."""

    class Repro(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(
                in_features=768, out_features=2, bias=True
            )

        def forward(self, start_positions: torch.Tensor, x: torch.Tensor):
            linear = self.linear(x)
            split = linear.split(1, dim=-1)
            getitem = split[0]
            squeeze = getitem.squeeze(-1)
            clamp = start_positions.clamp(0, 128)
            cross_entropy = torch.nn.functional.cross_entropy(
                squeeze, clamp, None, None, 128, None, "mean", 0.0
            )
            return cross_entropy

    mod = Repro().cuda()
    opt_mod = torch.compile(mod, backend="inductor")
    mod.eval()
    opt_mod.eval()

    args = [
        ((1,), (1,), torch.int64, "cuda", False),
        ((1, 128, 768), (98304, 768, 1), torch.float32, "cuda", True),
    ]
    args = [
        rand_strided(sh, st, dt, dev).requires_grad_(rg)
        for (sh, st, dt, dev, rg) in args
    ]
    with torch.cuda.amp.autocast(enabled=False):
        # NOTE(review): torch.cuda.amp.autocast is the legacy spelling;
        # kept as-is since it is part of the repro.
        assert same_two_models(mod, opt_mod, args), "Dynamo failed"

@config.patch(allow_buffer_reuse=False)
def test_issue103461(self):
    """Repro for issue #103461: var_mean with buffer reuse disabled."""

    def forward(add_1):
        var_mean = torch.ops.aten.var_mean.correction(
            add_1, [2], correction=0, keepdim=True
        )
        getitem_1 = var_mean[1]
        return getitem_1

    x = torch.randn(1, 8, 768, device="cuda")
    correct = forward(x)
    actual = torch.compile(forward, fullgraph=True)(x)
    self.assertEqual(actual, correct)

def test_full_copy(self):
    """A CUDA full() tensor moved to CPU and added to a CPU input — tests
    cross-device constant handling in the compiled graph."""

    def forward(x):
        full_10 = torch.ops.aten.full.default(
            [204, 204, 28],
            0,
            dtype=torch.float64,
            layout=torch.strided,
            device="cuda",
            pin_memory=False,
        )
        return x + full_10.to("cpu")

    o = torch.randn([204, 204, 28], dtype=torch.float64)
    correct = forward(o)
    actual = torch.compile(forward, fullgraph=True)(o)
    self.assertEqual(actual, correct)
def test_autotune_inplace_kernel(self):
    """
    This UT tests autotune on an inplace kernel. The autotune should not contaminate
    the input buffers when tuning with multiple configs. For more details, refer to
    https://github.com/triton-lang/triton/issues/781
    https://github.com/pytorch/torchdynamo/issues/1670
    """
    from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
    from torch._inductor.runtime.hints import AttrsDescriptorWrapper, HeuristicType
    from torch._inductor.runtime.triton_heuristics import CachingAutotuner
    from torch._inductor.utils import triton_version_uses_attrs_dict

    # Hand-rolled replacement for inductor's @triton_heuristics.pointwise
    # decorator: wraps the jitted kernel in a CachingAutotuner with two
    # configs so autotuning actually runs.
    def autotune(configs, meta):
        def decorator(fn):
            if triton_version_uses_attrs_dict():
                # Newer versions of Triton puts constexpr in signature
                # Ref: https://github.com/pytorch/pytorch/pull/145051
                meta["signature"]["XBLOCK"] = "constexpr"
            return CachingAutotuner(
                # force autotune by setting save_cache_hook to False
                fn,
                triton_meta=meta,
                configs=configs,
                save_cache_hook=False,
                mutated_arg_names=["in_out_ptr0"],
                reset_to_zero_arg_names=[],
                optimize_mem=True,
                heuristic_type=HeuristicType.POINTWISE,
                inductor_meta={"grid_type": "Grid1D"},
            )

        return decorator

    @autotune(
        configs=[
            triton.Config({"XBLOCK": 1}),
            triton.Config({"XBLOCK": 2}),
        ],
        meta={
            "signature": {
                "in_out_ptr0": "*fp32",
                "in_ptr0": "*fp32",
                "xnumel": "i32",
            },
            "device": DeviceProperties.create(torch.device("cuda")),
            "configs": [
                AttrsDescriptorWrapper(divisible_by_16=(0, 1), equal_to_1=())
            ],
            "constants": {},
        },
    )
    @triton.jit
    def kernel(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
        pid = tl.program_id(0)
        block_start = pid * XBLOCK
        offsets = block_start + tl.arange(0, XBLOCK)
        mask = offsets < xnumel
        x = tl.load(in_out_ptr0 + offsets, mask=mask, other=0.0)
        y = tl.load(in_ptr0 + offsets, mask=mask, other=0.0)
        output = x + y
        tl.store(in_out_ptr0 + offsets, output, mask=mask)

    xnumel = 384
    in0 = rand_strided((xnumel,), (1,), device="cuda", dtype=torch.float32)
    inout1 = rand_strided((xnumel,), (1,), device="cuda", dtype=torch.float32)
    inout2 = inout1.clone()

    stream0 = get_cuda_stream(0)
    # First run autotunes (and must restore the mutated arg between
    # configs); second run uses the cached config. Results must agree.
    kernel.run(inout1, in0, xnumel, stream=stream0)
    kernel.run(inout2, in0, xnumel, stream=stream0)

    assert same(inout1, inout2, tol=0.001, equal_nan=True), (
        "failed autotune with inplace kernel"
    )

def test_sort_stride_issue(self):
    # This minified testcase comes from detectron2_maskrcnn_r_50_fpn
    # There was a false error from our size_assert code
    @torch.compile(fullgraph=True)
    def forward(pred_objectness_logits_3_: torch.Tensor):
        sort_3 = pred_objectness_logits_3_.sort(descending=True, dim=1)
        getitem_12 = sort_3[0]
        return getitem_12

    args = [((1, 100), (0, 1), torch.float16, "cuda", False)]
    args = [
        rand_strided(sh, st, dt, dev).requires_grad_(rg)
        for (sh, st, dt, dev, rg) in args
    ]
    result = forward(*args)
    assert same(result, torch.sort(args[0], descending=True, dim=1)[0])

def test_scalar_triton_index(self):
    # The indirect indexing via a scalar like below used to lead to
    # bad triton code that made triton segfault when compiling.
    # See https://github.com/pytorch/torchdynamo/issues/1515
    def fn(a):
        zero = torch.zeros((16,), device=a.device, dtype=torch.int64)
        return (a[zero],)

    a = torch.randn((8,), dtype=torch.float32, device="cuda")

    fn_optimized = torch.compile(fn, backend="inductor")
    assert same(fn(a), fn_optimized(a))

def test_indirect_indexing_dense_mask(self):
    """gather via a computed index (sum - 1) with dense masking; compiled
    result must match eager."""

    def fn(x, y):
        ne = torch.ops.aten.ne.Scalar(x, 1)
        sum_1 = torch.ops.aten.sum.dim_IntList(ne, [1])
        sub = torch.ops.aten.sub.Tensor(sum_1, 1)
        unsqueeze = torch.ops.aten.unsqueeze.default(sub, -1)
        gather = torch.ops.aten.gather.default(x, 1, unsqueeze)
        squeeze = torch.ops.aten.squeeze.default(gather)
        out = torch.ops.aten.multiply(y, squeeze)
        return (out,)

    a = torch.zeros((1, 128), dtype=torch.int64, device="cuda")
    b = torch.zeros((1, 128), dtype=torch.int64, device="cuda")

    fn_optimized = torch.compile(fn, backend="inductor")
    assert same(fn(a, b), fn_optimized(a, b))

def test_simplify_dims(self):
    """Strided slice input exercises inductor's dimension simplification."""

    def fn(a):
        return (a + 1,)

    self.common(fn, (torch.randn(2, 3, 10, 5, 6, device="cuda")[:, :, 2::2, :, :],))
@config.patch(permute_fusion=True)
def test_permute_fusion(self):
    """permute + reshape + bmm with permute_fusion enabled; compiled model
    must match eager."""

    class Repro(torch.nn.Module):
        def forward(self, view, reshape_2):
            permute = view.permute(0, 2, 1)
            view = None
            reshape = torch.reshape(permute, (-1, 642))
            bmm = torch.bmm(permute, reshape_2)
            return (bmm,)

    args = [
        ((1024, 642, 160), (102720, 160, 1), torch.float32, "cuda", True),
        ((1024, 642, 20), (12840, 20, 1), torch.float32, "cuda", True),
    ]
    args = [
        rand_strided(sh, st, dt, dev).requires_grad_(rg)
        for (sh, st, dt, dev, rg) in args
    ]

    mod = Repro()
    opt_mod = torch.compile(mod, backend="inductor")

    ref = mod(*args)
    res = opt_mod(*args)
    self.assertTrue(same(ref, res))

@config.patch({"triton.autotune_pointwise": True})
def test_inplace_add_alpha_autotune(self):
    """In-place add_ with alpha under pointwise autotuning: the eagerly
    mutated tensor and the compiled-mutated tensor must end up equal."""

    def fn(x, y):
        aten.add_.Tensor(x, y, alpha=0.55)
        return (x,)

    x1 = torch.zeros(2, 3, 4, 10, device="cuda")
    x2 = torch.zeros(2, 3, 4, 10, device="cuda")
    x3 = torch.zeros(2, 3, 4, 10, device="cuda")
    y = torch.randn(2, 3, 4, 10, device="cuda").to(
        memory_format=torch.channels_last
    )
    fn_fx = make_fx(fn)(x1, y)
    fn_compiled = compile_fx_inner(fn_fx, [x1, y])
    fn(x2, y)
    fn_compiled([x3, y])
    assert same(x2, x3)

@config.patch({"triton.autotune_pointwise": True})
def test_inplace_buffer_autotune(self):
    """matmul output reused in-place by a broadcasted add, under pointwise
    autotuning; numerics checked via self.common."""

    def foo(x, y, z):
        a = x @ y
        return a.unsqueeze(0).unsqueeze(0) + z

    x = torch.zeros(5, 5, device="cuda")
    y = torch.zeros(5, 5, device="cuda")
    z = torch.zeros(1, 1, 5, 5, device="cuda").to(memory_format=torch.channels_last)
    self.common(
        foo,
        (x, y, z),
        check_lowp=False,
    )

def test_memory_history_inductor(self):
    """The CUDA memory-history snapshot must attribute allocations to the
    original Python frames (i.e. contain 'called_inside_compile') even when
    the allocation happens inside compiled code."""

    def called_inside_compile(x, w, b):
        a = x @ w + b
        return torch.sigmoid(a)

    @torch.compile
    def fn(x, w, b):
        x = called_inside_compile(x, w, b)
        return called_inside_compile(x, w, b)

    w = torch.rand(3, 3, device="cuda")
    b = torch.rand(3, device="cuda")
    x = torch.rand(3, device="cuda")

    try:
        torch.cuda.memory.empty_cache()
        torch.cuda.memory._record_memory_history(True)
        r = fn(x, w, b)
    finally:
        # Always stop recording, even if the compiled call raises.
        torch.cuda.memory._record_memory_history(False)
    snapshot = str(torch.cuda.memory._snapshot())
    self.assertTrue("called_inside_compile" in snapshot)
def test_negative_arange_dynamic_shapes(self):
    # Repro from alibi relative encodings
    def sign(x):
        return (x > 0) - (x < 0)

    class Repro(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            nheads = 16
            start = math.log2(0.5)
            end = math.log2(1 / (2**8))
            # Per-head alibi slope table (negative-step arange).
            self.scales = nn.Buffer(
                2
                ** torch.arange(
                    start,
                    end + 1e-6 * sign(end - start),
                    (end - start) / (nheads - 1),
                ).view(1, nheads, 1, 1),
            )
            self.emb = nn.Embedding(1024, 256)
            self.dec_layer = nn.TransformerDecoderLayer(
                256, 16, 512, batch_first=True, norm_first=True
            )
            self.head = nn.Linear(256, 1024)

        def forward(self, enc_out: torch.Tensor, dec_in: torch.Tensor):
            padmask = dec_in == 0
            dec_mask = padmask.unsqueeze(-1) == padmask.unsqueeze(-2)
            dec_mask = dec_mask.to(dtype=torch.float32)
            dec_mask = dec_mask.tril(diagonal=0).cuda()

            q_pos = torch.arange(dec_in.size(1), dtype=torch.long, device="cuda")
            k_pos = torch.arange(dec_in.size(1), dtype=torch.long, device="cuda")
            rel_pos = k_pos[None, :] - q_pos[:, None]
            values = rel_pos.abs().neg().unsqueeze(0).unsqueeze(0)
            dec_bias = values * self.scales
            dec_bias.tril_(diagonal=0)

            dec_mask = dec_mask + dec_bias[0]
            out = self.emb(dec_in)
            out = self.dec_layer(out, enc_out, tgt_mask=dec_mask)
            return self.head(out)

    mod = Repro().cuda()
    opt_mod = torch.compile(mod, backend="inductor", dynamic=True)
    mod.eval()
    opt_mod.eval()

    enc_out = torch.rand(1, 512, 256).cuda()
    # Growing decoder lengths (1..8) exercise dynamic shapes.
    dec_inputs = [
        torch.randint(0, 512, (1, i + 1), dtype=torch.long).cuda() for i in range(8)
    ]

    for dec_inp in dec_inputs:
        assert same_two_models(mod, opt_mod, [enc_out, dec_inp], only_fwd=True), (
            "Inductor with dynamic shapes failed"
        )

def test_issue97695_1input(self):
    """Repro for issue #97695 (single-input cat over an addmm), run both via
    compile_fx_inner and via torch.compile."""

    def fn(arg3_1, relu, permute_1):
        addmm_1 = torch.ops.aten.addmm.default(arg3_1, relu, permute_1)
        cat_2 = torch.ops.aten.cat.default([addmm_1], 1)
        return (cat_2,)

    args = [
        ((96,), (1,), torch.float32, "cuda"),
        ((10, 256), (256, 1), torch.float32, "cuda"),
        ((256, 96), (1, 256), torch.float32, "cuda"),
    ]
    args = [rand_strided(sh, st, dt, dev) for (sh, st, dt, dev) in args]
    correct = fn(*args)

    mod = make_fx(fn, tracing_mode="real")(*args)
    compiled = compile_fx_inner(mod, args)
    ref = compiled(list(args))

    assert same(ref, correct)

    ref = torch.compile(fn, fullgraph=True)(*args)
    assert same(ref, correct)

def test_issue_103924(self):
    """Repro for issue #103924: softmax of input divided by a broadcasted
    ones vector."""

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.temperature = 1
            self.layer = torch.nn.Softmax(dim=1)

        def forward(self, x):
            n_samples, _ = x.shape
            y = 1.0 * torch.ones(n_samples, dtype=x.dtype, device=x.device)
            inp = x / y[..., None]
            return self.layer(inp)

    x = torch.rand([4, 4], device="cuda")
    m = MyModule()
    opt_m = torch.compile(backend="inductor")(m)
    self.assertEqual(opt_m(x), m(x))

def test_issue97695_2input(self):
    """Repro for issue #97695, two-input variant: cat over two addmms."""

    def fn(arg3_1, arg3_2, relu, permute_1):
        addmm_1 = torch.ops.aten.addmm.default(arg3_1, relu, permute_1)
        addmm_2 = torch.ops.aten.addmm.default(arg3_2, relu, permute_1)
        cat_2 = torch.ops.aten.cat.default([addmm_1, addmm_2], 1)
        return (cat_2,)

    args = [
        ((96,), (1,), torch.float32, "cuda"),
        ((96,), (1,), torch.float32, "cuda"),
        ((10, 256), (256, 1), torch.float32, "cuda"),
        ((256, 96), (1, 256), torch.float32, "cuda"),
    ]
    args = [rand_strided(sh, st, dt, dev) for (sh, st, dt, dev) in args]
    correct = fn(*args)

    ref = torch.compile(fn, fullgraph=True)(*args)
    assert same(ref, correct)
def test_scatter_index_not_wrapped(self):
    """scatter_reduce codegen: the device assert must check the raw index
    (no negative-index wrapping) right before the atomic_add."""
    src = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=self.device)
    index = torch.tensor([0, 1, 0, 1, 2, 0], device=self.device)
    input = torch.tensor([1.0, 2.0, 3.0, 4.0], device=self.device)
    compiled_sr = torch.compile(torch.scatter_reduce)

    input_orig = input.clone()
    out, code = run_and_get_code(compiled_sr, input, 0, index, src, "sum")
    # tmp0 - not wrapping of negative numbers
    FileCheck().check("tl.device_assert(((0 <= tmp0) & (tmp0 < 4))").check_next(
        "atomic_add"
    ).run(code[0])
    self.assertEqual(
        out, torch.scatter_reduce(input_orig.clone(), 0, index, src, "sum")
    )

def test_normalize_norm_leq_one(self):
    """F.normalize output must never exceed unit norm (precision repro with
    a borderline input value)."""

    def fn(x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.normalize(x, dim=-1)

    inp = torch.tensor([[3.799999, 0.0, 0.0]], device="cuda", dtype=torch.float32)
    compiled = torch.compile(fn, backend="inductor", fullgraph=True)
    out = compiled(inp)
    norm = out.norm(dim=-1)
    self.assertTrue(
        torch.all(norm <= 1.0), f"expected norm <= 1.0 but got {norm.item()}"
    )

def test_libdevice_routing(self):
    """exp/sigmoid must be routed to libdevice (not tl_math) in the
    generated Triton code, for both fp64 and fp32 inputs."""

    def foo(x):
        return x.exp()

    inp = torch.ones(64, device="cuda").to(torch.float64)
    out, code = run_and_get_code(torch.compile(foo), inp)
    FileCheck().check("libdevice.exp").run(code[0])
    self.assertEqual(foo(inp), out)

    inp = inp.to(torch.float)
    out, code = run_and_get_code(torch.compile(foo), inp)
    FileCheck().check_not("tl_math.exp").check("libdevice.exp").run(code[0])
    self.assertEqual(foo(inp), out)

    def foo(x):
        return x.sigmoid()

    inp = torch.ones(64, device="cuda").to(torch.float64)
    out, code = run_and_get_code(torch.compile(foo), inp)
    FileCheck().check("libdevice.exp").run(code[0])
    self.assertEqual(foo(inp), out)

def test_uint_view_copy(self):
    """copy_ into a uint16 reinterpret-view of a bf16 tensor must match the
    eager result."""

    @torch.compile
    def view_copy(target, source):
        assert target.dtype == torch.bfloat16
        assert source.dtype == torch.uint16
        target.view(torch.uint16).copy_(source)

    target = torch.ones(1024, dtype=torch.bfloat16, device="cuda")
    source = torch.full_like(target, 4, dtype=torch.uint16)
    out = target.view(torch.uint16).copy_(source).clone()
    view_copy(target, source)
    self.assertEqual(out, target.view(torch.uint16))
def test_embedding_var_mean(self):
    """Repro: embedding lookup with computed (cumsum-based) indices feeding
    var_mean; just compile and run via compile_fx_inner."""

    def forward(arg0_1):
        full = torch.ops.aten.full.default(
            [1, 2048],
            1,
            dtype=torch.float32,
            layout=torch.strided,
            device=torch.device(type="cuda", index=0),
            pin_memory=False,
        )
        convert_element_type_1 = torch.ops.prims.convert_element_type.default(
            full, torch.int64
        )
        cumsum = torch.ops.aten.cumsum.default(convert_element_type_1, 1)
        mul = torch.ops.aten.mul.Tensor(cumsum, convert_element_type_1)
        sub_1 = torch.ops.aten.sub.Tensor(mul, 1)
        slice_5 = torch.ops.aten.slice.Tensor(sub_1, 0, 0, 9223372036854775807)
        slice_6 = torch.ops.aten.slice.Tensor(slice_5, 1, 0, 9223372036854775807)
        add_2 = torch.ops.aten.add.Tensor(slice_6, 2)
        embedding_1 = torch.ops.aten.embedding.default(arg0_1, add_2)
        var_mean = torch.ops.aten.var_mean.correction(
            embedding_1, [2], correction=0, keepdim=True
        )
        return [var_mean[0], var_mean[1], add_2]

    emb = torch.randn([2050, 768], device="cuda")
    gm = make_fx(forward)(emb)
    opt = torch._inductor.compile_fx.compile_fx_inner(gm, [emb])
    opt([emb])
    torch.cuda.synchronize()

def test_deterministic_algorithms(self):
    """With deterministic algorithms enabled, repeated runs of a scatter-add
    style index accumulation must be bit-identical (atol=0, rtol=0)."""
    N = 10000

    @torch.compile
    def fn(idx, values):
        x = torch.zeros(1, device="cuda")
        x[idx] += values
        return x

    idx = torch.zeros(N, dtype=torch.int64, device="cuda")
    values = torch.randn(N, device="cuda")

    r0 = fn(idx, values)  # warm-up run outside the deterministic guard
    with DeterministicGuard(True):
        r1 = fn(idx, values)
        for _ in range(10):
            rn = fn(idx, values)
            self.assertEqual(r1, rn, atol=0, rtol=0)

# https://github.com/pytorch/pytorch/issues/96406
def test_linear_cpu_input(self):
    """Repro: a CPU input moved to CUDA inside forward must work under
    inference (no_grad) compilation."""

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(4, 4)

        def forward(self, data):
            data = data.to("cuda")
            return self.linear(data)

    mod = Model().cuda().eval()
    with torch.no_grad():
        self.common(mod, (torch.randn(4, 4),))

@config.patch({"fallback_random": True, "triton.cudagraphs": True})
def test_xlnet_lm_stride_repro(self):
    class Repro(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.dropout = nn.Dropout(p=0.1, inplace=False)

        def forward(self, x):
            y = torch._C._nn.gelu(x)
            return self.dropout(y)

    mod = Repro()
    x = torch.randn((512, 1, 4096), requires_grad=True, device="cuda")
    y = torch.compile(mod)(x)

    # Inductor claims the output layout of gelu's saved variable for
    # backwards will be (4096, 4096, 1) but in actuality it is (4096,
    # 2097152, 1). Fortunately this doesn't actually matter in practice.
    y.sum().backward()
def test_lookup_seed_backward(self):
    """inductor_lookup_seed / inductor_random prims (dropout-style masking
    feeding var_mean) must compile fullgraph and run."""

    @torch.compile(fullgraph=True)
    def forward(inductor_seeds, mul_4, view_15):
        inductor_lookup_seed_2 = torch.ops.prims.inductor_lookup_seed.default(
            inductor_seeds, 2
        )
        inductor_random_2 = torch.ops.prims.inductor_random.default(
            [2, 512, 768], inductor_lookup_seed_2, "rand"
        )
        gt_2 = torch.ops.aten.gt.Scalar(inductor_random_2, 0.1)
        mul_7 = torch.ops.aten.mul.Tensor(gt_2, view_15)
        mul_8 = torch.ops.aten.mul.Tensor(mul_7, 1.1111111111111112)
        add_5 = torch.ops.aten.add.Tensor(mul_8, mul_4)
        var_mean_1 = torch.ops.aten.var_mean.correction(
            add_5, [2], correction=0, keepdim=True
        )
        getitem_3 = var_mean_1[1]
        sub_3 = torch.ops.aten.sub.Tensor(add_5, getitem_3)
        return (sub_3,)

    buf0 = torch.zeros((37,), dtype=torch.int64, device="cuda")
    buf1 = torch.zeros((2, 512, 768), device="cuda")
    buf2 = torch.zeros((2, 512, 768), device="cuda")
    forward(buf0, buf1, buf2)

def test_issue100806(self):
    """Repro for issue #100806: cat + view + select + relu in eval mode."""

    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear1 = torch.nn.Linear(10, 20)
            self.linear2 = torch.nn.Linear(20, 30)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.linear1(x)
            x = self.linear2(x)
            x = torch.cat((x, x), dim=1)
            x = x.view(-1, 2, 30)
            x = x[:, 1, :]
            x = self.relu(x)
            return x

    device = "cuda"
    batch_size = 2
    x = torch.randn(batch_size, 10).to(device)
    func = Model().to(device)

    with torch.no_grad():
        func.train(False)
        jit_func = torch.compile(func)

        res1 = func(x)
        res2 = jit_func(x)
        self.assertEqual(res1, res2)

def test_issue103481(self):
    def fn(x, y):
        # NOTE: 6 dimensions is important! does not fail for 5 dimensions
        mean = torch.mean(x, [2, 3, 4, 5], keepdim=True)
        add = mean + y
        return add

    x = torch.rand(4, 4, 4, 4, 4, 4, device="cuda")
    y = torch.rand((), device="cuda")
    expect = fn(x, y)

    opt_fn = torch.compile(fn)
    actual = opt_fn(x, y)

    self.assertEqual(expect, actual)

@config.patch({"triton.dense_indexing": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_bucketize_dynamic_dense(self):
    """
    Make sure that ops.bucketize() can handle dense_indexing, which previously
    caused issues due to incorrect handling of the size of offsets.
    """

    def fn(values, offsets):
        return torch.bucketize(values, offsets)

    values = torch.rand((64, 64), device="cuda")
    offsets = torch.tensor([0.05, 0.1, 0.5, 0.8, 0.85, 0.95], device="cuda")

    expect = fn(values, offsets)

    opt_fn = torch.compile(fn, dynamic=True)
    actual = opt_fn(values, offsets)

    self.assertEqual(expect, actual)
@unittest.skipIf(
    not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@config.patch(
    {
        "max_autotune_gemm_backends": "TRITON",
        "triton.disallow_failing_autotune_kernels_TESTING_ONLY": True,
        "compile_threads": 1,
    }
)
def test_bucketize_epilogue(self):
    """
    See https://github.com/pytorch/pytorch/issues/148764.
    Make sure that when torch.bucketize appears as an epilogue, the codegen is valid.

    Note: during autotuning, there's also the option to _not_ do the fusion.
    So if you run the test with standard configs, the fused kernel would fail during
    autotuning, and another non-fused kernel would be selected (and Inductor would
    throw some errors, but the test would pass).

    So we set disallow_failing_autotune_kernels_TESTING_ONLY=True to prevent the
    autotuner from catching failures. And set compile_threads=1 so that compile
    failures aren't caught by the async runner infra.
    """

    def fn(x: torch.Tensor, y: torch.Tensor, buckets: torch.Tensor) -> torch.Tensor:
        z = torch.mm(x, y)
        return torch.bucketize(z, buckets)

    buckets = torch.arange(-100, 100, 10, device="cuda")
    x = torch.randn(64, 64, device="cuda").clamp(-99, 99)
    y = torch.randn(64, 64, device="cuda").clamp(-99, 99)

    opt_fn = torch.compile(fn, mode="max-autotune")

    expected = fn(x, y, buckets)
    actual = opt_fn(x, y, buckets)

    self.assertEqual(expected, actual)

def test_float64_constants(self):
    def fn():
        # NOTE: tensors of all the same value are constant folded, so we
        # need a tensor with two distinct values
        a = torch.tensor([1 / 10, 2 / 10], dtype=torch.float64, device="cuda")
        return a * 2e50

    cfn = torch.compile(fn)
    expect = fn()
    actual = cfn()
    self.assertEqual(expect, actual, atol=0, rtol=0)

def test_issue104759(self):
    """Repro for issue #104759: chain of slice_scatter/permute/view/bmm ops
    on fp16 inputs, run via compile_fx_inner against eager."""

    def fn(arg7_1, add_1, permute_2, select_scatter, slice_8):
        slice_scatter_4 = torch.ops.aten.slice_scatter.default(
            permute_2, select_scatter, 0, 1, 9223372036854775807
        )
        permute_3 = torch.ops.aten.permute.default(slice_scatter_4, [1, 3, 0, 2, 4])
        view_6 = torch.ops.aten.view.default(permute_3, [1, 1000, 48])
        view_7 = torch.ops.aten.view.default(view_6, [1000, 48])
        view_8 = torch.ops.aten.view.default(view_7, [1, 1000, 48])
        view_9 = torch.ops.aten.view.default(view_8, [1, 1000, 3, 4, 4])
        permute_4 = torch.ops.aten.permute.default(view_9, [2, 0, 3, 1, 4])
        slice_7 = torch.ops.aten.slice.Tensor(permute_4, 0, 1, 9223372036854775807)
        slice_scatter_5 = torch.ops.aten.slice_scatter.default(
            slice_8, slice_7, 4, 0, 9223372036854775807
        )
        slice_scatter_6 = torch.ops.aten.slice_scatter.default(
            arg7_1, slice_scatter_5, 3, 0, 1000
        )
        mul_8 = torch.ops.aten.mul.Scalar(add_1, 0.7071067811865476)
        slice_9 = torch.ops.aten.slice.Tensor(slice_scatter_6, 3, 0, 1000)
        slice_10 = torch.ops.aten.slice.Tensor(slice_9, 4, 0, 9223372036854775807)
        select_2 = torch.ops.aten.select.int(slice_10, 0, 0)
        permute_5 = torch.ops.aten.permute.default(select_2, [0, 1, 3, 2])
        mul_9 = torch.ops.aten.mul.Scalar(permute_5, 0.7071067811865476)
        expand = torch.ops.aten.expand.default(mul_8, [1, 4, 1000, 4])
        view_10 = torch.ops.aten.view.default(expand, [4, 1000, 4])
        expand_1 = torch.ops.aten.expand.default(mul_9, [1, 4, 4, 1000])
        view_11 = torch.ops.aten.view.default(expand_1, [4, 4, 1000])
        bmm = torch.ops.aten.bmm.default(view_10, view_11)
        return (bmm,)

    args = []
    args.append(torch.randn((2, 1, 4, 1200, 4), dtype=torch.float16, device="cuda"))
    args.append(
        rand_strided(
            (1, 4, 1000, 4), (16000, 4, 16, 1), dtype=torch.float16, device="cuda"
        )
    )
    args.append(
        rand_strided(
            (3, 1, 4, 1000, 4),
            (16, 48000, 4, 48, 1),
            dtype=torch.float16,
            device="cuda",
        )
    )
    args.append(
        rand_strided(
            (2, 1, 4, 1000, 4),
            (16, 48000, 4, 48, 1),
            dtype=torch.float16,
            device="cuda",
        )
    )
    args.append(
        rand_strided(
            (2, 1, 4, 1000, 4),
            (19200, 19200, 4800, 4, 1),
            dtype=torch.float16,
            device="cuda",
        )
    )

    correct = fn(*args)
    mod = make_fx(fn, tracing_mode="real")(*args)
    compiled = compile_fx_inner(mod, args)
    ref = compiled(list(args))
    assert same(ref, correct)
@config.patch({"triton.cudagraphs": True})
def test_index_put_inplace_cudagraph(self):
def fn(x, y, z):
x = torch.zeros_like(x)
return x.index_put_([y], z, True)
x = torch.zeros((512, 512), device="cuda", dtype=torch.bool)
y = torch.zeros((512,), device="cuda", dtype=torch.int64)
z = torch.ones((512, 512), device="cuda", dtype=torch.bool)
opt_fn = torch.compile(fn, backend="inductor")
ref = fn(x, y, z)
# run it twice to test cuda graph issue
res = opt_fn(x, y, z)
res = opt_fn(x, y, z)
self.assertEqual(ref, res)
@config.patch({"triton.cudagraphs": True})
@config.patch({"fx_graph_cache": True})
def test_index_put_cudagraph(self):
for _ in range(2):
def fn(x, y, z):
x = torch.zeros_like(x)
return x.index_put([y], z, True)
x = torch.zeros((512, 512), device="cuda", dtype=torch.bool)
y = torch.zeros((512,), device="cuda", dtype=torch.int64)
z = torch.ones((512, 512), device="cuda", dtype=torch.bool)
opt_fn = torch.compile(fn, backend="inductor")
ref = fn(x, y, z)
# run it twice to test cuda graph issue
res = opt_fn(x, y, z)
res = opt_fn(x, y, z)
self.assertEqual(ref, res)
torch._dynamo.reset()
gc.collect()
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION, "flash attention not supported"
)
def test_flash_attention_dynamic(self):
class Model(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.q = nn.Linear(1024, 1024)
self.k = nn.Linear(1024, 1024)
self.v = nn.Linear(1024, 1024)
def forward(self, x):
batch_size, seq_len, _ = x.size()
queries = self.q(x).view(batch_size, seq_len, 8, 128).transpose(2, 1)
keys = self.k(x).view(batch_size, seq_len, 8, 128).transpose(2, 1)
values = self.v(x).view(batch_size, seq_len, 8, 128).transpose(2, 1)
attn = F.scaled_dot_product_attention(
queries,
keys,
values,
)
return attn
cnts = torch._dynamo.testing.CompileCounterWithBackend("inductor")
model = Model().cuda().half()
model = torch.compile(model, backend=cnts, dynamic=True)
with torch.backends.cuda.sdp_kernel(
enable_flash=True,
enable_math=False,
enable_mem_efficient=False,
enable_cudnn=False,
):
input1 = torch.rand(5, 512, 1024, device="cuda", dtype=torch.float16)
input2 = torch.rand(5, 513, 1024, device="cuda", dtype=torch.float16)
input3 = torch.rand(5, 514, 1024, device="cuda", dtype=torch.float16)
out1 = model(input1)
out2 = model(input2)
out3 = model(input3)
self.assertEqual(cnts.frame_count, 2)
@config.patch({"triton.cudagraphs": True})
def test_index_put_no_fallback_cudagraph(self):
def fn(x, y, z):
x = torch.zeros_like(x)
return x.index_put([y], z, True)
x = torch.zeros((512, 512), device="cuda", dtype=torch.int32)
y = torch.zeros((512,), device="cuda", dtype=torch.int64)
z = torch.ones((512, 512), device="cuda", dtype=torch.int32)
opt_fn = torch.compile(fn, backend="inductor")
ref = fn(x, y, z)
# run it twice to test cuda graph issue
res = opt_fn(x, y, z)
res = opt_fn(x, y, z)
self.assertEqual(ref, res)
@torch._inductor.config.patch(emulate_precision_casts=True)
def test_emulate_precision_casts_norm_rounding(self):
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
x = torch.rand(1000, device="cuda", dtype=torch.bfloat16)
scalar = torch.rand([], device="cuda", dtype=torch.float32)
def fn(inp, scale):
y = inp.norm()
return y, y + scale
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True, dynamic=True)
expected = fn(x, scalar)
actual = opt_fn(x, scalar)
self.assertEqual(expected, actual)
@torch._inductor.config.patch(emulate_precision_casts=True)
def test_emulate_precision_casts_min_pow_chain(self):
    """With emulate_precision_casts, a mixed-dtype min/sub/pow chain compiled
    with dynamic shapes must match eager within loose low-precision tolerances."""
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    with dynamo_config.patch(
        capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
    ):
        # Inputs deliberately mix float16 / bfloat16 / float32 so intermediate
        # casts (and their rounding) actually matter.
        arg0 = torch.rand(
            [383, 55, 2, 3],
            dtype=torch.float16,
            device="cuda",
            requires_grad=True,
        )
        arg1 = torch.rand(
            [383, 55], dtype=torch.bfloat16, device="cuda", requires_grad=True
        )
        arg2 = torch.rand(
            [383, 55], dtype=torch.float32, device="cuda", requires_grad=True
        )
        arg3 = torch.rand(
            [383, 55], dtype=torch.float32, device="cuda", requires_grad=True
        )

        def fn(a0, a1, a2, a3):
            t1 = a0.min(dim=2).values
            t2 = t1.sum(dim=2)
            t6 = ((((a1) - a2) - a3) - a3) - a3
            t7 = t6 + t2
            # Deeply nested pow chain amplifies any precision divergence.
            t8 = torch.pow(torch.pow(torch.pow(torch.pow(t2, t7), t7), t7), t7)
            return t7, t8

        opt_fn = torch.compile(fn, backend="inductor", fullgraph=True, dynamic=True)
        eager_out = fn(arg0, arg1, arg2, arg3)
        # Fresh leaf copies so the compiled run shares no autograd state.
        compiled_args = [
            arg0.clone().detach().requires_grad_(True),
            arg1.clone().detach().requires_grad_(True),
            arg2.clone().detach().requires_grad_(True),
            arg3.clone().detach().requires_grad_(True),
        ]
        compiled_out = opt_fn(*compiled_args)
        for eager_tensor, compiled_tensor in zip(eager_out, compiled_out):
            torch.testing.assert_close(
                eager_tensor,
                compiled_tensor,
                rtol=1e-3,
                atol=1e-3,
            )
@torch._inductor.config.patch(emulate_precision_casts=True)
def test_emulate_precision_casts_mean_ratio_chain(self):
    """With emulate_precision_casts, a mean/sub/div chain over mixed-dtype
    inputs compiled with dynamic shapes must track eager within tolerance."""
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    with dynamo_config.patch(
        capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
    ):
        # bf16 / fp16 / fp32 mixture; shapes are chosen so views line up
        # (125070 == 379 * 165 * 2, 1895 * 3 * 11 == 379 * 165).
        arg0 = torch.rand(
            [125070], dtype=torch.bfloat16, device="cuda", requires_grad=True
        )
        arg1 = torch.rand(
            [1895, 3, 11], dtype=torch.float16, device="cuda", requires_grad=True
        )
        arg2 = torch.rand(
            [1895, 3, 11], dtype=torch.float32, device="cuda", requires_grad=True
        )
        arg3 = torch.rand(
            [1895, 3, 11], dtype=torch.float32, device="cuda", requires_grad=True
        )
        arg4 = torch.rand(
            [1895, 3, 11], dtype=torch.float32, device="cuda", requires_grad=True
        )
        arg5 = torch.rand(
            [5, 379, 165], dtype=torch.float32, device="cuda", requires_grad=True
        )

        def fn(a0, a1, a2, a3, a4, a5):
            t2 = a0.view(379, 165, 2).mean(dim=2)
            t7 = ((((a1) - a2) - a3) - a2) - a4
            t8 = t7.view(379, 165)
            t11 = torch.nn.functional.relu(a5).mean(dim=0)
            t12 = t2 - t11
            # Chained divisions magnify rounding differences.
            t13 = (((t2) / t8) / t11) / t12
            return t13

        opt_fn = torch.compile(fn, backend="inductor", fullgraph=True, dynamic=True)
        eager_out = fn(arg0, arg1, arg2, arg3, arg4, arg5)
        # Fresh leaf copies so the compiled run shares no autograd state.
        compiled_args = [
            tensor.clone().detach().requires_grad_(True)
            for tensor in (arg0, arg1, arg2, arg3, arg4, arg5)
        ]
        compiled_out = opt_fn(*compiled_args)
        torch.testing.assert_close(
            eager_out,
            compiled_out,
            rtol=5e-3,
            atol=1e-1,
        )
@torch._inductor.config.patch(emulate_precision_casts=True)
def test_dont_inplace_disjoint_accesses(self):
    """Inductor must not reuse a buffer in-place when producer and consumer
    access disjoint regions of it (RMSNorm + rotary-embedding pattern); the
    generated code is checked for the absence of ``in_out`` arguments."""
    # TODO - would not need mms if we could annotate donated buffer..
    def forward(  # noqa: F821, F722
        arg0_1: "bf16[2048, 2048][2048, 1]cuda:0",  # noqa: F821, F722
        arg1_1: "bf16[8, 4096, 2048][8388608, 2048, 1]cuda:0",  # noqa: F821, F722
        arg2_1: "bf16[2048, 2048][2048, 1]cuda:0",  # noqa: F821, F722
        arg3_1: "bf16[2048, 2048][2048, 1]cuda:0",  # noqa: F821, F722
        arg4_1: "bf16[2048][1]cuda:0",  # noqa: F821, F722
        arg5_1: "bf16[2048][1]cuda:0",  # noqa: F821, F722
        arg6_1: "f32[4096, 128][128, 1]cuda:0",  # noqa: F821, F722
        arg7_1: "f32[4096, 128][128, 1]cuda:0",  # noqa: F821, F722
    ):
        # Q/K/V projections (three mms over the same activations).
        permute = torch.ops.aten.permute.default(arg0_1, [1, 0])
        arg0_1 = None
        view = torch.ops.aten.view.default(arg1_1, [32768, 2048])
        mm = torch.ops.aten.mm.default(view, permute)
        view = permute = None
        view_1 = torch.ops.aten.view.default(mm, [8, 4096, 2048])
        mm = None
        permute_1 = torch.ops.aten.permute.default(arg2_1, [1, 0])
        arg2_1 = None
        view_2 = torch.ops.aten.view.default(arg1_1, [32768, 2048])
        mm_1 = torch.ops.aten.mm.default(view_2, permute_1)
        view_2 = permute_1 = None
        view_3 = torch.ops.aten.view.default(mm_1, [8, 4096, 2048])
        mm_1 = None
        permute_2 = torch.ops.aten.permute.default(arg3_1, [1, 0])
        arg3_1 = None
        view_4 = torch.ops.aten.view.default(arg1_1, [32768, 2048])
        arg1_1 = None
        mm_2 = torch.ops.aten.mm.default(view_4, permute_2)
        view_4 = permute_2 = None
        view_5 = torch.ops.aten.view.default(mm_2, [8, 4096, 2048])
        mm_2 = None
        # RMSNorm over the query projection.
        convert_element_type_6 = torch.ops.prims.convert_element_type.default(
            view_1, torch.float32
        )
        view_1 = None
        pow_1 = torch.ops.aten.pow.Tensor_Scalar(convert_element_type_6, 2)
        mean = torch.ops.aten.mean.dim(pow_1, [-1], True)
        pow_1 = None
        add = torch.ops.aten.add.Tensor(mean, 1e-06)
        mean = None
        rsqrt = torch.ops.aten.rsqrt.default(add)
        add = None
        mul = torch.ops.aten.mul.Tensor(convert_element_type_6, rsqrt)
        convert_element_type_6 = rsqrt = None
        convert_element_type_7 = torch.ops.prims.convert_element_type.default(
            arg4_1, torch.float32
        )
        arg4_1 = None
        mul_1 = torch.ops.aten.mul.Tensor(convert_element_type_7, mul)
        convert_element_type_7 = mul = None
        convert_element_type_8 = torch.ops.prims.convert_element_type.default(
            mul_1, torch.bfloat16
        )
        mul_1 = None
        # RMSNorm over the key projection.
        convert_element_type_9 = torch.ops.prims.convert_element_type.default(
            view_3, torch.float32
        )
        view_3 = None
        pow_2 = torch.ops.aten.pow.Tensor_Scalar(convert_element_type_9, 2)
        mean_1 = torch.ops.aten.mean.dim(pow_2, [-1], True)
        pow_2 = None
        add_1 = torch.ops.aten.add.Tensor(mean_1, 1e-06)
        mean_1 = None
        rsqrt_1 = torch.ops.aten.rsqrt.default(add_1)
        add_1 = None
        mul_2 = torch.ops.aten.mul.Tensor(convert_element_type_9, rsqrt_1)
        convert_element_type_9 = rsqrt_1 = None
        convert_element_type_10 = torch.ops.prims.convert_element_type.default(
            arg5_1, torch.float32
        )
        arg5_1 = None
        mul_3 = torch.ops.aten.mul.Tensor(convert_element_type_10, mul_2)
        convert_element_type_10 = mul_2 = None
        convert_element_type_11 = torch.ops.prims.convert_element_type.default(
            mul_3, torch.bfloat16
        )
        mul_3 = None
        # Split heads: (B, S, H*D) -> (B, S, H, D).
        view_6 = torch.ops.aten.view.default(
            convert_element_type_8, [8, 4096, -1, 128]
        )
        convert_element_type_8 = None
        view_7 = torch.ops.aten.view.default(
            convert_element_type_11, [8, 4096, -1, 128]
        )
        convert_element_type_11 = None
        view_8 = torch.ops.aten.view.default(view_5, [8, 4096, -1, 128])
        view_5 = None
        convert_element_type_12 = torch.ops.prims.convert_element_type.default(
            view_6, torch.float32
        )
        view_6 = None
        convert_element_type_13 = torch.ops.prims.convert_element_type.default(
            view_7, torch.float32
        )
        view_7 = None
        # Rotary embedding on queries (arg6_1/arg7_1 are sin/cos-style tables
        # — presumably; TODO confirm against the originating model).
        unsqueeze = torch.ops.aten.unsqueeze.default(arg6_1, 0)
        unsqueeze_1 = torch.ops.aten.unsqueeze.default(unsqueeze, 2)
        unsqueeze = None
        unsqueeze_2 = torch.ops.aten.unsqueeze.default(arg7_1, 0)
        unsqueeze_3 = torch.ops.aten.unsqueeze.default(unsqueeze_2, 2)
        unsqueeze_2 = None
        mul_4 = torch.ops.aten.mul.Tensor(convert_element_type_12, unsqueeze_3)
        unsqueeze_3 = None
        view_9 = torch.ops.aten.view.default(
            convert_element_type_12, [8, 4096, 16, 2, 64]
        )
        convert_element_type_12 = None
        unbind = torch.ops.aten.unbind.int(view_9, -2)
        view_9 = None
        getitem = unbind[0]
        getitem_1 = unbind[1]
        unbind = None
        neg = torch.ops.aten.neg.default(getitem_1)
        getitem_1 = None
        cat = torch.ops.aten.cat.default([neg, getitem], -1)
        neg = getitem = None
        mul_5 = torch.ops.aten.mul.Tensor(cat, unsqueeze_1)
        cat = unsqueeze_1 = None
        add_2 = torch.ops.aten.add.Tensor(mul_4, mul_5)
        mul_4 = mul_5 = None
        # Same rotary embedding applied to keys.
        unsqueeze_4 = torch.ops.aten.unsqueeze.default(arg6_1, 0)
        arg6_1 = None
        unsqueeze_5 = torch.ops.aten.unsqueeze.default(unsqueeze_4, 2)
        unsqueeze_4 = None
        unsqueeze_6 = torch.ops.aten.unsqueeze.default(arg7_1, 0)
        arg7_1 = None
        unsqueeze_7 = torch.ops.aten.unsqueeze.default(unsqueeze_6, 2)
        unsqueeze_6 = None
        mul_6 = torch.ops.aten.mul.Tensor(convert_element_type_13, unsqueeze_7)
        unsqueeze_7 = None
        view_10 = torch.ops.aten.view.default(
            convert_element_type_13, [8, 4096, 16, 2, 64]
        )
        convert_element_type_13 = None
        unbind_1 = torch.ops.aten.unbind.int(view_10, -2)
        view_10 = None
        getitem_2 = unbind_1[0]
        getitem_3 = unbind_1[1]
        unbind_1 = None
        neg_1 = torch.ops.aten.neg.default(getitem_3)
        getitem_3 = None
        cat_1 = torch.ops.aten.cat.default([neg_1, getitem_2], -1)
        neg_1 = getitem_2 = None
        mul_7 = torch.ops.aten.mul.Tensor(cat_1, unsqueeze_5)
        cat_1 = unsqueeze_5 = None
        add_3 = torch.ops.aten.add.Tensor(mul_6, mul_7)
        mul_6 = mul_7 = None
        convert_element_type_14 = torch.ops.prims.convert_element_type.default(
            add_2, torch.bfloat16
        )
        add_2 = None
        convert_element_type_15 = torch.ops.prims.convert_element_type.default(
            add_3, torch.bfloat16
        )
        add_3 = None
        # (B, S, H, D) -> (B, H, S, D) for attention.
        permute_3 = torch.ops.aten.permute.default(
            convert_element_type_14, [0, 2, 1, 3]
        )
        convert_element_type_14 = None
        permute_4 = torch.ops.aten.permute.default(
            convert_element_type_15, [0, 2, 1, 3]
        )
        convert_element_type_15 = None
        permute_5 = torch.ops.aten.permute.default(view_8, [0, 2, 1, 3])
        view_8 = None
        return (permute_3, permute_4, permute_5)

    from torch._dynamo.debug_utils import aot_graph_input_parser

    # Materialize random inputs matching the annotated shapes/dtypes above.
    kwargs = aot_graph_input_parser(forward)
    out, code = run_and_get_code(torch.compile(forward), **kwargs)
    # ignore tiny values.. prior to this fix absolute error was ~28
    self.assertEqual(forward(**kwargs), out, atol=0.01, rtol=2)
    # No kernel argument may be marked in/out (i.e. no illegal in-place reuse).
    FileCheck().check_not("in_out").run(code[0])
# https://github.com/pytorch/pytorch/issues/104937
def test_linear_with_zero_infeature_size(self):
    """Compiling a Linear whose feature dims are zero-sized must match eager
    instead of crashing."""
    layer = nn.Linear(in_features=0, out_features=0, bias=True).to("cuda")
    inp = torch.rand(1, 1, 0, device="cuda")
    compiled_layer = torch.compile(layer)
    self.assertEqual(layer(inp), compiled_layer(inp))
@config.patch(fallback_random=True)
def test_multi_output_layout_fallback(self):
    """RReLU (a multi-output-layout fallback op) must match eager when both
    run from the same frozen RNG state."""
    rrelu = nn.RReLU(lower=3.2350976, upper=8.4220314, inplace=True)
    inp = torch.rand([4, 4]).cuda()
    compiled_rrelu = torch.compile(rrelu)
    with freeze_rng_state():
        compiled_out = compiled_rrelu(inp.clone())
        eager_out = rrelu(inp.clone())
    self.assertEqual(compiled_out, eager_out)
def test_sorted_masks(self):
    """Generated Triton loads should list the reduction mask (r0_mask) before
    the x mask on the same load."""

    @torch.compile()
    def rowsum_of_sum(a, b):
        return (a + b).sum(dim=1)

    lhs = torch.rand([255, 255], device="cuda")
    rhs = torch.rand([255, 255], device="cuda")
    _, code = run_and_get_code(rowsum_of_sum, lhs, rhs)
    # Mask order on the load must be r0_mask then xmask.
    FileCheck().check("tl.load").check_same("r0_mask").check_same("xmask").run(
        code[0]
    )
def test_cat_int8_one_kernel(self):
    """cat + pointwise over uint8/int8 inputs should fuse into exactly one
    generated kernel, with no aten.cat fallback."""

    @torch.compile()
    def cat_plus_one(tensors):
        return torch.cat(tensors) + 1

    for dtype in (torch.uint8, torch.int8):
        tensors = [
            torch.empty([256, 256], dtype=dtype, device="cuda") for _ in range(4)
        ]
        result, code = run_and_get_code(cat_plus_one, tensors)
        self.assertEqual(torch.cat(tensors) + 1, result)
        # No aten fallback, and exactly one kernel launch in the output code.
        FileCheck().check_not("aten.cat.default(").check_count(
            ".run(", 1, exactly=True
        ).run(code[0])
@config.patch("triton.use_block_ptr", True)
def test_selecsls42b_misaligned_address(self):
    """Repro from the selecsls42b backward slice: block-pointer codegen used
    to trigger a CUDA "misaligned address" error; the graph only needs to run
    cleanly."""
    # https://github.com/triton-lang/triton/issues/2836
    @torch.compile(fullgraph=True)
    def fn(arg207_1, arg208_1, convert_element_type_40, expand, full, mul_3):
        div = torch.ops.aten.div.Scalar(expand, 16)
        where = torch.ops.aten.where.self(arg207_1, full, div)
        convert_element_type_43 = torch.ops.prims.convert_element_type.default(
            where, torch.float32
        )
        sum_2 = torch.ops.aten.sum.dim_IntList(convert_element_type_43, [0, 2, 3])
        sub = torch.ops.aten.sub.Tensor(convert_element_type_40, arg208_1)
        mul = torch.ops.aten.mul.Tensor(convert_element_type_43, sub)
        sum_3 = torch.ops.aten.sum.dim_IntList(mul, [0, 2, 3])
        mul_1 = torch.ops.aten.mul.Tensor(sum_2, 0.0078125)
        unsqueeze = torch.ops.aten.unsqueeze.default(mul_1, 0)
        unsqueeze_1 = torch.ops.aten.unsqueeze.default(unsqueeze, 2)
        unsqueeze_2 = torch.ops.aten.unsqueeze.default(unsqueeze_1, 3)
        mul_2 = torch.ops.aten.mul.Tensor(sum_3, 0.0078125)
        mul_4 = torch.ops.aten.mul.Tensor(mul_2, mul_3)
        unsqueeze_3 = torch.ops.aten.unsqueeze.default(mul_4, 0)
        unsqueeze_4 = torch.ops.aten.unsqueeze.default(unsqueeze_3, 2)
        unsqueeze_5 = torch.ops.aten.unsqueeze.default(unsqueeze_4, 3)
        mul_6 = torch.ops.aten.mul.Tensor(sub, unsqueeze_5)
        sub_1 = torch.ops.aten.sub.Tensor(convert_element_type_43, mul_6)
        sub_2 = torch.ops.aten.sub.Tensor(sub_1, unsqueeze_2)
        return (sub_2,)

    args = [
        torch.randn((8, 1024, 4, 4), device="cuda") > 0,  # torch.bool tensor
        torch.randn((1, 1024, 1, 1), device="cuda"),
        torch.randn((8, 1024, 4, 4), device="cuda"),
        # fp16 expanded from a (8, 1024, 1, 1) base: zero strides in the last
        # two dims, which is what provoked the misalignment.
        torch.randn((8, 1024, 1, 1), dtype=torch.float16, device="cuda").expand(
            (8, 1024, 4, 4)
        ),
        torch.randn((), device="cuda"),
        torch.randn((1024,), device="cuda"),
    ]
    fn(*args)
    torch.cuda.synchronize()  # shake out Triton Error [CUDA]: misaligned address
def test_mutated_aligned_tensor(self):
    """In-place mutation through aligned and unaligned views of the same
    storage must land in the base tensor in both cases."""

    def inc(x):
        return x.add_(1)

    inc_compiled = torch.compile(dynamic=False)(inc)
    base = torch.rand(4096, device="cuda", dtype=torch.float16)
    reference = base.clone()
    # First call sees an aligned view; the second sees an offset (unaligned)
    # view, forcing a copy-to-aligned and a write-back of the mutation.
    self.assertEqual(inc_compiled(base[:-1]), inc(reference[:-1]))
    self.assertEqual(base, reference)
    self.assertEqual(inc_compiled(base[1:]), inc(reference[1:]))
    self.assertEqual(base, reference)
def test_non_commutative_scan_op(self):
    """associative_scan with a non-commutative combine fn must match the
    sequential first-order linear recurrence A[i] = a[i] * A[i-1] + b[i].

    Fixed: ``baseline`` previously ignored its parameters (except for the
    shape) and silently closed over the globals ``a``/``b``; it now uses the
    arguments it is given, so the reference stays correct for any inputs.
    The call ``baseline(a, b)`` is unchanged, so behavior is identical here.
    """
    from torch._higher_order_ops.associative_scan import associative_scan

    a = torch.randn(1024, 8192, dtype=torch.float64, device="cuda")
    b = torch.randn(1024, 8192, dtype=torch.float64, device="cuda")

    def baseline(v, u):
        # Sequential reference: A[0] = u[:, 0]; A[i] = v[:, i]*A[i-1] + u[:, i].
        A = [u[:, 0]]
        for i in range(1, v.shape[1]):
            A.append(v[:, i] * A[i - 1] + u[:, i])
        return torch.stack(A, dim=1)

    def combine_fn(i, j):
        # Composition of affine maps (x -> ax + b); deliberately
        # non-commutative.
        ia, ib = i
        ja, jb = j
        return ia * ja, ib * ja + jb

    @torch.compile
    def compiled_scan(a, b):
        return associative_scan(combine_fn, (a, b), dim=-1)[1]

    out1 = baseline(a, b)
    out2 = compiled_scan(a, b)
    self.assertEqual(out1, out2)
def test_dynamic_persistent_reductions(self):
    """Reductions whose reduced dim has a known upper bound should compile to
    persistent reductions (no ``for roffset`` loop) even with dynamic shapes.

    Fixed: ``run_and_get_code`` returns a *list* of code strings, so the old
    assertion ``"for roffset" not in code`` tested list membership and was
    vacuously true. Inspect the generated source strings themselves.
    """

    @torch.compile(dynamic=True)
    def inner_reduce(x):
        # The assert gives dynamo an upper bound on the reduction size.
        assert x.shape[1] <= 1024
        return x.sum(1)

    a = torch.randn(50, 600, device="cuda")
    out, code = run_and_get_code(inner_reduce, a)
    self.assertEqual(inner_reduce(a), out)
    self.assertTrue(all("for roffset" not in src for src in code))

    @torch.compile(dynamic=True)
    def outer_reduce(x):
        assert x.shape[0] <= 64
        return x.sum(0)

    out, code = run_and_get_code(outer_reduce, a)
    self.assertEqual(outer_reduce(a), out)
    self.assertTrue(all("for roffset" not in src for src in code))
def test_scaled_dot_product_efficient_attention_backward(self):
    """Forward + backward through a compiled SDPA module must run without
    error for sequence length 1 with an explicit attention mask."""
    from torch import nn, Tensor

    class SelfAttention(nn.Module):
        # Minimal BERT-style self-attention wrapper around F.sdpa.
        def __init__(
            self,
            num_attention_heads: int = 12,
            hidden_size: int = 768,
            attention_probs_dropout_prob: float = 0.1,
        ):
            super().__init__()
            self.num_attention_heads = num_attention_heads
            self.attention_head_size = hidden_size // num_attention_heads
            self.query = nn.Linear(hidden_size, hidden_size)
            self.key = nn.Linear(hidden_size, hidden_size)
            self.value = nn.Linear(hidden_size, hidden_size)
            self.dropout_prob = attention_probs_dropout_prob

        def transpose_for_scores(self, x: Tensor) -> Tensor:
            # (B, S, H*D) -> (B, H, S, D)
            new_x_shape = x.size()[:-1] + (
                self.num_attention_heads,
                self.attention_head_size,
            )
            return x.view(new_x_shape).permute(0, 2, 1, 3)

        def forward(self, hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
            query_layer = self.transpose_for_scores(self.query(hidden_states))
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_layer,
                key_layer,
                value_layer,
                attn_mask=attention_mask,
                dropout_p=self.dropout_prob if self.training else 0.0,
                is_causal=False,
            )
            return attn_output

    device = torch.device("cuda")
    num_attention_heads = 8
    hidden_size = 512
    attention_probs_dropout_prob = 0.0
    model = SelfAttention(
        num_attention_heads=num_attention_heads,
        hidden_size=hidden_size,
        attention_probs_dropout_prob=attention_probs_dropout_prob,
    ).to(device)
    model = torch.compile(model)
    # runs without failure
    batch_size = 8
    length = 1  # seq-len 1 is the regression case
    inputs_embeds = torch.randn(batch_size, length, hidden_size, device=device)
    attention_mask = torch.ones(batch_size, 1, length, length, device=device)
    attn_output = model(hidden_states=inputs_embeds, attention_mask=attention_mask)[
        0
    ]
    loss = attn_output.mean()
    loss.backward()
def test_non_contiguous_unaligned_input_indices(self):
    """remove_unaligned_input_idxs keeps only indices whose tensors are
    actually aligned, dropping those with an unaligned storage offset."""
    from torch._inductor.compile_fx import remove_unaligned_input_idxs

    aligned = torch.ones(2, 2, device="cuda")
    unaligned = torch.ones(2, 2, device="cuda")[1:]  # nonzero storage offset
    self.assertEqual(remove_unaligned_input_idxs([aligned, unaligned], [1]), [])

    tensors = [
        torch.ones(2, 2, device="cuda"),
        torch.ones(2, 2, device="cuda"),
        torch.ones(2, 2, device="cuda")[1:],
    ]
    # Index 0 is aligned and survives; index 2 is unaligned and is dropped.
    self.assertEqual(remove_unaligned_input_idxs(tensors, [0, 2]), [0])
@config.patch("triton.cudagraphs", True)
def test_unused_cpu_input_cudagraphs(self):
    """An unused CPU input must not disable cudagraphs for an otherwise
    all-CUDA graph."""

    def fn(x, y):
        return x.sin().sin().sin().sin().cos() + 1

    traced = torch.fx.symbolic_trace(fn)
    args = [torch.randn(64, device="cuda"), torch.randn(64, device="cpu")]
    compiled_fn, (graph,) = run_and_get_graph_lowering(
        torch._inductor.compile, traced, args
    )
    # Cudagraphs stay enabled and the CPU input is ignored for device typing.
    self.assertEqual(graph.disable_cudagraphs_reason, None)
    self.assertEqual(graph.device_types, {"cuda"})
    self.assertEqual(compiled_fn(*args), fn(*args))
def test_epilogue_fusion_with_view(self):
    """Epilogue fusion across a flattening view (conv -> view -> linear ->
    relu) must stay numerically close to eager under max-autotune."""

    class ToyModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
            self.linear = torch.nn.Linear(262144, 100)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.conv(x)
            x = x.view(x.size(0), -1)
            return self.relu(self.linear(x))

    from torch._inductor.utils import fresh_cache

    model = ToyModel().to(device="cuda:0")
    batch = torch.randn(32, 3, 64, 64).to(device="cuda:0")
    # Fresh cache forces autotuning to actually run.
    with fresh_cache():
        compiled = torch.compile(model, mode="max-autotune")
        autotuned_out = compiled(batch)
    eager_out = model(batch)
    self.assertEqual(autotuned_out, eager_out, atol=1e-3, rtol=1e-3)
@config.patch("triton.cudagraphs", True)
def test_cpu_index(self):
    """Indexing a CUDA tensor with a graph-constructed torch.arange index must
    not disable cudagraphs — in inference, grad-enabled forward, or backward."""

    @torch.compile(fullgraph=True)
    def fn(x):
        return x[torch.arange(32)]

    result, (graph,) = run_and_get_graph_lowering(
        fn, torch.randn(64, device="cuda")
    )
    self.assertEqual(graph.disable_cudagraphs_reason, None)
    self.assertEqual(graph.device_types, {"cuda"})

    # Same check when the input requires grad (forward graph).
    inp = torch.randn(64, device="cuda", requires_grad=True)
    result, (graph,) = run_and_get_graph_lowering(fn, inp)
    self.assertEqual(graph.disable_cudagraphs_reason, None)
    self.assertEqual(graph.device_types, {"cuda"})

    # And for the backward graph.
    result, (graph,) = run_and_get_graph_lowering(lambda: result.sum().backward())
    self.assertEqual(graph.disable_cudagraphs_reason, None)
    self.assertEqual(graph.device_types, {"cuda"})
@unittest.skipIf(IS_FBCODE, "Not runnable in fbcode")
def test_triton_interpret(self):
    """torch.compile must not error when TRITON_INTERPRET=1 is set.

    Runs in a subprocess because the env var must be set before triton is
    first imported.
    """
    import subprocess

    script = """
import os
os.environ["TRITON_INTERPRET"] = "1"
import torch

@torch.compile()
def foo(x):
    return x + 1

# somehow gives different results.. still, check that it doesn't error
foo(torch.rand([256], device="cuda"))
"""
    subprocess.run([sys.executable, "-c", script], check=True)
def test_reflection_pad_loop_order(self):
    """Reflection pad + add should produce a single non-tiled kernel that
    iterates in contiguous order; the exact Triton source is pinned below."""

    def fn(x, y):
        a = torch.nn.functional.pad(x, (5, 5, 5, 5), mode="reflect")
        b = torch.nn.functional.pad(y, (5, 5, 5, 5), mode="reflect")
        return a + b

    cfn = torch.compile(fn)
    a = torch.rand((10, 10, 10), device="cuda")
    b = torch.rand((10, 10, 10), device="cuda")
    expect = fn(a, b)
    actual, code = run_and_get_code(cfn, a, b)
    self.assertEqual(expect, actual)

    # Expect the code iterates in contiguous order, and is not tiled
    lines = code[0].split("\n")
    start = lines.index("@triton.jit")
    kernel_code = "\n".join(lines[start : start + 14])
    self.assertExpectedInline(
        kernel_code,
        """\
@triton.jit
def triton_poi_fused_add_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = (xindex % 20)
    x1 = ((xindex // 20) % 20)
    x2 = xindex // 400
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (99 + ((-1)*tl_math.abs((-9) + tl_math.abs((-5) + x0))) + ((-10)*tl_math.abs((-9) + tl_math.abs((-5) + x1))) + 100*x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (99 + ((-1)*tl_math.abs((-9) + tl_math.abs((-5) + x0))) + ((-10)*tl_math.abs((-9) + tl_math.abs((-5) + x1))) + 100*x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x3), tmp2, xmask)""",  # noqa: B950
    )
@skipCUDAIf(not SM80OrLater, "uses bfloat16 which requires SM >= 80")
def test_int64_index_intermediate(self):
    """Split + cat of a bf16 tensor large enough that intermediates need
    int64 indexing; compiled must match eager for static and dynamic shapes.

    Fixed: replaced the long-deprecated ``torch.testing.assert_allclose``
    with ``torch.testing.assert_close``.
    """

    def foo(inp):
        view_23 = torch.ops.aten.view.default(inp, [-1, 8192, 8192])
        split_1 = torch.ops.aten.split.Tensor(view_23, 1024, 1)
        view_23 = None
        getitem_17 = split_1[0]
        getitem_18 = split_1[1]
        getitem_19 = split_1[2]
        getitem_20 = split_1[3]
        getitem_21 = split_1[4]
        getitem_22 = split_1[5]
        getitem_23 = split_1[6]
        getitem_24 = split_1[7]
        split_1 = None
        cat_1 = torch.ops.aten.cat.default(
            [
                getitem_17,
                getitem_18,
                getitem_19,
                getitem_20,
                getitem_21,
                getitem_22,
                getitem_23,
                getitem_24,
            ]
        )
        getitem_17 = getitem_18 = getitem_19 = getitem_20 = getitem_21 = (
            getitem_22
        ) = getitem_23 = getitem_24 = None
        return cat_1

    for mark_dynamic in [False, True]:
        inp = torch.rand((65536, 8192), dtype=torch.bfloat16, device="cuda")
        if mark_dynamic:
            torch._dynamo.mark_dynamic(inp, 0)
        foo_c = torch.compile(foo)
        torch.testing.assert_close(foo(inp), foo_c(inp))
@skipCUDAIf(
    not SM90OrLater, "uses bfloat16 atomic add instrs which requires SM >= 90"
)
def test_float8_e8m0fnu(self):
    """float8_e8m0fnu support: dtype round-trip casts compile and match eager,
    and a bitwise view to e8m0 compiles without an aten.reshape fallback."""
    device = "cuda"
    dtype = torch.float8_e8m0fnu
    hp_dtype = torch.float32  # and torch.bfloat16

    def foo(x0):
        # Round-trip: high precision -> e8m0 -> high precision.
        x1 = x0.to(dtype)
        x2 = x1.to(hp_dtype)
        return x2

    x0 = torch.randn(16, 16, device=device, dtype=hp_dtype)
    foo_c = torch.compile(foo, backend="inductor", fullgraph=True)
    with torch.no_grad():
        y_c = foo_c(x0)
    self.assertEqual(foo(x0), y_c)

    dtype = torch.float8_e8m0fnu

    def foo(x0):
        # Bitwise reinterpretation of uint8 bytes as e8m0, then flatten.
        x1 = x0 + 1
        x2 = x1.view(dtype).view([16 * 16])
        return x2

    x0 = torch.randint(0, 255, (16, 16), device=device, dtype=torch.uint8)
    foo_c = torch.compile(foo, backend="inductor", fullgraph=True)
    with torch.no_grad():
        result, code = run_and_get_code(foo_c, x0)
        # The view must be handled natively, not via an aten.reshape fallback.
        FileCheck().check("call").check_not("torch.ops.aten.reshape.default(").run(
            code[0]
        )
        self.assertEqual(foo(x0), result)
@unittest.skipIf(
    not config.is_fbcode(),
    "bfloat16 atomic add is only supported in fbcode today #97016",
)
@skipCUDAIf(
    not SM90OrLater, "uses bfloat16 atomic add instrs which requires SM >= 90"
)
def test_atomic_add_bfloat16(self):
    """index_select backward on bf16 should emit tl.atomic_add in the
    generated backward kernel when bf16 atomics are available."""

    def f(x, y):
        return torch.index_select(x, 0, y)

    x = torch.randn(
        2000, 384, dtype=torch.bfloat16, device="cuda", requires_grad=True
    )
    # Many repeated indices -> scatter-add contention in the backward.
    y = torch.ones(713268, dtype=torch.int64, device="cuda")
    x_ref = x.clone().detach().requires_grad_(True)
    y_ref = y.clone().detach()
    out, (_, bw_code) = run_fw_bw_and_get_code(lambda: torch.compile(f)(x, y))
    # Backward kernel should use Triton atomic adds (not a fallback).
    fc = FileCheck()
    fc.check("tl.atomic_add")
    fc.run(bw_code)
    self.assertEqual(f(x_ref, y_ref), out)
def test_red_dtype_mismatch(self):
    """An int64 iota max fused alongside a bf16 embedding expansion must match
    eager with persistent reductions both enabled and disabled.

    Fixed: the non-persistent case used to flip
    ``torch._inductor.config.triton.persistent_reductions`` globally and never
    restore it, leaking the setting into later tests. ``config.patch`` now
    scopes the override to each iteration (patching ``True`` matches the
    previous default-path behavior).
    """
    for per in (True, False):
        torch._dynamo.reset()
        with config.patch({"triton.persistent_reductions": per}):

            def f(arg0_1, arg1_1):
                embedding = torch.ops.aten.embedding.default(arg1_1, arg0_1)
                view = torch.ops.aten.view.default(embedding, [64, 3072])
                unsqueeze = torch.ops.aten.unsqueeze.default(view, 0)
                expand = torch.ops.aten.expand.default(unsqueeze, [576, -1, -1])
                view_1 = torch.ops.aten.view.default(expand, [2, 8, 36, 64, 3072])
                permute = torch.ops.aten.permute.default(view_1, [0, 1, 3, 2, 4])
                clone = torch.ops.aten.clone.default(
                    permute, memory_format=torch.contiguous_format
                )
                view_2 = torch.ops.aten.view.default(clone, [2, 18432, 3072])
                iota = torch.ops.prims.iota.default(
                    36,
                    start=0,
                    step=1,
                    dtype=torch.int64,
                    device="cuda",
                    requires_grad=False,
                )
                view_3 = torch.ops.aten.view.default(iota, [1, 36])
                max_1 = torch.ops.aten.max.default(view_3)
                return (max_1,)

            x = torch.ones(1, 64, device="cuda", dtype=torch.int64)
            y = torch.randn(64, 3072, device="cuda", dtype=torch.bfloat16)
            out = f(x, y)
            self.assertEqual(torch.compile(f)(x, y), out)
@skipCUDAIf(
    not SM90OrLater, "uses bfloat16 atomic add instrs which requires SM >= 90"
)
@unittest.skipIf(
    config.is_fbcode(),
    "bfloat16 atomic add is supported in fbcode, so we won't fallback",
)
def test_index_add_fallback(self):
    """Where bf16 atomic add is unavailable, index_select backward should
    fall back to aten.index_add rather than generating atomics."""

    def f(x, y):
        return torch.index_select(x, 0, y)

    x = torch.randn(
        2000, 384, dtype=torch.bfloat16, device="cuda", requires_grad=True
    )
    # Many repeated indices -> scatter-add in the backward.
    y = torch.ones(713268, dtype=torch.int64, device="cuda")
    x_ref = x.clone().detach().requires_grad_(True)
    y_ref = y.clone().detach()
    out, (_, bw_code) = run_fw_bw_and_get_code(lambda: torch.compile(f)(x, y))
    # Backward must contain the aten.index_add fallback.
    fc = FileCheck()
    fc.check("aten.index_add")
    fc.run(bw_code)
    self.assertEqual(f(x_ref, y_ref), out)
@requires_multigpu()
def test_not_initializing_wrong_device(self):
    """Compiling and running on cuda:1 must not grow peak memory on cuda:0."""
    baseline_stats = torch.cuda.memory_stats("cuda:0")

    @torch.compile()
    def matmul(a, b):
        return a @ b

    lhs = torch.rand([256, 256], device="cuda:1", requires_grad=True)
    rhs = torch.rand([256, 256], device="cuda:1", requires_grad=True)
    matmul(lhs, rhs).sum().backward()

    after_stats = torch.cuda.memory_stats("cuda:0")
    # Peak active memory on device 0 must not have increased.
    self.assertTrue(
        after_stats["active.all.peak"] <= baseline_stats["active.all.peak"]
    )
@config.patch(
    {
        "triton.prefer_nd_tiling": True,
        "triton.max_tiles": 3,
    }
)
def test_3d_tiling(self):
    """With up to 3 tiles allowed, a strided 5-D add should be tiled in 3D
    (the generated kernel mentions znumel)."""
    full_size = (5, 5, 5, 5, 5)
    view_size = (3, 3, 5, 3, 5)

    def strided_input() -> torch.Tensor:
        device = torch.device("cuda")
        full = torch.randn(full_size).to(device)
        # Non-contiguous view of the full tensor.
        return torch.as_strided(full, view_size, full.stride())

    lhs = strided_input()
    rhs = strided_input()
    compiled_add = torch.compile(functools.partial(torch.add))
    result, (code,) = run_and_get_code(compiled_add, lhs, rhs)
    self.assertEqual(result, lhs + rhs)
    self.assertIn("znumel", code)
@xfailIfPy312Plus  # https://github.com/pytorch/pytorch/issues/142032
@unittest.skipIf(config.is_fbcode(), "Dependence on functorch.einops")
def test_repeated_masked_load(self):
    """Chunked nearest-neighbor interpolation of a non-contiguous fp16 tensor:
    repeated masked loads in the generated kernel must not corrupt results."""
    target_size = (8, 2)
    mem_eff_temporal_upsampling_interp_chunks = 2
    from functorch.einops import rearrange

    x = torch.randn(1, 8, 12, 12, 4, dtype=torch.float16, device="cuda")
    x = x.permute(0, 1, 4, 2, 3)  # make non-contiguous
    x = rearrange(x, "b c t h w -> b c t (h w)")

    def interpolate_chunked(x):
        # chunk along c
        chunks = x.chunk(chunks=mem_eff_temporal_upsampling_interp_chunks, dim=1)
        r = []
        for t in chunks:
            # Interpolate in fp32, then cast back to the chunk's dtype.
            r.append(
                torch.nn.functional.interpolate(
                    t.float(), size=target_size, mode="nearest"
                ).to(t.dtype)
            )
        out_chunked = torch.cat(r, dim=1)
        return out_chunked

    out_eager = interpolate_chunked(x)
    out_compiled = torch.compile(interpolate_chunked)(x)
    self.assertEqual(out_eager, out_compiled)
def test_max_autotune_nograd(self):
    """
    https://github.com/pytorch/pytorch/issues/155688
    Smallest repro for max-autotune not working with no_grad
    Before adding __int__ function to torch.utils._sympy.functions.Identity,
    running the max_autotune mode would raise an error:
    TypeError: Expected a number but got Identity
    """

    class ToyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # Each layer consumes one more feature than the previous one; the
            # forward pass grows the feature dim by concatenation (4 -> 9).
            self.linear_layers = nn.ModuleList(
                [
                    nn.Linear(4, 1, bias=True),
                    nn.Linear(5, 1, bias=True),
                    nn.Linear(6, 1, bias=True),
                    nn.Linear(7, 1, bias=True),
                    nn.Linear(8, 1, bias=True),
                ]
            )

        def forward(self, x):
            for layer in self.linear_layers:
                x2 = layer(x)
                x2 = F.relu(x2)
                x = torch.cat((x, x2), dim=1)
            return x

    model = ToyModel().to("cuda")
    input_tensor = torch.randn((2, 4)).to("cuda")
    compile_default = torch.compile(model, mode="default")
    compile_max_autotune = torch.compile(model, mode="max-autotune")
    # Both compile modes must agree under no_grad.
    with torch.no_grad():
        default_output = compile_default(input_tensor)
        max_autotune_output = compile_max_autotune(input_tensor)
    self.assertEqual(default_output, max_autotune_output)
def test_adaptive_avg_pool3d_issue_157248(self):
    """Test for GitHub issue #157248: Conv2d-unsqueeze-AdaptiveAvgPool3d produces incorrect results"""

    class Model(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
            self.adaptive_pool = torch.nn.AdaptiveAvgPool3d((4, 4, 4))

        def forward(self, x):
            x = self.conv(x)
            # This specific unsqueeze position was problematic due to zero strides
            x = x.unsqueeze(1)
            x = self.adaptive_pool(x)
            return x

    model = Model().cuda()
    model.eval()
    # Mix of even, larger, and non-divisible spatial sizes.
    test_cases = [
        (1, 3, 8, 8),
        (2, 3, 16, 16),
        (1, 3, 32, 32),
        (1, 3, 15, 15),
        (2, 3, 13, 13),
    ]
    for batch, channels, h, w in test_cases:
        with self.subTest(input_shape=(batch, channels, h, w)):
            input_tensor = torch.randn(batch, channels, h, w, device="cuda")
            # Test eager mode
            with torch.no_grad():
                eager_output = model(input_tensor)
            # Test compiled mode with inductor
            compiled_model = torch.compile(model, backend="inductor")
            with torch.no_grad():
                compiled_output = compiled_model(input_tensor)
            # They should be identical (or very close)
            self.assertTrue(
                torch.allclose(eager_output, compiled_output, rtol=1e-5, atol=1e-5),
                f"Results differ for input shape {(batch, channels, h, w)}. "
                f"Max diff: {torch.max(torch.abs(eager_output - compiled_output)):.6f}",
            )
@parametrize(
    "quantiles_shape,quantiles_strides,batch_size",
    [
        ((100, 10), (10, 1), 16),  # Contiguous C-order
        ((100, 10), (1, 100), 16),  # Transposed/F-order
        ((80, 12), (1, 80), 16),  # Transposed different size
        ((50, 20), (1, 50), 16),  # Transposed medium
        ((200, 8), (1, 200), 16),  # Transposed large x small
        ((25, 40), (1, 25), 16),  # Transposed small x large
        ((20, 5, 8), (40, 1, 5), 16),  # 3D case with mixed strides
        ((20, 5, 8), (1, 20, 100), 16),  # 3D case different stride order
    ],
)
def test_searchsorted_stride_permutations(
    self, quantiles_shape, quantiles_strides, batch_size
):
    """torch.searchsorted over boundary tensors with permuted/non-contiguous
    strides must agree between eager and compiled execution."""

    class Foo(torch.nn.Module):
        def __init__(self, quantiles: torch.Tensor) -> None:
            super().__init__()
            assert quantiles.shape[0] > 0
            # Transpose so the search dimension ends up last.
            quantiles = quantiles.T
            self.q = torch.nn.Parameter(quantiles, requires_grad=False)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.searchsorted(self.q, x.T).T

    torch.manual_seed(42)
    # Create contiguous tensor first
    numel = 1
    for dim in quantiles_shape:
        numel *= dim
    data = torch.randn(numel, dtype=torch.float32, device="cuda")
    # Create tensor with specified shape and strides
    quantiles = torch.as_strided(
        data, size=quantiles_shape, stride=quantiles_strides
    )
    # searchsorted requires sorted boundaries along the search dimension.
    quantiles = torch.sort(quantiles, dim=0)[0]
    x_shape = (batch_size,) + quantiles_shape[1:]
    x = torch.randn(*x_shape, dtype=torch.float32, device="cuda")
    foo = Foo(quantiles)
    foo_compiled = torch.compile(Foo(quantiles), fullgraph=True)
    # Test eager vs compiled
    with torch.no_grad():
        eager = foo(x)
        compiled = foo_compiled(x)
    self.assertEqual(eager, compiled)
def test_identity_load(self):
    """cat/slice/broadcast indexing must keep the Identity wrapper in the
    generated index expression so it is not illegally simplified away."""
    device = "cuda"

    def f(x, y):
        y2 = torch.cat(
            [
                x[:, 1:],
                y[:, None] + 32 * 2048,
            ],
            dim=1,
        )
        x2 = x[:, 1:, None]
        y3 = y2[:, -1:, None]
        return (
            torch.cat([x2, y3], dim=1)
            + torch.arange(-2048, 0, device=device)[None, None, :]
        ).reshape(1, 32 * 2048)

    # Eager always succeeded.
    eager_out = f(
        torch.zeros(1, 32, dtype=torch.int64, device=device),
        torch.zeros(1, dtype=torch.int32, device=device),
    )
    # The compiled path used to crash before the Identity fix.
    compile_out, code = run_and_get_code(
        torch.compile(f),
        torch.zeros(1, 32, dtype=torch.int64, device=device),
        torch.zeros(1, dtype=torch.int32, device=device),
    )
    # make sure the identity is maintained in the generated index expression
    FileCheck().check("(1 + ((31)").run(code[0])
    self.assertEqual(eager_out, compile_out)
def test_qwen2_7b_sdpa_input_alignment_requires_recompile(self):
# SDPA constraints ensures inputs have alignment (8).
device = "cuda"
def forward(q_proj, k_proj, attn_mask):
scale = 0.08838834764831845 # 1/sqrt(128)
B = attn_mask.size(0)
S = attn_mask.size(3)
D = 128
d_model = q_proj.size(1)
query_states = q_proj.view(B, S, -1, D).transpose(1, 2) # [B, Hq, S, D]
q = query_states.contiguous()
Hkv = k_proj.size(1) // D
Hq = query_states.size(1)
nrepeats = Hq // Hkv
key_states = k_proj.view(B, S, -1, D).transpose(1, 2) # [B, Hkv, S, D]
kv_repeated = key_states[:, :, None, :].expand(B, Hkv, nrepeats, S, D)
kv_repeated = kv_repeated.contiguous()
k = kv_repeated.reshape(B, Hq, S, D)
v = k.clone() # value tensor
inf = torch.scalar_tensor(
float("-inf"), dtype=torch.bfloat16, device=device
)
zero = torch.scalar_tensor(0.0, dtype=torch.bfloat16, device=device)
where = torch.where(condition=attn_mask, input=zero, other=inf)
pad_amount = 8 - (S % 8)
padded = torch.nn.functional.pad(
where, (0, pad_amount), value=0.0
) # pad last-dim
sliced = padded[..., :S] # back to [B,1,S,S]
attn_bias = sliced.expand(B, Hq, S, S)
sdpa_out, logsumexp, seed, offset = (
torch.ops.aten._scaled_dot_product_efficient_attention.default(
q,
k,
v,
attn_bias,
dropout_p=0.0,
is_causal=True,
scale=scale,
compute_log_sumexp=True,
)
)
zeros = torch.zeros(B, S, d_model, device=device, dtype=torch.bfloat16)
zeros = zeros.reshape(B, S, Hq, D)
grad_out = zeros.permute(0, 2, 1, 3)
out = (
torch.ops.aten._scaled_dot_product_efficient_attention_backward.default(
grad_out,
q,
k,
v,
attn_bias,
sdpa_out,
logsumexp,
seed,
offset,
dropout_p=0.0,
scale=scale,
grad_input_mask=[True, True, True, False],
)
)
return out
B = 2
S = 6144
D = 128
Hq = 28
Hkv = 4
example_inputs = (
torch.randn((B * S, Hq * D), dtype=torch.bfloat16, device=device), # q_proj
torch.randn(
(B * S, Hkv * D), dtype=torch.bfloat16, device=device
), # k_proj
torch.zeros((B, 1, S, S), dtype=torch.bool, device=device), # attn_mask
)
correct = forward(*example_inputs)
compiled = torch.compile(forward, dynamic=True)
actual = compiled(*example_inputs)
self.assertEqual(actual, correct)
# run once more with seqlen that isn't divisible by 8
S = 6102
example_inputs = (
torch.randn((S * B, Hq * D), dtype=torch.bfloat16, device=device), # q_proj
torch.randn(
(S * B, Hkv * D), dtype=torch.bfloat16, device=device
), # k_proj
torch.zeros((B, 1, S, S), dtype=torch.bool, device=device), # attn_mask
)
correct = forward(*example_inputs)
actual = compiled(*example_inputs)
self.assertEqual(actual, correct)
@config.patch({"emulate_divison_rounding": True})
def test_truediv_emulate_divison_rounding(self):
from decimal import Decimal
y, x = 7.0, 11.0
@torch.compile
def compiled_divide(x, y):
return x / y
for y_dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
for x_dtype in [
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
]:
y_ten = torch.tensor([y], dtype=y_dtype, device="cuda")
x_ten = torch.tensor([x], dtype=x_dtype, device="cuda")
torch._dynamo.reset()
compiled_div = Decimal(compiled_divide(x_ten, y_ten).item())
eager_div = Decimal((x_ten / y_ten).item())
self.assertEqual(eager_div, compiled_div)
@config.patch({"emulate_divison_rounding": False})
def test_truediv_base_not_bitwise_equivalent(self):
from decimal import Decimal
y, x = 7.0, 11.0
y_ten = torch.tensor([y], dtype=torch.float32, device="cuda")
x_ten = torch.tensor([x], dtype=torch.float32, device="cuda")
compile_out, code = run_and_get_code(
torch.compile(lambda x, y: x / y),
x_ten,
y_ten,
)
compiled_div = Decimal(compile_out.item())
eager_div = Decimal((x_ten / y_ten).item())
self.assertNotEqual(eager_div, compiled_div)
self.assertTrue("div_rn" not in code)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
from torch.testing._internal.inductor_utils import HAS_CUDA_AND_TRITON
if HAS_CUDA_AND_TRITON and not TEST_WITH_ASAN:
run_tests(needs="filelock")
| CudaReproTests |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 44697,
"end": 45280
} | class ____:
"""
Information about the state of the FakeTensor dispatch cache.
"""
hits: int
misses: int
bypasses: dict[str, int]
size: int
# We keep one instantiation of `fake_tensor_converter` active
# for the duration of `with FakeTensorMode()`.
# This allows accurate storage aliasing across invocation of
# different operators. While this will keep all freshly allocated
# tensors alive during `FakeTensorMode`, there will be no
# new allocations of Tensors which have non-meta storage so
# memory should not significantly increase.
| DispatchCacheInfo |
python | facebook__pyre-check | stubs/integration_test/fixture_source/integration_test/list_comprehension.py | {
"start": 271,
"end": 710
} | class ____:
def run(self, command: str) -> str:
sink(command)
return ""
def take_input() -> None:
sinks: List[Sink] = [Sink()]
result = [s.run(source()) for s in sinks]
def inductive_comprehension_sink(arguments: List[str]) -> None:
command = " ".join(argument.lower() for argument in arguments)
sink(command)
def eval_via_comprehension_sink() -> None:
inductive_comprehension_sink(source())
| Sink |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9964
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo.marker.colorbar.title"
_path_str = "scattergeo.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergeo.mar
ker.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/padding.py | {
"start": 420,
"end": 4970
} | class ____(JupyterMixin):
"""Draw space around content.
Example:
>>> print(Padding("Hello", (2, 4), style="on blue"))
Args:
renderable (RenderableType): String or other renderable.
pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
May be specified with 1, 2, or 4 integers (CSS style).
style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
expand (bool, optional): Expand padding to fit available width. Defaults to True.
"""
def __init__(
self,
renderable: "RenderableType",
pad: "PaddingDimensions" = (0, 0, 0, 0),
*,
style: Union[str, Style] = "none",
expand: bool = True,
):
self.renderable = renderable
self.top, self.right, self.bottom, self.left = self.unpack(pad)
self.style = style
self.expand = expand
@classmethod
def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
"""Make padding instance to render an indent.
Args:
renderable (RenderableType): String or other renderable.
level (int): Number of characters to indent.
Returns:
Padding: A Padding instance.
"""
return Padding(renderable, pad=(0, 0, 0, level), expand=False)
@staticmethod
def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
"""Unpack padding specified in CSS style."""
if isinstance(pad, int):
return (pad, pad, pad, pad)
if len(pad) == 1:
_pad = pad[0]
return (_pad, _pad, _pad, _pad)
if len(pad) == 2:
pad_top, pad_right = cast(Tuple[int, int], pad)
return (pad_top, pad_right, pad_top, pad_right)
if len(pad) == 4:
top, right, bottom, left = cast(Tuple[int, int, int, int], pad)
return (top, right, bottom, left)
raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
def __repr__(self) -> str:
return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style)
if self.expand:
width = options.max_width
else:
width = min(
Measurement.get(console, options, self.renderable).maximum
+ self.left
+ self.right,
options.max_width,
)
render_options = options.update_width(width - self.left - self.right)
if render_options.height is not None:
render_options = render_options.update_height(
height=render_options.height - self.top - self.bottom
)
lines = console.render_lines(
self.renderable, render_options, style=style, pad=True
)
_Segment = Segment
left = _Segment(" " * self.left, style) if self.left else None
right = (
[_Segment(f'{" " * self.right}', style), _Segment.line()]
if self.right
else [_Segment.line()]
)
blank_line: Optional[List[Segment]] = None
if self.top:
blank_line = [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.top
if left:
for line in lines:
yield left
yield from line
yield from right
else:
for line in lines:
yield from line
yield from right
if self.bottom:
blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.bottom
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
max_width = options.max_width
extra_width = self.left + self.right
if max_width - extra_width < 1:
return Measurement(max_width, max_width)
measure_min, measure_max = Measurement.get(console, options, self.renderable)
measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
measurement = measurement.with_maximum(max_width)
return measurement
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich import print
print(Padding("Hello, World", (2, 4), style="on blue"))
| Padding |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/unselected/_marker.py | {
"start": 233,
"end": 4086
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl.unselected"
_path_str = "scatterpolargl.unselected.marker"
_valid_props = {"color", "opacity", "size"}
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def size(self):
"""
Sets the marker size of unselected points, applied only when a
selection exists.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolargl
.unselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.unselected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._set_property("size", arg, size)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | davidhalter__jedi | test/completion/parser.py | {
"start": 678,
"end": 744
} | class ____(object):
@property
#? ['str']
def bar(x=str
| Foo |
python | getsentry__sentry | src/sentry/integrations/jira/views/sentry_installation.py | {
"start": 610,
"end": 1826
} | class ____(JiraSentryUIBaseView):
"""
Handles requests (from the Sentry integration in Jira) for HTML to display when
setting up the integration in the Jira UI.
"""
html_file = "sentry/integrations/jira-config.html"
def get(self, request: Request, *args, **kwargs) -> Response:
try:
integration = get_integration_from_request(request, IntegrationProviderSlug.JIRA.value)
except AtlassianConnectValidationError:
return self.get_response({"error_message": UNABLE_TO_VERIFY_INSTALLATION})
except ExpiredSignatureError:
return self.get_response({"refresh_required": True})
# expose a link to the configuration view
signed_data = {
"external_id": integration.external_id,
"metadata": orjson.dumps(integration.metadata).decode(),
}
finish_link = "{}.?signed_params={}".format(
absolute_uri("/extensions/jira/configure/"), sign(salt=SALT, **signed_data)
)
image_path = absolute_uri(get_asset_url("sentry", "images/sentry-glyph-black.png"))
return self.get_response({"finish_link": finish_link, "image_path": image_path})
| JiraSentryInstallationView |
python | django__django | tests/gis_tests/geo3d/tests.py | {
"start": 4519,
"end": 9795
} | class ____(Geo3DLoadingHelper, TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
https://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
# Testing both geometry and geography fields
self.assertTrue(city.point.hasz)
self.assertTrue(city.pointg.hasz)
self.assertEqual(city.point.z, pnt_data[2])
self.assertEqual(city.pointg.z, pnt_data[2])
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name="3D BBox")
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
# Import here as GDAL is required for those imports
from django.contrib.gis.utils import LayerMapError, LayerMapping
point_mapping = {"point": "POINT"}
mpoint_mapping = {"mpoint": "MULTIPOINT"}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
with self.assertRaises(LayerMapError):
LayerMapping(Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_bulk_create_point_field(self):
objs = Point2D.objects.bulk_create([Point2D(), Point2D()])
self.assertEqual(len(objs), 2)
@skipUnlessDBFeature("supports_3d_functions")
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = (
"SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,"
"-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,"
"-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)"
)
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union("point"))["point__union"]
self.assertTrue(union.hasz)
# Ordering of points in the resulting geometry may vary between
# implementations
self.assertEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})
@skipUnlessDBFeature("supports_3d_functions")
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent = City3D.objects.aggregate(Extent3D("point"))["point__extent3d"]
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
check_extent3d(extent)
self.assertIsNone(
City3D.objects.none().aggregate(Extent3D("point"))["point__extent3d"]
)
@skipUnlessDBFeature("supports_3d_functions")
def test_extent3d_filter(self):
self._load_city_data()
extent3d = City3D.objects.aggregate(
ll_cities=Extent3D("point", filter=Q(name__contains="ll"))
)["ll_cities"]
ref_extent3d = (-96.801611, -41.315268, 14.0, 174.783117, 32.782057, 147.0)
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, 6)
@skipUnlessDBFeature("supports_3d_functions")
| Geo3DTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 39285,
"end": 39931
} | class ____(_TestBasicOps, __TestCase):
def setUp(self):
self.enterContext(warnings_helper.check_warnings())
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
super().setUp()
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
| TestBasicOpsMixedStringBytes |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP050.py | {
"start": 378,
"end": 495
} | class ____(
A,
# comment
metaclass=type,
):
...
def foo():
class A(metaclass=type):
...
| B |
python | sqlalchemy__sqlalchemy | test/orm/test_lazy_relations.py | {
"start": 1477,
"end": 29823
} | class ____(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_basic(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
},
)
sess = fixture_session()
q = sess.query(User)
eq_(
[
User(
id=7,
addresses=[Address(id=1, email_address="jack@bean.com")],
)
],
q.filter(users.c.id == 7).all(),
)
def test_needs_parent(self):
"""test the error raised when parent object is not bound."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
},
)
sess = fixture_session()
q = sess.query(User)
u = q.filter(users.c.id == 7).first()
sess.expunge(u)
assert_raises(orm_exc.DetachedInstanceError, getattr, u, "addresses")
def test_orderby(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
order_by=addresses.c.email_address,
)
},
)
q = fixture_session().query(User)
assert [
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
] == q.all()
def test_orderby_secondary(self):
"""tests that a regular mapper select on a single table can
order by a relationship to a second table"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, lazy="select")),
)
q = fixture_session().query(User)
result = (
q.filter(users.c.id == addresses.c.user_id)
.order_by(addresses.c.email_address)
.all()
)
assert [
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=7, addresses=[Address(id=1)]),
] == result
def test_orderby_desc(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="select",
order_by=[sa.desc(addresses.c.email_address)],
)
),
)
sess = fixture_session()
assert [
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=3, email_address="ed@bettyboop.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
] == sess.query(User).all()
def test_no_orphan(self):
"""test that a lazily loaded child object is not marked as an orphan"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, cascade="all,delete-orphan", lazy="select"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
user = sess.get(User, 7)
assert getattr(User, "addresses").hasparent(
attributes.instance_state(user.addresses[0]), optimistic=True
)
assert not sa.orm.class_mapper(Address)._is_orphan(
attributes.instance_state(user.addresses[0])
)
def test_limit(self):
"""test limit operations combined with lazy-load relationships."""
(
users,
items,
order_items,
orders,
Item,
User,
Address,
Order,
addresses,
) = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, lazy="select"
)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
),
"orders": relationship(Order, lazy="select"),
},
)
sess = fixture_session()
q = sess.query(User)
if testing.against("mssql"):
result = q.limit(2).all()
assert self.static.user_all_result[:2] == result
else:
result = q.limit(2).offset(1).all()
assert self.static.user_all_result[1:3] == result
def test_distinct(self):
(
users,
items,
order_items,
orders,
Item,
User,
Address,
Order,
addresses,
) = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, lazy="select"
)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
),
"orders": relationship(Order, lazy="select"),
},
)
sess = fixture_session()
q = sess.query(User)
# use a union all to get a lot of rows to join against
u2 = users.alias("u2")
s = sa.union_all(
u2.select(),
u2.select(),
u2.select(),
).alias("u")
result = q.filter(s.c.id == User.id).order_by(User.id).distinct().all()
eq_(self.static.user_all_result, result)
def test_uselist_false_warning(self):
"""test that multiple rows received by a
uselist=False raises a warning."""
User, users, orders, Order = (
self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"order": relationship(Order, uselist=False)},
)
self.mapper_registry.map_imperatively(Order, orders)
s = fixture_session()
u1 = s.query(User).filter(User.id == 7).one()
assert_warns(sa.exc.SAWarning, getattr, u1, "order")
def test_callable_bind(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
primaryjoin=and_(
users.c.id == addresses.c.user_id,
users.c.name
== bindparam("name", callable_=lambda: "ed"),
),
)
),
)
s = fixture_session()
ed = s.query(User).filter_by(name="ed").one()
eq_(
ed.addresses,
[
Address(id=2, user_id=8),
Address(id=3, user_id=8),
Address(id=4, user_id=8),
],
)
fred = s.query(User).filter_by(name="fred").one()
eq_(fred.addresses, []) # fred is missing
def test_one_to_many_scalar(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
address=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
uselist=False,
)
),
)
q = fixture_session().query(User)
result = q.filter(users.c.id == 7).all()
assert [User(id=7, address=Address(id=1))] == result
def test_many_to_one_binds(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
primary_key=[addresses.c.user_id, addresses.c.email_address],
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
address=relationship(
Address,
uselist=False,
primaryjoin=sa.and_(
users.c.id == addresses.c.user_id,
addresses.c.email_address == "ed@bettyboop.com",
),
)
),
)
q = fixture_session().query(User)
eq_(
[
User(id=7, address=None),
User(id=8, address=Address(id=3)),
User(id=9, address=None),
User(id=10, address=None),
],
list(q),
)
def test_double_w_ac_against_subquery(self):
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 1).alias()
)
closed_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 0).alias()
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(Address, lazy=True),
open_orders=relationship(open_mapper, lazy="select"),
closed_orders=relationship(closed_mapper, lazy="select"),
),
)
self._run_double_test()
def test_double_w_ac(self):
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(Order, orders)
closed_mapper = aliased(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(Address, lazy=True),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
open_mapper.isopen == 1,
users.c.id == open_mapper.user_id,
),
lazy="select",
overlaps="closed_orders",
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closed_mapper.isopen == 0,
users.c.id == closed_mapper.user_id,
),
lazy="select",
overlaps="open_orders",
),
),
)
self._run_double_test()
def _run_double_test(self, no_items=False):
User, Address, Order, Item = self.classes(
"User", "Address", "Order", "Item"
)
q = fixture_session().query(User).order_by(User.id)
def items(*ids):
if no_items:
return {}
else:
return {"items": [Item(id=id_) for id_ in ids]}
def go():
eq_(
[
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3, **items(3, 4, 5))],
closed_orders=[
Order(id=1, **items(1, 2, 3)),
Order(id=5, **items(5)),
],
),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
open_orders=[],
closed_orders=[],
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4, **items(1, 5))],
closed_orders=[Order(id=2, **items(1, 2, 3))],
),
User(id=10),
],
q.all(),
)
if no_items:
self.assert_sql_count(testing.db, go, 10)
else:
self.assert_sql_count(testing.db, go, 15)
sess = fixture_session()
user = sess.get(User, 7)
closed_mapper = User.closed_orders.entity
open_mapper = User.open_orders.entity
eq_(
[Order(id=1), Order(id=5)],
fixture_session()
.query(closed_mapper)
.filter(with_parent(user, User.closed_orders))
.all(),
)
eq_(
[Order(id=3)],
fixture_session()
.query(open_mapper)
.filter(with_parent(user, User.open_orders))
.all(),
)
@testing.combinations(
("plain",), ("cte", testing.requires.ctes), ("subquery",), id_="s"
)
def test_map_to_cte_subq(self, type_):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
if type_ == "plain":
target = users
elif type_ == "cte":
target = select(users).cte()
elif type_ == "subquery":
target = select(users).subquery()
self.mapper_registry.map_imperatively(
User,
target,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
q = sess.query(Address).order_by(Address.id)
eq_(q.all(), self.static.address_user_result)
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword, secondary=item_keywords, lazy="select"
)
),
)
q = fixture_session().query(Item)
assert self.static.item_keyword_result == q.all()
eq_(
self.static.item_keyword_result[0:2],
q.join(Item.keywords).filter(keywords.c.name == "red").all(),
)
def test_uses_get(self):
"""test that a simple many-to-one lazyload optimizes
to use query.get()."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
for pj in (
None,
users.c.id == addresses.c.user_id,
addresses.c.user_id == users.c.id,
):
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(
self.mapper_registry.map_imperatively(User, users),
lazy="select",
primaryjoin=pj,
)
),
)
with fixture_session() as sess:
# load address
a1 = (
sess.query(Address)
.filter_by(email_address="ed@wood.com")
.one()
)
# load user that is attached to the address
u1 = sess.get(User, 8)
def go():
# lazy load of a1.user should get it from the session
assert a1.user is u1
self.assert_sql_count(testing.db, go, 0)
sa.orm.clear_mappers()
def test_use_get_lambda_key_wont_go_stale(self):
"""test [ticket:6055]"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
um = self.mapper_registry.map_imperatively(User, users)
am = self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
is_true(am.relationships.user._lazy_strategy.use_get)
with fixture_session() as sess:
a1 = sess.get(Address, 2)
eq_(a1.user.id, 8)
um._reset_memoizations()
with fixture_session() as sess:
a1 = sess.get(Address, 2)
eq_(a1.user.id, 8)
@testing.only_on("sqlite")
def test_annotated_fn_criteria(self, registry, connection):
"""this test is a secondary test for the compilation of functions
that are annotated.
"""
@registry.mapped
class A:
__tablename__ = "a"
id = Column(Integer, primary_key=True)
_date = Column(Date, default=func.current_date())
b_id = Column(Integer, ForeignKey("b.id"))
b = relationship("B")
@registry.mapped
class B:
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_s = relationship(
"A",
primaryjoin="and_(B.id == A.b_id, "
"A._date >= func.current_date())",
viewonly=True,
)
registry.metadata.create_all(connection)
with Session(connection) as sess:
b1 = B(id=1)
a1 = A(b=b1)
sess.add_all([a1, b1])
sess.commit()
is_(sess.get(B, 1).a_s[0], a1)
def test_uses_get_compatible_types(self):
"""test the use_get optimization with compatible
but non-identical types"""
User, Address = self.classes.User, self.classes.Address
class IntDecorator(TypeDecorator):
impl = Integer
cache_ok = True
class SmallintDecorator(TypeDecorator):
impl = SmallInteger
cache_ok = True
class SomeDBInteger(sa.Integer):
pass
for tt in [
Integer,
SmallInteger,
IntDecorator,
SmallintDecorator,
SomeDBInteger,
]:
m = sa.MetaData()
users = Table(
"users",
m,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(30), nullable=False),
)
addresses = Table(
"addresses",
m,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("user_id", tt, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(
self.mapper_registry.map_imperatively(User, users)
)
),
)
with fixture_session() as sess:
# load address
a1 = (
sess.query(Address)
.filter_by(email_address="ed@wood.com")
.one()
)
# load user that is attached to the address
u1 = sess.get(User, 8)
def go():
# lazy load of a1.user should get it from the session
assert a1.user is u1
self.assert_sql_count(testing.db, go, 0)
sa.orm.clear_mappers()
def test_many_to_one(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(
self.mapper_registry.map_imperatively(User, users),
lazy="select",
)
),
)
sess = fixture_session()
q = sess.query(Address)
a = q.filter(addresses.c.id == 1).one()
assert a.user is not None
u1 = sess.get(User, 7)
assert a.user is u1
def test_backrefs_dont_lazyload(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session(autoflush=False)
ad = sess.query(Address).filter_by(id=1).one()
assert ad.user.id == 7
def go():
ad.user = None
assert ad.user is None
self.assert_sql_count(testing.db, go, 0)
u1 = sess.query(User).filter_by(id=7).one()
def go():
assert ad not in u1.addresses
self.assert_sql_count(testing.db, go, 1)
sess.expire(u1, ["addresses"])
def go():
assert ad in u1.addresses
self.assert_sql_count(testing.db, go, 1)
sess.expire(u1, ["addresses"])
ad2 = Address()
def go():
ad2.user = u1
assert ad2.user is u1
self.assert_sql_count(testing.db, go, 0)
def go():
assert ad2 in u1.addresses
self.assert_sql_count(testing.db, go, 1)
| LazyTest |
python | allegroai__clearml | clearml/backend_interface/metrics/events.py | {
"start": 7323,
"end": 7863
} | class ____(MetricsEventAdapter):
def __init__(self, metric: str, variant: str, src: str, iter: int = 0, **kwargs: Any) -> None:
self._url = src
parts = urlparse(src)
self._key = urlunparse(("", "", parts.path, parts.params, parts.query, parts.fragment))
super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs)
def get_api_event(self) -> "events.MetricsImageEvent":
return events.MetricsImageEvent(url=self._url, key=self._key, **self._get_base_dict())
| ImageEventNoUpload |
python | apache__airflow | providers/opensearch/src/airflow/providers/opensearch/operators/opensearch.py | {
"start": 5724,
"end": 8006
} | class ____(BaseOperator):
"""
Add a new document to a given Index or overwrite an existing one.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpenSearchAddDocumentOperator`
:param index_name: The name of the index to put the document.
:param document: A dictionary representation of the document.
:param document_id: The id for the document in the index.
:param doc_class: A Document subclassed object using opensearch-dsl
:param opensearch_conn_id: opensearch connection to use
"""
def __init__(
self,
*,
index_name: str | None = None,
document: dict[str, Any] | None = None,
doc_id: int | None = None,
doc_class: Any | None = None,
opensearch_conn_id: str = "opensearch_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.index_name = index_name
self.document = document
self.doc_id = doc_id
self.doc_class = doc_class
self.opensearch_conn_id = opensearch_conn_id
@cached_property
def hook(self) -> OpenSearchHook:
"""Get an instance of an OpenSearchHook."""
return OpenSearchHook(open_search_conn_id=self.opensearch_conn_id, log_query=False)
def execute(self, context: Context) -> Any:
"""Save a document to a given index on an OpenSearch cluster."""
if self.doc_class is not None:
try:
doc = self.doc_class.init(using=self.hook.client)
result = doc.save(using=self.hook.client)
except OpenSearchException as e:
raise AirflowException(e)
elif self.index_name is not None and self.document is not None and self.doc_id is not None:
try:
result = self.hook.index(
index_name=self.index_name, document=self.document, doc_id=self.doc_id
)
except OpenSearchException as e:
raise AirflowException(e)
else:
raise AirflowException(
"Index name, document dictionary and doc_id or a Document subclassed object is required."
)
return result
| OpenSearchAddDocumentOperator |
python | walkccc__LeetCode | solutions/278. First Bad Version/278.py | {
"start": 0,
"end": 205
} | class ____:
def firstBadVersion(self, n: int) -> int:
l = 1
r = n
while l < r:
m = (l + r) >> 1
if isBadVersion(m):
r = m
else:
l = m + 1
return l
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 68051,
"end": 68423
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.subm = TwoLayerLinearModel()
self.fc = nn.Linear(5, 5)
def forward(self, x):
x = self.subm(x)
x = self.fc(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return self.subm.get_example_inputs()
| LinearModelWithSubmodule |
python | allegroai__clearml | clearml/utilities/requests_toolbelt/multipart/decoder.py | {
"start": 481,
"end": 850
} | class ____(Exception):
pass
def _header_parser(string, encoding):
major = sys.version_info[0]
if major == 3:
string = string.decode(encoding)
headers = email.parser.HeaderParser().parsestr(string).items()
return (
(encode_with(k, encoding), encode_with(v, encoding))
for k, v in headers
)
| NonMultipartContentTypeException |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Data.py | {
"start": 13164,
"end": 13543
} | class ____(CtrlNode):
"""Calculate the standard deviation of an array across an axis.
"""
nodeName = 'Stdev'
uiTemplate = [
('axis', 'intSpin', {'value': -0, 'min': -1, 'max': 1000000}),
]
def processData(self, data):
s = self.stateGroup.state()
ax = None if s['axis'] == -1 else s['axis']
return data.std(axis=ax)
| Stdev |
python | squidfunk__mkdocs-material | material/plugins/blog/structure/options.py | {
"start": 1654,
"end": 2110
} | class ____(Dict[str, datetime]):
# Initialize date dictionary
def __init__(self, data: dict):
super().__init__(data)
# Ensure presence of `date.created`
self.created: datetime = data["created"]
# Allow attribute access
def __getattr__(self, name: str):
if name in self:
return self[name]
# -----------------------------------------------------------------------------
# Post date option
| DateDict |
python | encode__httpx | httpx/_client.py | {
"start": 4555,
"end": 5330
} | class ____(AsyncByteStream):
"""
An async byte stream that is bound to a given response instance, and that
ensures the `response.elapsed` is set once the response is closed.
"""
def __init__(
self, stream: AsyncByteStream, response: Response, start: float
) -> None:
self._stream = stream
self._response = response
self._start = start
async def __aiter__(self) -> typing.AsyncIterator[bytes]:
async for chunk in self._stream:
yield chunk
async def aclose(self) -> None:
elapsed = time.perf_counter() - self._start
self._response.elapsed = datetime.timedelta(seconds=elapsed)
await self._stream.aclose()
EventHook = typing.Callable[..., typing.Any]
| BoundAsyncStream |
python | realpython__materials | python-class/animals.py | {
"start": 383,
"end": 460
} | class ____(Mammal):
def walk(self):
print("The cat is walking")
| Cat |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0013_add_optout_email_config_file_deprecation.py | {
"start": 149,
"end": 991
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("core", "0012_add_newsletter_setting"),
]
operations = [
migrations.AddField(
model_name="historicaluserprofile",
name="optout_email_config_file_deprecation",
field=models.BooleanField(
default=False,
null=True,
verbose_name="Opt-out from email about 'Config file deprecation'",
),
),
migrations.AddField(
model_name="userprofile",
name="optout_email_config_file_deprecation",
field=models.BooleanField(
default=False,
null=True,
verbose_name="Opt-out from email about 'Config file deprecation'",
),
),
]
| Migration |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_katz_centrality.py | {
"start": 9958,
"end": 10727
} | class ____(TestKatzCentralityDirected):
@classmethod
def setup_class(cls):
global np
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
super().setup_class()
def test_katz_centrality_weighted(self):
G = self.G
alpha = self.G.alpha
p = nx.katz_centrality_numpy(G, alpha, weight="weight")
for a, b in zip(list(p.values()), self.G.evc):
assert a == pytest.approx(b, abs=1e-7)
def test_katz_centrality_unweighted(self):
H = self.H
alpha = self.H.alpha
p = nx.katz_centrality_numpy(H, alpha, weight="weight")
for a, b in zip(list(p.values()), self.H.evc):
assert a == pytest.approx(b, abs=1e-7)
| TestKatzCentralityDirectedNumpy |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/dml.py | {
"start": 1754,
"end": 3183
} | class ____(SyntaxExtension, ClauseElement):
stringify_dialect = "mysql"
__visit_name__ = "mysql_dml_limit_clause"
_traverse_internals: _TraverseInternalsType = [
("_limit_clause", InternalTraversal.dp_clauseelement),
]
def __init__(self, limit: _LimitOffsetType):
self._limit_clause = coercions.expect(
roles.LimitOffsetRole, limit, name=None, type_=None
)
def apply_to_update(self, update_stmt: Update) -> None:
update_stmt.apply_syntax_extension_point(
self.append_replacing_same_type, "post_criteria"
)
def apply_to_delete(self, delete_stmt: Delete) -> None:
delete_stmt.apply_syntax_extension_point(
self.append_replacing_same_type, "post_criteria"
)
def insert(table: _DMLTableArgument) -> Insert:
"""Construct a MySQL/MariaDB-specific variant :class:`_mysql.Insert`
construct.
.. container:: inherited_member
The :func:`sqlalchemy.dialects.mysql.insert` function creates
a :class:`sqlalchemy.dialects.mysql.Insert`. This class is based
on the dialect-agnostic :class:`_sql.Insert` construct which may
be constructed using the :func:`_sql.insert` function in
SQLAlchemy Core.
The :class:`_mysql.Insert` construct includes additional methods
:meth:`_mysql.Insert.on_duplicate_key_update`.
"""
return Insert(table)
| DMLLimitClause |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/multi_agent_workflow.py | {
"start": 2797,
"end": 2918
} | class ____(WorkflowMeta, ABCMeta):
"""Metaclass for AgentWorkflow that inherits from WorkflowMeta."""
| AgentWorkflowMeta |
python | crytic__slither | plugin_example/slither_my_plugin/detectors/example.py | {
"start": 91,
"end": 823
} | class ____(AbstractDetector): # pylint: disable=too-few-public-methods
"""
Documentation
"""
ARGUMENT = "mydetector" # slither will launch the detector with slither.py --mydetector
HELP = "Help printed by slither"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://www.example.com/#example-detector"
WIKI_TITLE = "example detector"
WIKI_DESCRIPTION = "This is an example detector that always generates a finding"
WIKI_EXPLOIT_SCENARIO = "Scenario goes here"
WIKI_RECOMMENDATION = "Customize the detector"
def _detect(self):
info = "This is an example!"
json = self.generate_result(info)
return [json]
| Example |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_organization_uptime_summary.py | {
"start": 12561,
"end": 14950
} | class ____(OrganizationUptimeSummaryBaseTest, UptimeResultEAPTestCase):
__test__ = True
def store_uptime_data(
self,
subscription_id,
check_status,
incident_status=IncidentStatus.NO_INCIDENT,
scheduled_check_time=None,
check_duration_us=None,
):
kwargs = {
"subscription_id": uuid.UUID(subscription_id).hex,
"guid": uuid.UUID(subscription_id).hex,
"request_url": "https://santry.io",
"check_status": check_status,
"incident_status": incident_status,
"scheduled_check_time": scheduled_check_time,
}
if check_duration_us is not None:
kwargs["check_duration_us"] = check_duration_us
uptime_result = self.create_eap_uptime_result(**kwargs)
self.store_uptime_results([uptime_result])
def test_average_duration_available(self) -> None:
"""Test that average duration is available and correctly calculated for EAP uptime results."""
duration_subscription_id = uuid.uuid4().hex
duration_subscription = self.create_uptime_subscription(
url="https://duration-test.com", subscription_id=duration_subscription_id
)
duration_detector = self.create_uptime_detector(uptime_subscription=duration_subscription)
# Store checks with specific durations
durations = [100000, 200000, 300000] # 100ms, 200ms, 300ms in microseconds
for duration in durations:
self.store_uptime_data(
duration_subscription_id,
"success",
check_duration_us=duration,
)
with self.feature(self.features):
response = self.get_success_response(
self.organization.slug,
project=[self.project.id],
uptimeDetectorId=[str(duration_detector.id)],
since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(),
until=datetime.now(timezone.utc).timestamp(),
)
assert response.data is not None
data = response.data
stats = data[duration_detector.id]
assert stats["totalChecks"] == 3
# Average should be (100000 + 200000 + 300000) / 3 = 200000
assert stats["avgDurationUs"] == 200000.0
| OrganizationUptimeSummaryEAPTest |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 13816,
"end": 16062
} | class ____(AnsibleSerializable, metaclass=abc.ABCMeta):
_validation_allow_subclasses = True
_validation_auto_enabled = True
def _as_dict(self) -> t.Dict[str, t.Any]:
# omit None values when None is the field default
# DTFIX-FUTURE: this implementation means we can never change the default on fields which have None for their default
# other defaults can be changed -- but there's no way to override this behavior either way for other default types
# it's a trip hazard to have the default logic here, rather than per field (or not at all)
# consider either removing the filtering or requiring it to be explicitly set per field using dataclass metadata
fields = ((field, getattr(self, field.name)) for field in dataclasses.fields(self))
return {field.name: value for field, value in fields if value is not None or field.default is not None}
@classmethod
def _from_dict(cls, d: t.Dict[str, t.Any]) -> t.Self:
# DTFIX-FUTURE: optimize this to avoid the dataclasses fields metadata and get_origin stuff at runtime
type_hints = t.get_type_hints(cls)
mutated_dict: dict[str, t.Any] | None = None
for field in dataclasses.fields(cls):
if t.get_origin(type_hints[field.name]) is tuple: # NOTE: only supports bare tuples, not optional or inside a union
if type(field_value := d.get(field.name)) is list: # pylint: disable=unidiomatic-typecheck
if mutated_dict is None:
mutated_dict = d.copy()
mutated_dict[field.name] = tuple(field_value)
return cls(**(mutated_dict or d))
def __init_subclass__(cls, **kwargs) -> None:
super(AnsibleSerializableDataclass, cls).__init_subclass__(**kwargs) # cannot use super() without arguments when using slots
if cls._validation_auto_enabled:
try:
_dataclass_validation.inject_post_init_validation(cls, cls._validation_allow_subclasses) # code gen a real __post_init__ method
except Exception as ex:
raise Exception(f'Validation code generation failed on {cls}.') from ex
| AnsibleSerializableDataclass |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.