language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 18229,
"end": 18662
} | class ____(HTTPException):
"""*424* `Failed Dependency`
Used if the method could not be performed on the resource
because the requested action depended on another action and that action failed.
"""
code = 424
description = (
"The method could not be performed on the resource because the"
" requested action depended on another action and that action"
" failed."
)
| FailedDependency |
python | python-openxml__python-docx | src/docx/oxml/text/run.py | {
"start": 8825,
"end": 9432
} | class ____(BaseOxmlElement):
"""`<w:t>` element, containing a sequence of characters within a run."""
def __str__(self) -> str:
"""Text contained in this element, the empty string if it has no content.
This property allows this run inner-content element to be queried for its text
the same way as other run-content elements are. In particular, this never
returns None, as etree._Element does when there is no content.
"""
return self.text or ""
# ------------------------------------------------------------------------------------
# Utility
| CT_Text |
python | doocs__leetcode | solution/2000-2099/2019.The Score of Students Solving Math Expression/Solution.py | {
"start": 0,
"end": 1210
} | class ____:
def scoreOfStudents(self, s: str, answers: List[int]) -> int:
def cal(s: str) -> int:
res, pre = 0, int(s[0])
for i in range(1, n, 2):
if s[i] == "*":
pre *= int(s[i + 1])
else:
res += pre
pre = int(s[i + 1])
res += pre
return res
n = len(s)
x = cal(s)
m = (n + 1) >> 1
f = [[set() for _ in range(m)] for _ in range(m)]
for i in range(m):
f[i][i] = {int(s[i << 1])}
for i in range(m - 1, -1, -1):
for j in range(i, m):
for k in range(i, j):
for l in f[i][k]:
for r in f[k + 1][j]:
if s[k << 1 | 1] == "+" and l + r <= 1000:
f[i][j].add(l + r)
elif s[k << 1 | 1] == "*" and l * r <= 1000:
f[i][j].add(l * r)
cnt = Counter(answers)
ans = cnt[x] * 5
for k, v in cnt.items():
if k != x and k in f[0][m - 1]:
ans += v << 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 848000,
"end": 848392
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ProjectV2Field", graphql_name="node")
"""The item at the end of the edge."""
| ProjectV2FieldEdge |
python | plotly__plotly.py | plotly/graph_objs/icicle/marker/_colorbar.py | {
"start": 233,
"end": 61611
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle.marker"
_path_str = "icicle.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.icicle.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.icicle.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.icicle.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.icicle.marker.
colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
icicle.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.icicle.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.icicle.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.icicle.marker.c
olorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.icicle
.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
icicle.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.icicle.marker.colorbar.Tit
le` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.icicle.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.icicle.marker.c
olorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.icicle
.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
icicle.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.icicle.marker.colorbar.Tit
le` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | facebook__pyre-check | client/tests/error_test.py | {
"start": 385,
"end": 7310
} | class ____(unittest.TestCase):
fake_error = {
"line": 4,
"column": 11,
"stop_line": 4,
"stop_column": 21,
"path": "c.py",
"code": -1,
"name": "Revealed type",
"description": "Fake error",
"define": "c.$toplevel",
}
def test_json_parsing(self) -> None:
def assert_parsed(json: Dict[str, Any], expected: Error) -> None:
self.assertEqual(Error.from_json(json), expected)
def assert_not_parsed(json: Dict[str, Any]) -> None:
with self.assertRaises(ErrorParsingFailure):
Error.from_json(json)
assert_not_parsed({})
assert_not_parsed({"derp": 42})
assert_not_parsed({"line": "abc", "column": []})
assert_not_parsed({"line": 1, "column": 1})
assert_parsed(
{
"line": 1,
"column": 1,
"stop_line": 2,
"stop_column": 2,
"path": "test.py",
"code": 1,
"name": "Some name",
"description": "Some description",
},
expected=Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("test.py"),
code=1,
name="Some name",
description="Some description",
),
)
assert_parsed(
{
"line": 2,
"column": 2,
"stop_line": 3,
"stop_column": 3,
"path": Path("test.py"),
"code": 2,
"name": "Some name",
"description": "Some description",
"long_description": "Some long description",
},
expected=Error(
line=2,
column=2,
stop_line=3,
stop_column=3,
path=Path("test.py"),
code=2,
name="Some name",
description="Some description",
),
)
assert_parsed(
{
"line": 3,
"column": 3,
"stop_line": 4,
"stop_column": 4,
"path": Path("test.py"),
"code": 3,
"name": "Some name",
"description": "Some description",
"concise_description": "Some concise description",
},
expected=Error(
line=3,
column=3,
stop_line=4,
stop_column=4,
path=Path("test.py"),
code=3,
name="Some name",
description="Some description",
concise_description="Some concise description",
),
)
def test_taint_configuration_error_parsing(self) -> None:
def assert_parsed(
json: Dict[str, Any], expected: TaintConfigurationError
) -> None:
self.assertEqual(TaintConfigurationError.from_json(json), expected)
def assert_not_parsed(json: Dict[str, Any]) -> None:
with self.assertRaises(ErrorParsingFailure):
TaintConfigurationError.from_json(json)
assert_not_parsed({})
assert_not_parsed({"derp": 42})
assert_not_parsed({"line": "abc", "column": []})
assert_not_parsed({"line": 1, "column": 1})
assert_parsed(
{
"path": "test.py",
"description": "Some description",
"code": 1001,
"location": None,
},
expected=TaintConfigurationError(
path=Path("test.py"),
description="Some description",
code=1001,
start_line=None,
start_column=None,
stop_line=None,
stop_column=None,
),
)
assert_parsed(
{
"path": None,
"description": "Some description",
"code": 1001,
"location": None,
},
expected=TaintConfigurationError(
path=None,
description="Some description",
code=1001,
start_line=None,
start_column=None,
stop_line=None,
stop_column=None,
),
)
assert_parsed(
{
"path": None,
"description": "Some description",
"code": 1001,
"location": {
"start": {"line": 1, "column": 2},
"stop": {"line": 3, "column": 4},
},
},
expected=TaintConfigurationError(
path=None,
description="Some description",
code=1001,
start_line=1,
start_column=2,
stop_line=3,
stop_column=4,
),
)
def test_model_verification_error_parsing(self) -> None:
def assert_parsed(
json: Dict[str, Any], expected: ModelVerificationError
) -> None:
self.assertEqual(ModelVerificationError.from_json(json), expected)
def assert_not_parsed(json: Dict[str, Any]) -> None:
with self.assertRaises(ErrorParsingFailure):
ModelVerificationError.from_json(json)
assert_not_parsed({})
assert_not_parsed({"derp": 42})
assert_not_parsed({"line": "abc", "column": []})
assert_not_parsed({"line": 1, "column": 1})
assert_parsed(
{
"line": 1,
"column": 1,
"stop_line": 2,
"stop_column": 3,
"path": "test.py",
"description": "Some description",
"code": 1001,
},
expected=ModelVerificationError(
line=1,
column=1,
stop_line=2,
stop_column=3,
path=Path("test.py"),
description="Some description",
code=1001,
),
)
assert_parsed(
{
"line": 1,
"column": 1,
"stop_line": 2,
"stop_column": 3,
"path": None,
"description": "Some description",
"code": 1001,
},
expected=ModelVerificationError(
line=1,
column=1,
stop_line=2,
stop_column=3,
path=None,
description="Some description",
code=1001,
),
)
| ErrorTest |
python | huggingface__transformers | src/transformers/models/blt/modeling_blt.py | {
"start": 19426,
"end": 20109
} | class ____(PreTrainedModel):
config: BltConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["BltTransformerLayer"]
_can_compile_fullgraph = False # static cache cannot have different shapes for each layer
_supports_sdpa = True
_supports_flash_attn = False
_supports_flex_attn = False
_supports_attention_backend = False
_can_record_outputs = {
"hidden_states": OutputRecorder(BltTransformerLayer, index=0, layer_name="local_decoder"),
"attentions": OutputRecorder(BltSelfAttention, index=1, layer_name="local_decoder"),
}
| BltPreTrainedModel |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 109133,
"end": 109231
} | class ____(BaseModel, extra="forbid"):
sample: "Sample" = Field(..., description="")
| SampleQuery |
python | django__django | tests/template_tests/test_response.py | {
"start": 13242,
"end": 13769
} | class ____(SimpleTestCase):
def test_custom_urlconf(self):
response = self.client.get("/template_response_view/")
self.assertContains(response, "This is where you can find the snark: /snark/")
@modify_settings(
MIDDLEWARE={
"append": [
"django.middleware.cache.FetchFromCacheMiddleware",
"django.middleware.cache.UpdateCacheMiddleware",
],
},
)
@override_settings(
CACHE_MIDDLEWARE_SECONDS=2, ROOT_URLCONF="template_tests.alternate_urls"
)
| CustomURLConfTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 28992,
"end": 30425
} | class ____(graphene.Mutation):
"""Reports runless events for an asset or a subset of its partitions."""
Output = graphene.NonNull(GrapheneReportRunlessAssetEventsResult)
class Arguments:
eventParams = graphene.Argument(graphene.NonNull(GrapheneReportRunlessAssetEventsParams))
class Meta:
name = "ReportRunlessAssetEventsMutation"
@capture_error
@require_permission_check(Permissions.REPORT_RUNLESS_ASSET_EVENTS)
def mutate(
self, graphene_info: ResolveInfo, eventParams: GrapheneReportRunlessAssetEventsParams
):
event_type = eventParams["eventType"].to_dagster_event_type()
asset_key = AssetKey.from_graphql_input(eventParams["assetKey"])
partition_keys = eventParams.get("partitionKeys", None)
description = eventParams.get("description", None)
reporting_user_tags = {**graphene_info.context.get_reporting_user_tags()}
asset_graph = graphene_info.context.asset_graph
assert_permission_for_asset_graph(
graphene_info, asset_graph, [asset_key], Permissions.REPORT_RUNLESS_ASSET_EVENTS
)
return report_runless_asset_events(
graphene_info,
event_type=event_type,
asset_key=asset_key,
partition_keys=partition_keys,
description=description,
tags=reporting_user_tags,
)
| GrapheneReportRunlessAssetEventsMutation |
python | facebook__pyre-check | client/commands/profile.py | {
"start": 1124,
"end": 1273
} | class ____:
name: str
worker_id: int
pid: int
timestamp: int
tags: Dict[str, str]
@dataclasses.dataclass(frozen=True)
| EventMetadata |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_magazine.py | {
"start": 592,
"end": 11734
} | class ____(fixtures.MappedTest):
@classmethod
def setup_classes(cls):
Base = cls.Comparable
class Publication(Base):
pass
class Issue(Base):
pass
class Location(Base):
pass
class LocationName(Base):
pass
class PageSize(Base):
pass
class Magazine(Base):
pass
class Page(Base):
pass
class MagazinePage(Page):
pass
class ClassifiedPage(MagazinePage):
pass
@classmethod
def define_tables(cls, metadata):
Table(
"publication",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(45), default=""),
)
Table(
"issue",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("publication_id", Integer, ForeignKey("publication.id")),
Column("issue", Integer),
)
Table(
"location",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("issue_id", Integer, ForeignKey("issue.id")),
Column("ref", CHAR(3), default=""),
Column(
"location_name_id", Integer, ForeignKey("location_name.id")
),
)
Table(
"location_name",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(45), default=""),
)
Table(
"magazine",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("location_id", Integer, ForeignKey("location.id")),
Column("page_size_id", Integer, ForeignKey("page_size.id")),
)
Table(
"page",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("page_no", Integer),
Column("type", CHAR(1), default="p"),
)
Table(
"magazine_page",
metadata,
Column(
"page_id", Integer, ForeignKey("page.id"), primary_key=True
),
Column("magazine_id", Integer, ForeignKey("magazine.id")),
Column("orders", Text, default=""),
)
Table(
"classified_page",
metadata,
Column(
"magazine_page_id",
Integer,
ForeignKey("magazine_page.page_id"),
primary_key=True,
),
Column("titles", String(45), default=""),
)
Table(
"page_size",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("width", Integer),
Column("height", Integer),
Column("name", String(45), default=""),
)
def _generate_data(self):
(
Publication,
Issue,
Location,
LocationName,
PageSize,
Magazine,
Page,
MagazinePage,
ClassifiedPage,
) = self.classes(
"Publication",
"Issue",
"Location",
"LocationName",
"PageSize",
"Magazine",
"Page",
"MagazinePage",
"ClassifiedPage",
)
london = LocationName(name="London")
pub = Publication(name="Test")
issue = Issue(issue=46, publication=pub)
location = Location(ref="ABC", name=london, issue=issue)
page_size = PageSize(name="A4", width=210, height=297)
magazine = Magazine(location=location, size=page_size)
ClassifiedPage(magazine=magazine, page_no=1)
MagazinePage(magazine=magazine, page_no=2)
ClassifiedPage(magazine=magazine, page_no=3)
return pub
def _setup_mapping(self, use_unions, use_joins):
(
Publication,
Issue,
Location,
LocationName,
PageSize,
Magazine,
Page,
MagazinePage,
ClassifiedPage,
) = self.classes(
"Publication",
"Issue",
"Location",
"LocationName",
"PageSize",
"Magazine",
"Page",
"MagazinePage",
"ClassifiedPage",
)
self.mapper_registry.map_imperatively(
Publication, self.tables.publication
)
self.mapper_registry.map_imperatively(
Issue,
self.tables.issue,
properties={
"publication": relationship(
Publication,
backref=backref("issues", cascade="all, delete-orphan"),
)
},
)
self.mapper_registry.map_imperatively(
LocationName, self.tables.location_name
)
self.mapper_registry.map_imperatively(
Location,
self.tables.location,
properties={
"issue": relationship(
Issue,
backref=backref(
"locations",
lazy="joined",
cascade="all, delete-orphan",
),
),
"name": relationship(LocationName),
},
)
self.mapper_registry.map_imperatively(PageSize, self.tables.page_size)
self.mapper_registry.map_imperatively(
Magazine,
self.tables.magazine,
properties={
"location": relationship(
Location, backref=backref("magazine", uselist=False)
),
"size": relationship(PageSize),
},
)
if use_unions:
page_join = polymorphic_union(
{
"m": self.tables.page.join(self.tables.magazine_page),
"c": self.tables.page.join(self.tables.magazine_page).join(
self.tables.classified_page
),
"p": self.tables.page.select()
.where(self.tables.page.c.type == "p")
.subquery(),
},
None,
"page_join",
)
page_mapper = self.mapper_registry.map_imperatively(
Page,
self.tables.page,
with_polymorphic=("*", page_join),
polymorphic_on=page_join.c.type,
polymorphic_identity="p",
)
elif use_joins:
page_join = self.tables.page.outerjoin(
self.tables.magazine_page
).outerjoin(self.tables.classified_page)
page_mapper = self.mapper_registry.map_imperatively(
Page,
self.tables.page,
with_polymorphic=("*", page_join),
polymorphic_on=self.tables.page.c.type,
polymorphic_identity="p",
)
else:
page_mapper = self.mapper_registry.map_imperatively(
Page,
self.tables.page,
polymorphic_on=self.tables.page.c.type,
polymorphic_identity="p",
)
if use_unions:
magazine_join = polymorphic_union(
{
"m": self.tables.page.join(self.tables.magazine_page),
"c": self.tables.page.join(self.tables.magazine_page).join(
self.tables.classified_page
),
},
None,
"page_join",
)
magazine_page_mapper = self.mapper_registry.map_imperatively(
MagazinePage,
self.tables.magazine_page,
with_polymorphic=("*", magazine_join),
inherits=page_mapper,
polymorphic_identity="m",
properties={
"magazine": relationship(
Magazine,
backref=backref(
"pages", order_by=magazine_join.c.page_no
),
)
},
)
elif use_joins:
magazine_join = self.tables.page.join(
self.tables.magazine_page
).outerjoin(self.tables.classified_page)
magazine_page_mapper = self.mapper_registry.map_imperatively(
MagazinePage,
self.tables.magazine_page,
with_polymorphic=("*", magazine_join),
inherits=page_mapper,
polymorphic_identity="m",
properties={
"magazine": relationship(
Magazine,
backref=backref(
"pages", order_by=self.tables.page.c.page_no
),
)
},
)
else:
magazine_page_mapper = self.mapper_registry.map_imperatively(
MagazinePage,
self.tables.magazine_page,
inherits=page_mapper,
polymorphic_identity="m",
properties={
"magazine": relationship(
Magazine,
backref=backref(
"pages", order_by=self.tables.page.c.page_no
),
)
},
)
self.mapper_registry.map_imperatively(
ClassifiedPage,
self.tables.classified_page,
inherits=magazine_page_mapper,
polymorphic_identity="c",
primary_key=[self.tables.page.c.id],
)
@testing.combinations(
("unions", True, False),
("joins", False, True),
("plain", False, False),
id_="iaa",
)
def test_magazine_round_trip(self, use_unions, use_joins):
self._setup_mapping(use_unions, use_joins)
Publication = self.classes.Publication
session = fixture_session()
pub = self._generate_data()
session.add(pub)
session.commit()
session.close()
p = session.query(Publication).filter(Publication.name == "Test").one()
test_pub = self._generate_data()
eq_(p, test_pub)
eq_(
p.issues[0].locations[0].magazine.pages,
test_pub.issues[0].locations[0].magazine.pages,
)
| MagazineTest |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 5483,
"end": 11309
} | class ____(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data[0]
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "POST" not allowed.'}
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk='1').render()
assert response.status_code == status.HTTP_200_OK
assert dict(response.data) == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.patch('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_204_NO_CONTENT
assert response.content == b''
ids = [obj.id for obj in self.objects.all()]
assert ids == [2, 3]
def test_get_instance_view_incorrect_arg(self):
"""
GET requests with an incorrect pk type, should raise 404, not 500.
Regression test for #890.
"""
request = factory.get('/a')
with self.assertNumQueries(0):
response = self.view(request, pk='a').render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_cannot_set_id(self):
"""
PUT requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should return 404 if
an object does not currently exist.
"""
self.objects.get(id=1).delete()
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_to_filtered_out_instance(self):
"""
PUT requests to an URL of instance which is filtered out should not be
able to create new objects.
"""
data = {'text': 'foo'}
filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
request = factory.put(f'/{filtered_out_pk}', data, format='json')
response = self.view(request, pk=filtered_out_pk).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_patch_cannot_create_an_object(self):
"""
PATCH requests should not be able to create objects.
"""
data = {'text': 'foobar'}
request = factory.patch('/999', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=999).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
assert not self.objects.filter(id=999).exists()
def test_put_error_instance_view(self):
"""
Incorrect PUT requests in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.put('/', data, HTTP_ACCEPT='text/html')
response = self.view(request, pk=1).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
assert expected_error in response.rendered_content.decode()
| TestInstanceView |
python | walkccc__LeetCode | solutions/3290. Maximum Multiplication Score/3290-2.py | {
"start": 0,
"end": 329
} | class ____:
def maxScore(self, a: list[int], b: list[int]) -> int:
# dp[i] := the maximum score of a[0..i]
dp = [-math.inf] * 4
for num in b:
for i in reversed(range(4)):
# Skip `num` or pair a[i] with `num`.
dp[i] = max(dp[i], (dp[i - 1] if i > 0 else 0) + a[i] * num)
return dp[3]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/rectangle-area.py | {
"start": 29,
"end": 501
} | class ____(object):
# @param {integer} A
# @param {integer} B
# @param {integer} C
# @param {integer} D
# @param {integer} E
# @param {integer} F
# @param {integer} G
# @param {integer} H
# @return {integer}
def computeArea(self, A, B, C, D, E, F, G, H):
return (D - B) * (C - A) + \
(G - E) * (H - F) - \
max(0, (min(C, G) - max(A, E))) * \
max(0, (min(D, H) - max(B, F)))
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/perflint/PERF402.py | {
"start": 436,
"end": 800
} | class ____:
def append(self, x):
pass
def f():
items = [1, 2, 3, 4]
result = Foo()
for i in items:
result.append(i) # OK
def f():
import sys
for path in ("foo", "bar"):
sys.path.append(path) # OK
def f():
items = [1, 2, 3, 4]
result = []
async for i in items:
result.append(i) # PERF402
| Foo |
python | kamyu104__LeetCode-Solutions | Python/spiral-matrix-iv.py | {
"start": 70,
"end": 172
} | class ____(object):
def __init__(self, val=0, next=None):
pass
# linked list, array
| ListNode |
python | pandas-dev__pandas | pandas/io/json/_json.py | {
"start": 5773,
"end": 7213
} | class ____(ABC):
_default_orient: str
def __init__(
self,
obj: NDFrame,
orient: str | None,
date_format: str,
double_precision: int,
ensure_ascii: bool,
date_unit: str,
index: bool,
default_handler: Callable[[Any], JSONSerializable] | None = None,
indent: int = 0,
) -> None:
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.index = index
self.indent = indent
self._format_axes()
def _format_axes(self) -> None:
raise AbstractMethodError(self)
def write(self) -> str:
iso_dates = self.date_format == "iso"
return ujson_dumps(
self.obj_to_write,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=iso_dates,
default_handler=self.default_handler,
indent=self.indent,
)
@property
@abstractmethod
def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
"""Object to write in JSON format."""
| Writer |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail_baseConfig.py | {
"start": 5745,
"end": 6033
} | class ____(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
class Config:
validate_by_name = True
DynamicAliasModel2(y='y', z=1)
# MYPY: error: Unexpected keyword argument "y" for "DynamicAliasModel2" [call-arg]
DynamicAliasModel2(x='y', z=1)
| DynamicAliasModel2 |
python | pytest-dev__pytest | testing/test_assertrewrite.py | {
"start": 63236,
"end": 70563
} | class ____:
def test_option_default(self, pytester: Pytester) -> None:
config = pytester.parseconfig()
assert config.getini("enable_assertion_pass_hook") is False
@pytest.fixture
def flag_on(self, pytester: Pytester):
pytester.makeini("[pytest]\nenable_assertion_pass_hook = True\n")
@pytest.fixture
def hook_on(self, pytester: Pytester):
pytester.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
def test_hook_call(self, pytester: Pytester, flag_on, hook_on) -> None:
pytester.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
# cover failing assertions with a message
def test_fails():
assert False, "assert with message"
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
"*Assertion Passed: a+b == c+d (1 + 2) == (3 + 0) at line 7*"
)
def test_hook_call_with_parens(self, pytester: Pytester, flag_on, hook_on) -> None:
pytester.makepyfile(
"""\
def f(): return 1
def test():
assert f()
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines("*Assertion Passed: f() 1")
def test_hook_not_called_without_hookimpl(
self, pytester: Pytester, monkeypatch, flag_on
) -> None:
"""Assertion pass should not be called (and hence formatting should
not occur) if there is no hook declared for pytest_assertion_pass"""
def raise_on_assertionpass(*_, **__):
raise Exception("Assertion passed called when it shouldn't!")
monkeypatch.setattr(
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
pytester.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_hook_not_called_without_cmd_option(
self, pytester: Pytester, monkeypatch
) -> None:
"""Assertion pass should not be called (and hence formatting should
not occur) if there is no hook declared for pytest_assertion_pass"""
def raise_on_assertionpass(*_, **__):
raise Exception("Assertion passed called when it shouldn't!")
monkeypatch.setattr(
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
pytester.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
pytester.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
# fmt: off
@pytest.mark.parametrize(
("src", "expected"),
(
pytest.param(b"", {}, id="trivial"),
pytest.param(
b"def x(): assert 1\n",
{1: "1"},
id="assert statement not on own line",
),
pytest.param(
b"def x():\n"
b" assert 1\n"
b" assert 1+2\n",
{2: "1", 3: "1+2"},
id="multiple assertions",
),
pytest.param(
# changes in encoding cause the byte offsets to be different
"# -*- coding: latin1\n"
"def ÀÀÀÀÀ(): assert 1\n".encode("latin1"),
{2: "1"},
id="latin1 encoded on first line\n",
),
pytest.param(
# using the default utf-8 encoding
"def ÀÀÀÀÀ(): assert 1\n".encode(),
{1: "1"},
id="utf-8 encoded on first line",
),
pytest.param(
b"def x():\n"
b" assert (\n"
b" 1 + 2 # comment\n"
b" )\n",
{2: "(\n 1 + 2 # comment\n )"},
id="multi-line assertion",
),
pytest.param(
b"def x():\n"
b" assert y == [\n"
b" 1, 2, 3\n"
b" ]\n",
{2: "y == [\n 1, 2, 3\n ]"},
id="multi line assert with list continuation",
),
pytest.param(
b"def x():\n"
b" assert 1 + \\\n"
b" 2\n",
{2: "1 + \\\n 2"},
id="backslash continuation",
),
pytest.param(
b"def x():\n"
b" assert x, y\n",
{2: "x"},
id="assertion with message",
),
pytest.param(
b"def x():\n"
b" assert (\n"
b" f(1, 2, 3)\n"
b" ), 'f did not work!'\n",
{2: "(\n f(1, 2, 3)\n )"},
id="assertion with message, test spanning multiple lines",
),
pytest.param(
b"def x():\n"
b" assert \\\n"
b" x\\\n"
b" , 'failure message'\n",
{2: "x"},
id="escaped newlines plus message",
),
pytest.param(
b"def x(): assert 5",
{1: "5"},
id="no newline at end of file",
),
),
)
def test_get_assertion_exprs(src, expected) -> None:
assert _get_assertion_exprs(src) == expected
# fmt: on
def test_try_makedirs(monkeypatch, tmp_path: Path) -> None:
from _pytest.assertion.rewrite import try_makedirs
p = tmp_path / "foo"
# create
assert try_makedirs(p)
assert p.is_dir()
# already exist
assert try_makedirs(p)
# monkeypatch to simulate all error situations
def fake_mkdir(p, exist_ok=False, *, exc):
assert isinstance(p, Path)
raise exc
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=FileNotFoundError()))
assert not try_makedirs(p)
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=NotADirectoryError()))
assert not try_makedirs(p)
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=PermissionError()))
assert not try_makedirs(p)
err = OSError()
err.errno = errno.EROFS
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
assert not try_makedirs(p)
err = OSError()
err.errno = errno.ENOSYS
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
assert not try_makedirs(p)
# unhandled OSError should raise
err = OSError()
err.errno = errno.ECHILD
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
with pytest.raises(OSError) as exc_info:
try_makedirs(p)
assert exc_info.value.errno == errno.ECHILD
| TestAssertionPass |
python | plotly__plotly.py | plotly/graph_objs/icicle/marker/_line.py | {
"start": 233,
"end": 4730
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle.marker"
_path_str = "icicle.marker.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
@property
def color(self):
"""
Sets the color of the line enclosing each sector. Defaults to
the `paper_bgcolor` value.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def width(self):
"""
Sets the width (in px) of the line enclosing each sector.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.icicle.marker.Line`
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | networkx__networkx | networkx/generators/tests/test_random_clustered.py | {
"start": 39,
"end": 1297
} | class ____:
def test_custom_joint_degree_sequence(self):
node = [1, 1, 1, 2, 1, 2, 0, 0]
tri = [0, 0, 0, 0, 0, 1, 1, 1]
joint_degree_sequence = zip(node, tri)
G = nx.random_clustered_graph(joint_degree_sequence)
assert G.number_of_nodes() == 8
assert G.number_of_edges() == 7
def test_tuple_joint_degree_sequence(self):
G = nx.random_clustered_graph([(1, 2), (2, 1), (1, 1), (1, 1), (1, 1), (2, 0)])
assert G.number_of_nodes() == 6
assert G.number_of_edges() == 10
def test_invalid_joint_degree_sequence_type(self):
with pytest.raises(nx.NetworkXError, match="Invalid degree sequence"):
nx.random_clustered_graph([[1, 1], [2, 1], [0, 1]])
def test_invalid_joint_degree_sequence_value(self):
with pytest.raises(nx.NetworkXError, match="Invalid degree sequence"):
nx.random_clustered_graph([[1, 1], [1, 2], [0, 1]])
def test_directed_graph_raises_error(self):
with pytest.raises(nx.NetworkXError, match="Directed Graph not supported"):
nx.random_clustered_graph(
[(1, 2), (2, 1), (1, 1), (1, 1), (1, 1), (2, 0)],
create_using=nx.DiGraph,
)
| TestRandomClusteredGraph |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format02.py | {
"start": 315,
"end": 1554
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format02.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [46335872, 46365696]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"line": {"color": "red"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ethereum__web3.py | web3/beacon/beacon.py | {
"start": 1235,
"end": 8650
} | class ____:
def __init__(
self,
base_url: str,
request_timeout: float = 10.0,
) -> None:
self.base_url = base_url
self.request_timeout = request_timeout
self._request_session_manager = HTTPSessionManager()
def _make_get_request(
self, endpoint_url: str, params: dict[str, str] | None = None
) -> dict[str, Any]:
uri = URI(self.base_url + endpoint_url)
return self._request_session_manager.json_make_get_request(
uri, params=params, timeout=self.request_timeout
)
def _make_post_request(
self, endpoint_url: str, body: list[str] | dict[str, Any]
) -> dict[str, Any]:
uri = URI(self.base_url + endpoint_url)
return self._request_session_manager.json_make_post_request(
uri, json=body, timeout=self.request_timeout
)
# [ BEACON endpoints ]
# states
def get_genesis(self) -> dict[str, Any]:
return self._make_get_request(GET_GENESIS)
def get_hash_root(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_HASH_ROOT.format(state_id))
def get_fork_data(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_FORK_DATA.format(state_id))
def get_finality_checkpoint(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_FINALITY_CHECKPOINT.format(state_id))
def get_validators(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_VALIDATORS.format(state_id))
def get_validator(
self, validator_id: str, state_id: str = "head"
) -> dict[str, Any]:
return self._make_get_request(GET_VALIDATOR.format(state_id, validator_id))
def get_validator_balances(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_VALIDATOR_BALANCES.format(state_id))
def get_epoch_committees(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_EPOCH_COMMITTEES.format(state_id))
def get_epoch_sync_committees(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_EPOCH_SYNC_COMMITTEES.format(state_id))
def get_epoch_randao(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_EPOCH_RANDAO.format(state_id))
# headers
def get_block_headers(self) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK_HEADERS)
def get_block_header(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK_HEADER.format(block_id))
# blocks
def get_block(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK.format(block_id))
def get_block_root(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK_ROOT.format(block_id))
def get_block_attestations(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK_ATTESTATIONS.format(block_id))
def get_blinded_blocks(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_BLINDED_BLOCKS.format(block_id))
# rewards
def get_rewards(self, block_id: str) -> dict[str, Any]:
return self._make_get_request(GET_REWARDS.format(block_id))
# light client (untested but follows spec)
def get_light_client_bootstrap_structure(
self, block_root: HexStr
) -> dict[str, Any]:
return self._make_get_request(
GET_LIGHT_CLIENT_BOOTSTRAP_STRUCTURE.format(block_root)
)
def get_light_client_updates(self) -> dict[str, Any]:
return self._make_get_request(GET_LIGHT_CLIENT_UPDATES)
def get_light_client_finality_update(self) -> dict[str, Any]:
return self._make_get_request(GET_LIGHT_CLIENT_FINALITY_UPDATE)
def get_light_client_optimistic_update(self) -> dict[str, Any]:
return self._make_get_request(GET_LIGHT_CLIENT_OPTIMISTIC_UPDATE)
# pool
def get_attestations(self) -> dict[str, Any]:
return self._make_get_request(GET_ATTESTATIONS)
def get_attester_slashings(self) -> dict[str, Any]:
return self._make_get_request(GET_ATTESTER_SLASHINGS)
def get_proposer_slashings(self) -> dict[str, Any]:
return self._make_get_request(GET_PROPOSER_SLASHINGS)
def get_voluntary_exits(self) -> dict[str, Any]:
return self._make_get_request(GET_VOLUNTARY_EXITS)
def get_bls_to_execution_changes(self) -> dict[str, Any]:
return self._make_get_request(GET_BLS_TO_EXECUTION_CHANGES)
# [ CONFIG endpoints ]
def get_fork_schedule(self) -> dict[str, Any]:
return self._make_get_request(GET_FORK_SCHEDULE)
def get_spec(self) -> dict[str, Any]:
return self._make_get_request(GET_SPEC)
def get_deposit_contract(self) -> dict[str, Any]:
return self._make_get_request(GET_DEPOSIT_CONTRACT)
# [ DEBUG endpoints ]
def get_beacon_state(self, state_id: str = "head") -> dict[str, Any]:
return self._make_get_request(GET_BEACON_STATE.format(state_id))
def get_beacon_heads(self) -> dict[str, Any]:
return self._make_get_request(GET_BEACON_HEADS)
# [ NODE endpoints ]
def get_node_identity(self) -> dict[str, Any]:
return self._make_get_request(GET_NODE_IDENTITY)
def get_peers(self) -> dict[str, Any]:
return self._make_get_request(GET_PEERS)
def get_peer(self, peer_id: str) -> dict[str, Any]:
return self._make_get_request(GET_PEER.format(peer_id))
def get_peer_count(self) -> dict[str, Any]:
return self._make_get_request(GET_PEER_COUNT)
def get_health(self) -> int:
url = URI(self.base_url + GET_HEALTH)
response = self._request_session_manager.get_response_from_get_request(url)
return response.status_code
def get_version(self) -> dict[str, Any]:
return self._make_get_request(GET_VERSION)
def get_syncing(self) -> dict[str, Any]:
return self._make_get_request(GET_SYNCING)
# [ BLOB endpoints ]
def get_blob_sidecars(
self, block_id: str, indices: list[int] | None = None
) -> dict[str, Any]:
indices_param = {"indices": ",".join(map(str, indices))} if indices else None
return self._make_get_request(
GET_BLOB_SIDECARS.format(block_id),
params=indices_param,
)
# [ VALIDATOR endpoints ]
def get_attester_duties(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return self._make_post_request(
GET_ATTESTER_DUTIES.format(epoch), validator_indices
)
def get_block_proposer_duties(self, epoch: str) -> dict[str, Any]:
return self._make_get_request(GET_BLOCK_PROPOSERS_DUTIES.format(epoch))
def get_sync_committee_duties(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return self._make_post_request(
GET_SYNC_COMMITTEE_DUTIES.format(epoch), validator_indices
)
# [ REWARDS endpoints ]
def get_attestations_rewards(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return self._make_post_request(
GET_ATTESTATIONS_REWARDS.format(epoch), validator_indices
)
| Beacon |
python | Lightning-AI__lightning | src/lightning/pytorch/trainer/connectors/logger_connector/result.py | {
"start": 3429,
"end": 5907
} | class ____:
fx: str
name: str
prog_bar: bool = False
logger: bool = True
on_step: bool = False
on_epoch: bool = True
# https://github.com/pytorch/pytorch/issues/96197
reduce_fx: Callable = torch.mean
enable_graph: bool = False
add_dataloader_idx: bool = True
dataloader_idx: Optional[int] = None
metric_attribute: Optional[str] = None
_sync: Optional[_Sync] = None
def __post_init__(self) -> None:
if not self.on_step and not self.on_epoch:
raise MisconfigurationException("`self.log(on_step=False, on_epoch=False)` is not useful.")
self._parse_reduce_fx()
def _parse_reduce_fx(self) -> None:
error = (
"Only `self.log(..., reduce_fx={min,max,mean,sum})` are supported."
" If you need a custom reduction, please log a `torchmetrics.Metric` instance instead."
f" Found: {self.reduce_fx}"
)
if isinstance(self.reduce_fx, str):
reduce_fx = self.reduce_fx.lower()
if reduce_fx == "avg":
reduce_fx = "mean"
if reduce_fx not in ("min", "max", "mean", "sum"):
raise MisconfigurationException(error)
self.reduce_fx = getattr(torch, reduce_fx)
elif self.is_custom_reduction:
raise MisconfigurationException(error)
@property
def sync(self) -> _Sync:
assert self._sync is not None
return self._sync
@sync.setter
def sync(self, sync: _Sync) -> None:
if sync.op is None:
sync.op = self.reduce_fx.__name__
self._sync = sync
@property
def forked(self) -> bool:
return self.on_step and self.on_epoch
def forked_name(self, on_step: bool) -> str:
if self.forked:
return f"{self.name}_{'step' if on_step else 'epoch'}"
return self.name
@property
def is_mean_reduction(self) -> bool:
return self.reduce_fx is torch.mean
@property
def is_sum_reduction(self) -> bool:
return self.reduce_fx in (torch.sum, sum)
@property
def is_max_reduction(self) -> bool:
return self.reduce_fx in (torch.max, max)
@property
def is_min_reduction(self) -> bool:
return self.reduce_fx in (torch.min, min)
@property
def is_custom_reduction(self) -> bool:
return not (self.is_mean_reduction or self.is_max_reduction or self.is_min_reduction or self.is_sum_reduction)
| _Metadata |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 41364,
"end": 42002
} | class ____(BaseModel):
"""Represents the dependency graph of deployments in an application.
The topology shows which deployments call which other deployments,
with the ingress deployment as the entry point.
"""
app_name: str = Field(
description="The name of the application this topology belongs to."
)
nodes: Dict[str, DeploymentNode] = Field(
description="The adjacency list of deployment nodes."
)
ingress_deployment: Optional[str] = Field(
default=None, description="The name of the ingress deployment (entry point)."
)
@PublicAPI(stability="stable")
| DeploymentTopology |
python | numpy__numpy | numpy/_core/tests/test_unicode.py | {
"start": 8726,
"end": 8892
} | class ____(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
| TestAssignValues_1009_UCS2 |
python | joke2k__faker | tests/providers/test_python.py | {
"start": 5556,
"end": 11129
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker()
Faker.seed(0)
def test_pyfloat(self):
result = self.fake.pyfloat()
self.assertIsInstance(result, float)
def test_left_digits(self):
expected_left_digits = 10
result = self.fake.pyfloat(left_digits=expected_left_digits)
left_digits = len(str(abs(int(result))))
self.assertGreaterEqual(expected_left_digits, left_digits)
def test_right_digits(self):
expected_right_digits = 10
result = self.fake.pyfloat(right_digits=expected_right_digits)
right_digits = len(("%r" % result).split(".")[1])
self.assertGreaterEqual(expected_right_digits, right_digits)
def test_positive(self):
result = self.fake.pyfloat(positive=True)
self.assertGreater(result, 0)
self.assertEqual(result, abs(result))
def test_min_value(self):
min_values = (0, 10, -1000, 1000, 999999)
for min_value in min_values:
result = self.fake.pyfloat(min_value=min_value)
self.assertGreaterEqual(result, min_value)
def test_min_value_and_left_digits(self):
"""
Combining the min_value and left_digits keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pyfloat(left_digits=1, min_value=0)
self.assertLess(result, 10)
self.assertGreaterEqual(result, 0)
def test_max_value(self):
max_values = (0, 10, -1000, 1000, 999999)
for max_value in max_values:
result = self.fake.pyfloat(max_value=max_value)
self.assertLessEqual(result, max_value)
def test_max_value_zero_and_left_digits(self):
"""
Combining the max_value and left_digits keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pyfloat(left_digits=2, max_value=0)
self.assertLessEqual(result, 0)
self.assertGreater(result, -100)
def test_max_value_should_be_greater_than_min_value(self):
"""
An exception should be raised if min_value is greater than max_value
"""
expected_message = "Min value cannot be greater than max value"
with self.assertRaises(ValueError) as raises:
self.fake.pyfloat(min_value=100, max_value=0)
message = str(raises.exception)
self.assertEqual(message, expected_message)
def test_max_value_and_positive(self):
"""
Combining the max_value and positive keyword arguments produces
numbers that obey both of those constraints.
"""
result = self.fake.pyfloat(positive=True, max_value=100)
self.assertLessEqual(result, 100)
self.assertGreater(result, 0)
def test_max_and_min_value_positive_with_decimals(self):
"""
Combining the max_value and min_value keyword arguments with
positive values for each produces numbers that obey both of
those constraints.
"""
for _ in range(1000):
result = self.fake.pyfloat(min_value=100.123, max_value=200.321)
self.assertLessEqual(result, 200.321)
self.assertGreaterEqual(result, 100.123)
def test_max_and_min_value_negative(self):
"""
Combining the max_value and min_value keyword arguments with
negative values for each produces numbers that obey both of
those constraints.
"""
result = self.fake.pyfloat(max_value=-100, min_value=-200)
self.assertLessEqual(result, -100)
self.assertGreaterEqual(result, -200)
def test_max_and_min_value_negative_with_decimals(self):
"""
Combining the max_value and min_value keyword arguments with
negative values for each produces numbers that obey both of
those constraints.
"""
for _ in range(1000):
result = self.fake.pyfloat(max_value=-100.123, min_value=-200.321)
self.assertLessEqual(result, -100.123)
self.assertGreaterEqual(result, -200.321)
def test_positive_and_min_value_incompatible(self):
"""
An exception should be raised if positive=True is set, but
a negative min_value is provided.
"""
expected_message = "Cannot combine positive=True with negative or zero min_value"
with self.assertRaises(ValueError) as raises:
self.fake.pyfloat(min_value=-100, positive=True)
message = str(raises.exception)
self.assertEqual(message, expected_message)
def test_positive_doesnt_return_zero(self):
"""
Choose the right_digits and max_value so it's guaranteed to return zero,
then watch as it doesn't because positive=True
"""
result = self.fake.pyfloat(positive=True, right_digits=0, max_value=1)
self.assertGreater(result, 0)
@pytest.mark.filterwarnings(
# Convert the warning to an error for this test
r"error:non-integer arguments to randrange\(\):DeprecationWarning"
)
def test_float_min_and_max_value_does_not_warn(self):
"""
Float arguments to randrange are deprecated from Python 3.10. This is a regression
test to check that `pyfloat` does not cause a deprecation warning.
"""
self.fake.pyfloat(min_value=-1.0, max_value=1.0)
def test_float_min_and_max_value_with_same_whole(self):
self.fake.pyfloat(min_value=2.3, max_value=2.5)
| TestPyfloat |
python | boto__boto3 | boto3/resources/model.py | {
"start": 3108,
"end": 4267
} | class ____:
"""
An auto-filled parameter which has a source and target. For example,
the ``QueueUrl`` may be auto-filled from a resource's ``url`` identifier
when making calls to ``queue.receive_messages``.
:type target: string
:param target: The destination parameter name, e.g. ``QueueUrl``
:type source_type: string
:param source_type: Where the source is defined.
:type source: string
:param source: The source name, e.g. ``Url``
"""
def __init__(
self, target, source, name=None, path=None, value=None, **kwargs
):
#: (``string``) The destination parameter name
self.target = target
#: (``string``) Where the source is defined
self.source = source
#: (``string``) The name of the source, if given
self.name = name
#: (``string``) The JMESPath query of the source
self.path = path
#: (``string|int|float|bool``) The source constant value
self.value = value
# Complain if we encounter any unknown values.
if kwargs:
logger.warning('Unknown parameter options found: %s', kwargs)
| Parameter |
python | celery__celery | celery/utils/functional.py | {
"start": 857,
"end": 5142
} | class ____(lazy):
"""Memoized lazy evaluation.
The function is only evaluated once, every subsequent access
will return the same value.
"""
#: Set to :const:`True` after the object has been evaluated.
evaluated = False
_value = None
def evaluate(self):
if not self.evaluated:
self._value = super().evaluate()
self.evaluated = True
return self._value
def noop(*args, **kwargs):
"""No operation.
Takes any arguments/keyword arguments and does nothing.
"""
def pass1(arg, *args, **kwargs):
"""Return the first positional argument."""
return arg
def evaluate_promises(it):
for value in it:
if isinstance(value, promise):
value = value()
yield value
def first(predicate, it):
"""Return the first element in ``it`` that ``predicate`` accepts.
If ``predicate`` is None it will return the first item that's not
:const:`None`.
"""
return next(
(v for v in evaluate_promises(it) if (
predicate(v) if predicate is not None else v is not None)),
None,
)
def firstmethod(method, on_call=None):
"""Multiple dispatch.
Return a function that with a list of instances,
finds the first instance that gives a value for the given method.
The list can also contain lazy instances
(:class:`~kombu.utils.functional.lazy`.)
"""
def _matcher(it, *args, **kwargs):
for obj in it:
try:
meth = getattr(maybe_evaluate(obj), method)
reply = (on_call(meth, *args, **kwargs) if on_call
else meth(*args, **kwargs))
except AttributeError:
pass
else:
if reply is not None:
return reply
return _matcher
def chunks(it, n):
"""Split an iterator into chunks with `n` elements each.
Warning:
``it`` must be an actual iterator, if you pass this a
concrete sequence will get you repeating elements.
So ``chunks(iter(range(1000)), 10)`` is fine, but
``chunks(range(1000), 10)`` is not.
Example:
# n == 2
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
>>> list(x)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]
# n == 3
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
>>> list(x)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]
"""
for item in it:
yield [item] + list(islice(it, n - 1))
def padlist(container, size, default=None):
"""Pad list with default elements.
Example:
>>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3)
('George', 'Costanza', 'NYC')
>>> first, last, city = padlist(['George', 'Costanza'], 3)
('George', 'Costanza', None)
>>> first, last, city, planet = padlist(
... ['George', 'Costanza', 'NYC'], 4, default='Earth',
... )
('George', 'Costanza', 'NYC', 'Earth')
"""
return list(container)[:size] + [default] * (size - len(container))
def mattrgetter(*attrs):
"""Get attributes, ignoring attribute errors.
Like :func:`operator.itemgetter` but return :const:`None` on missing
attributes instead of raising :exc:`AttributeError`.
"""
return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs}
def uniq(it):
"""Return all unique elements in ``it``, preserving order."""
seen = set()
return (seen.add(obj) or obj for obj in it if obj not in seen)
def lookahead(it):
"""Yield pairs of (current, next) items in `it`.
`next` is None if `current` is the last item.
Example:
>>> list(lookahead(x for x in range(6)))
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
"""
a, b = tee(it)
next(b, None)
return zip_longest(a, b)
def regen(it):
"""Convert iterator to an object that can be consumed multiple times.
``Regen`` takes any iterable, and if the object is an
generator it will cache the evaluated list on first access,
so that the generator can be "consumed" multiple times.
"""
if isinstance(it, (list, tuple)):
return it
return _regen(it)
| mlazy |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 183727,
"end": 184105
} | class ____(BuiltinObjectType, PythonTypeConstructorMixin):
"""
builtin types like list, dict etc which can be subscripted in annotations
"""
def __init__(self, name, cname, objstruct_cname=None):
super().__init__(
name, cname, objstruct_cname=objstruct_cname)
self.set_python_type_constructor_name(name)
| BuiltinTypeConstructorObjectType |
python | eventlet__eventlet | tests/isolated/wsgi_connection_timeout.py | {
"start": 1149,
"end": 1941
} | class ____:
# server's socket.accept(); patches resulting connection sockets
def __init__(self, sock):
self.sock = sock
self.sock._really_accept = self.sock.accept
self.sock.accept = self
self.conn_reg = []
def unwrap(self):
self.sock.accept = self.sock._really_accept
del self.sock._really_accept
for conn_wrap in self.conn_reg:
conn_wrap.unwrap()
def arm(self):
output_buffer.append("ca-click")
for i in self.conn_reg:
i.arm()
def __call__(self):
output_buffer.append(self.__class__.__name__ + ".__call__")
conn, addr = self.sock._really_accept()
self.conn_reg.append(ExplodingConnectionWrap(conn))
return conn, addr
| NaughtySocketAcceptWrap |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 15304,
"end": 17313
} | class ____(nn.Module):
def __init__(self, n_classes=256, act=torch.softmax):
"""Compute log binomial distribution for n_classes
Args:
n_classes (`int`, *optional*, defaults to 256):
Number of output classes.
act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`):
Activation function to apply to the output.
"""
super().__init__()
self.k = n_classes
self.act = act
self.register_buffer("k_idx", torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False)
self.register_buffer("k_minus_1", torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)
def forward(self, probabilities, temperature=1.0, eps=1e-4):
"""Compute the log binomial distribution for probabilities.
Args:
probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Tensor containing probabilities of each class.
temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1):
Temperature of distribution.
eps (`float`, *optional*, defaults to 1e-4):
Small number for numerical stability.
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, height, width)`:
Log binomial distribution logbinomial(p;t).
"""
if probabilities.ndim == 3:
probabilities = probabilities.unsqueeze(1) # make it (batch_size, num_channels, height, width)
one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1)
probabilities = torch.clamp(probabilities, eps, 1)
y = (
log_binom(self.k_minus_1, self.k_idx)
+ self.k_idx * torch.log(probabilities)
+ (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities)
)
return self.act(y / temperature, dim=1)
| LogBinomialSoftmax |
python | django__django | tests/constraints/tests.py | {
"start": 960,
"end": 3946
} | class ____(SimpleTestCase):
def test_constraint_sql(self):
c = BaseConstraint(name="name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.constraint_sql(None, None)
def test_contains_expressions(self):
c = BaseConstraint(name="name")
self.assertIs(c.contains_expressions, False)
def test_create_sql(self):
c = BaseConstraint(name="name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.create_sql(None, None)
def test_remove_sql(self):
c = BaseConstraint(name="name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.remove_sql(None, None)
def test_validate(self):
c = BaseConstraint(name="name")
msg = "This method must be implemented by a subclass."
with self.assertRaisesMessage(NotImplementedError, msg):
c.validate(None, None)
def test_default_violation_error_message(self):
c = BaseConstraint(name="name")
self.assertEqual(
c.get_violation_error_message(), "Constraint “name” is violated."
)
def test_custom_violation_error_message(self):
c = BaseConstraint(
name="base_name", violation_error_message="custom %(name)s message"
)
self.assertEqual(c.get_violation_error_message(), "custom base_name message")
def test_custom_violation_error_message_clone(self):
constraint = BaseConstraint(
name="base_name",
violation_error_message="custom %(name)s message",
).clone()
self.assertEqual(
constraint.get_violation_error_message(),
"custom base_name message",
)
def test_custom_violation_code_message(self):
c = BaseConstraint(name="base_name", violation_error_code="custom_code")
self.assertEqual(c.violation_error_code, "custom_code")
def test_deconstruction(self):
constraint = BaseConstraint(
name="base_name",
violation_error_message="custom %(name)s message",
violation_error_code="custom_code",
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, "django.db.models.BaseConstraint")
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "base_name",
"violation_error_message": "custom %(name)s message",
"violation_error_code": "custom_code",
},
)
def test_name_required(self):
msg = (
"BaseConstraint.__init__() missing 1 required keyword-only argument: 'name'"
)
with self.assertRaisesMessage(TypeError, msg):
BaseConstraint()
| BaseConstraintTests |
python | realpython__materials | python-guitar-synthesizer/source_code_final/src/digitar/instrument.py | {
"start": 798,
"end": 1739
} | class ____:
tuning: StringTuning
vibration: Time
damping: float = 0.5
def __post_init__(self) -> None:
if not (0 < self.damping <= 0.5):
raise ValueError("string damping must be in the range of (0, 0.5]")
@cached_property
def num_strings(self) -> int:
return len(self.tuning.strings)
@cache
def downstroke(self, chord: Chord) -> tuple[Pitch, ...]:
return tuple(reversed(self.upstroke(chord)))
@cache
def upstroke(self, chord: Chord) -> tuple[Pitch, ...]:
if len(chord) != self.num_strings:
raise ValueError(
"chord and instrument must have the same string count"
)
return tuple(
string.press_fret(fret_number)
for string, fret_number in zip(
self.tuning.strings, chord, strict=False
)
if fret_number is not None
)
| PluckedStringInstrument |
python | pallets__werkzeug | examples/shorty/utils.py | {
"start": 1788,
"end": 2959
} | class ____:
def __init__(self, query, per_page, page, endpoint):
self.query = query
self.per_page = per_page
self.page = page
self.endpoint = endpoint
@cached_property
def count(self):
return self.query.count()
@cached_property
def entries(self):
return (
self.query.offset((self.page - 1) * self.per_page)
.limit(self.per_page)
.all()
)
@property
def has_previous(self):
"""Return True if there are pages before the current one."""
return self.page > 1
@property
def has_next(self):
"""Return True if there are pages after the current one."""
return self.page < self.pages
@property
def previous(self):
"""Return the URL for the previous page."""
return url_for(self.endpoint, page=self.page - 1)
@property
def next(self):
"""Return the URL for the next page."""
return url_for(self.endpoint, page=self.page + 1)
@property
def pages(self):
"""Return the number of pages."""
return max(0, self.count - 1) // self.per_page + 1
| Pagination |
python | getsentry__sentry | src/sentry/core/endpoints/organization_teams.py | {
"start": 3088,
"end": 9267
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
permission_classes = (OrganizationTeamsPermission,)
def team_serializer_for_post(self):
# allow child routes to supply own serializer, used in SCIM teams route
return TeamSerializer()
@extend_schema(
operation_id="List an Organization's Teams",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
TeamParams.DETAILED,
CursorQueryParam,
],
request=None,
responses={
200: inline_sentry_response_serializer(
"ListOrgTeamResponse", list[TeamSerializerResponse]
),
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=TeamExamples.LIST_ORG_TEAMS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Returns a list of teams bound to a organization.
"""
# TODO(dcramer): this should be system-wide default for organization
# based endpoints
if request.auth and hasattr(request.auth, "project"):
return Response(status=403)
queryset = (
Team.objects.filter(organization=organization, status=TeamStatus.ACTIVE)
.order_by("slug")
.select_related("organization") # Used in TeamSerializer
)
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "hasExternalTeams":
has_external_teams = "true" in value
if has_external_teams:
queryset = queryset.filter(
id__in=ExternalActor.objects.filter(
organization=organization
).values_list("team_id")
)
else:
queryset = queryset.exclude(
id__in=ExternalActor.objects.filter(
organization=organization
).values_list("team_id")
)
elif key == "query":
joined_value = " ".join(value)
queryset = queryset.filter(
Q(name__icontains=joined_value) | Q(slug__icontains=joined_value)
)
elif key == "slug":
queryset = queryset.filter(slug__in=value)
elif key == "id":
try:
int_values = [int(item) for item in value]
except ValueError:
raise ParseError(detail="Invalid id value")
queryset = queryset.filter(id__in=int_values)
else:
queryset = queryset.none()
is_detailed = request.GET.get("detailed", "1") != "0"
expand = ["projects", "externalTeams"] if is_detailed else []
return self.paginate(
request=request,
queryset=queryset,
order_by="slug",
on_results=lambda x: serialize(x, request.user, TeamSerializer(expand=expand)),
paginator_cls=OffsetPaginator,
)
def should_add_creator_to_team(self, request: Request):
return request.user.is_authenticated
@extend_schema(
operation_id="Create a New Team",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
],
request=TeamPostSerializer,
responses={
201: TeamSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: OpenApiResponse(description="A team with this slug already exists."),
},
examples=TeamExamples.CREATE_TEAM,
)
def post(self, request: Request, organization, **kwargs) -> Response:
"""
Create a new team bound to an organization. Requires at least one of the `name`
or `slug` body params to be set.
"""
serializer = TeamPostSerializer(data=request.data)
if serializer.is_valid():
result = serializer.validated_data
try:
with transaction.atomic(router.db_for_write(Team)):
team = Team.objects.create(
name=result.get("name") or result["slug"],
slug=result.get("slug"),
idp_provisioned=result.get("idp_provisioned", False),
organization=organization,
)
except (IntegrityError, MaxSnowflakeRetryError):
return Response(
{
"non_field_errors": [CONFLICTING_SLUG_ERROR],
"detail": CONFLICTING_SLUG_ERROR,
},
status=409,
)
else:
team_created.send_robust(
organization=organization, user=request.user, team=team, sender=self.__class__
)
if self.should_add_creator_to_team(request):
try:
member = OrganizationMember.objects.get(
user_id=request.user.id, organization=organization
)
except OrganizationMember.DoesNotExist:
pass
else:
OrganizationMemberTeam.objects.create(team=team, organizationmember=member)
self.create_audit_entry(
request=request,
organization=organization,
target_object=team.id,
event=audit_log.get_event_id("TEAM_ADD"),
data=team.get_audit_log_data(),
)
return Response(
serialize(team, request.user, self.team_serializer_for_post()),
status=201,
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| OrganizationTeamsEndpoint |
python | nedbat__coveragepy | tests/test_files.py | {
"start": 17246,
"end": 27841
} | class ____(CoverageTest):
"""Tests for coverage/files.py:PathAliases"""
run_in_temp_dir = False
def assert_mapped(self, aliases: PathAliases, inp: str, out: str) -> None:
"""Assert that `inp` mapped through `aliases` produces `out`.
If the aliases are not relative, then `out` is canonicalized first,
since aliases produce canonicalized paths by default.
"""
mapped = aliases.map(inp, exists=lambda p: True)
if aliases.relative:
expected = out
else:
expected = files.canonical_filename(out)
assert mapped == expected
def assert_unchanged(self, aliases: PathAliases, inp: str, exists: bool = True) -> None:
"""Assert that `inp` mapped through `aliases` is unchanged."""
assert aliases.map(inp, exists=lambda p: exists) == inp
def test_noop(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
self.assert_unchanged(aliases, "/ned/home/a.py")
def test_nomatch(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/home/*/src", "./mysrc")
self.assert_unchanged(aliases, "/home/foo/a.py")
def test_wildcard(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/ned/home/*/src", "./mysrc")
self.assert_mapped(aliases, "/ned/home/foo/src/a.py", "./mysrc/a.py")
aliases = PathAliases(relative=rel_yn)
aliases.add("/ned/home/*/src/", "./mysrc")
self.assert_mapped(aliases, "/ned/home/foo/src/a.py", "./mysrc/a.py")
def test_no_accidental_match(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/home/*/src", "./mysrc")
self.assert_unchanged(aliases, "/home/foo/srcetc")
def test_no_map_if_not_exist(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/ned/home/*/src", "./mysrc")
self.assert_unchanged(aliases, "/ned/home/foo/src/a.py", exists=False)
self.assert_unchanged(aliases, "foo/src/a.py", exists=False)
def test_no_dotslash(self, rel_yn: bool) -> None:
# The result shouldn't start with "./" if the map result didn't.
aliases = PathAliases(relative=rel_yn)
aliases.add("*/project", ".")
self.assert_mapped(aliases, "/ned/home/project/src/a.py", os_sep("src/a.py"))
def test_relative_pattern(self) -> None:
aliases = PathAliases(relative=True)
aliases.add(".tox/*/site-packages", "src")
self.assert_mapped(
aliases,
".tox/py314/site-packages/proj/a.py",
os_sep("src/proj/a.py"),
)
def test_multiple_patterns(self, rel_yn: bool) -> None:
# also test the debugfn...
msgs: list[str] = []
aliases = PathAliases(debugfn=msgs.append, relative=rel_yn)
aliases.add("/home/*/src", "./mysrc")
aliases.add("/lib/*/libsrc", "./mylib")
self.assert_mapped(aliases, "/home/foo/src/a.py", "./mysrc/a.py")
self.assert_mapped(aliases, "/lib/foo/libsrc/a.py", "./mylib/a.py")
if rel_yn:
assert msgs == [
"Aliases (relative=True):",
" Rule: '/home/*/src' -> './mysrc/' using regex "
+ "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'",
" Rule: '/lib/*/libsrc' -> './mylib/' using regex "
+ "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'",
"Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', "
+ "producing './mysrc/a.py'",
"Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', "
+ "producing './mylib/a.py'",
]
else:
assert msgs == [
"Aliases (relative=False):",
" Rule: '/home/*/src' -> './mysrc/' using regex "
+ "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'",
" Rule: '/lib/*/libsrc' -> './mylib/' using regex "
+ "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'",
"Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', "
+ f"producing {files.canonical_filename('./mysrc/a.py')!r}",
"Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', "
+ f"producing {files.canonical_filename('./mylib/a.py')!r}",
]
@pytest.mark.parametrize(
"badpat",
[
"/ned/home/*",
"/ned/home/*/",
"/ned/home/*/*/",
],
)
def test_cant_have_wildcard_at_end(self, badpat: str) -> None:
aliases = PathAliases()
msg = "Pattern must not end with wildcards."
with pytest.raises(ConfigError, match=msg):
aliases.add(badpat, "fooey")
def test_no_accidental_munging(self) -> None:
aliases = PathAliases()
aliases.add(r"c:\Zoo\boo", "src/")
aliases.add("/home/ned$", "src/")
self.assert_mapped(aliases, r"c:\Zoo\boo\foo.py", "src/foo.py")
self.assert_mapped(aliases, r"/home/ned$/foo.py", "src/foo.py")
def test_paths_are_os_corrected(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/home/ned/*/src", "./mysrc")
aliases.add(r"c:\ned\src", "./mysrc")
self.assert_mapped(aliases, r"C:\Ned\src\sub\a.py", "./mysrc/sub/a.py")
aliases = PathAliases(relative=rel_yn)
aliases.add("/home/ned/*/src", r".\mysrc")
aliases.add(r"c:\ned\src", r".\mysrc")
self.assert_mapped(
aliases,
r"/home/ned/foo/src/sub/a.py",
r".\mysrc\sub\a.py",
)
# Try the paths in both orders.
lin = "*/project/module/"
win = "*\\project\\module\\"
lin_win_paths = [[lin, win], [win, lin]]
@pytest.mark.parametrize("paths", lin_win_paths)
def test_windows_on_linux(self, paths: Iterable[str], rel_yn: bool) -> None:
# https://github.com/coveragepy/coveragepy/issues/618
aliases = PathAliases(relative=rel_yn)
for path in paths:
aliases.add(path, "project/module")
self.assert_mapped(
aliases,
"C:\\a\\path\\somewhere\\coveragepy_test\\project\\module\\tests\\file.py",
"project/module/tests/file.py",
)
@pytest.mark.parametrize("paths", lin_win_paths)
def test_linux_on_windows(self, paths: Iterable[str], rel_yn: bool) -> None:
# https://github.com/coveragepy/coveragepy/issues/618
aliases = PathAliases(relative=rel_yn)
for path in paths:
aliases.add(path, "project\\module")
self.assert_mapped(
aliases,
"C:/a/path/somewhere/coveragepy_test/project/module/tests/file.py",
"project\\module\\tests\\file.py",
)
@pytest.mark.parametrize("paths", lin_win_paths)
def test_relative_windows_on_linux(self, paths: Iterable[str]) -> None:
# https://github.com/coveragepy/coveragepy/issues/991
aliases = PathAliases(relative=True)
for path in paths:
aliases.add(path, "project/module")
self.assert_mapped(
aliases,
r"project\module\tests\file.py",
r"project/module/tests/file.py",
)
@pytest.mark.parametrize("paths", lin_win_paths)
def test_relative_linux_on_windows(self, paths: Iterable[str]) -> None:
# https://github.com/coveragepy/coveragepy/issues/991
aliases = PathAliases(relative=True)
for path in paths:
aliases.add(path, r"project\module")
self.assert_mapped(
aliases,
r"project/module/tests/file.py",
r"project\module\tests\file.py",
)
@pytest.mark.skipif(env.WINDOWS, reason="This test assumes Unix file system")
def test_implicit_relative_windows_on_linux(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/991
aliases = PathAliases(relative=True)
self.assert_mapped(
aliases,
r"project\module\tests\file.py",
r"project/module/tests/file.py",
)
@pytest.mark.skipif(not env.WINDOWS, reason="This test assumes Windows file system")
def test_implicit_relative_linux_on_windows(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/991
aliases = PathAliases(relative=True)
self.assert_mapped(
aliases,
r"project/module/tests/file.py",
r"project\module\tests\file.py",
)
def test_multiple_wildcard(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("/home/jenkins/*/a/*/b/*/django", "./django")
self.assert_mapped(
aliases,
"/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py",
"./django/foo/bar.py",
)
def test_windows_root_paths(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("X:\\", "/tmp/src")
self.assert_mapped(
aliases,
"X:\\a\\file.py",
"/tmp/src/a/file.py",
)
self.assert_mapped(
aliases,
"X:\\file.py",
"/tmp/src/file.py",
)
def test_leading_wildcard(self, rel_yn: bool) -> None:
aliases = PathAliases(relative=rel_yn)
aliases.add("*/d1", "./mysrc1")
aliases.add("*/d2", "./mysrc2")
self.assert_mapped(aliases, "/foo/bar/d1/x.py", "./mysrc1/x.py")
self.assert_mapped(aliases, "/foo/bar/d2/y.py", "./mysrc2/y.py")
@pytest.mark.parametrize("dirname", [".", "..", "../other", "/"])
def test_dot(self, dirname: str) -> None:
if env.WINDOWS and dirname == "/":
# The root test case was added for the manylinux Docker images,
# and I'm not sure how it should work on Windows, so skip it.
pytest.skip("Don't know how to handle root on Windows")
aliases = PathAliases()
aliases.add(dirname, "/the/source")
the_file = os.path.join(dirname, "a.py")
the_file = os.path.expanduser(the_file)
the_file = os.path.abspath(os.path.realpath(the_file))
assert "~" not in the_file # to be sure the test is pure.
self.assert_mapped(aliases, the_file, "/the/source/a.py")
| PathAliasesTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 56385,
"end": 58023
} | class ____:
# expect a compact repr for containers & defs components to be added for tree UI
leaf_instances: Sequence[ComponentInstanceSnap]
@staticmethod
def from_tree(tree: ComponentTree) -> "ComponentTreeSnap":
leaves = []
for comp_path, comp_inst in check.inst(
tree.load_root_component(), DefsFolderComponent
).iterate_path_component_pairs():
if not isinstance(
comp_inst,
(
DefsFolderComponent,
CompositeYamlComponent,
PythonFileComponent,
),
):
cls = comp_inst.__class__
leaves.append(
ComponentInstanceSnap(
key=comp_path.get_relative_key(tree.defs_module_path),
full_type_name=f"{cls.__module__}.{cls.__qualname__}",
)
)
return ComponentTreeSnap(leaf_instances=leaves)
@whitelist_for_serdes(
storage_name="ExternalRepositoryData",
storage_field_names={
"schedules": "external_schedule_datas",
"partition_sets": "external_partition_set_datas",
"sensors": "external_sensor_datas",
"asset_nodes": "external_asset_graph_data",
"resources": "external_resource_data",
"asset_check_nodes": "external_asset_checks",
"job_datas": "external_pipeline_datas",
"job_refs": "external_job_refs",
},
skip_when_empty_fields={
"pools",
"component_tree",
"defs_state_info",
},
)
@record_custom
| ComponentTreeSnap |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/event_frequency_query_handlers.py | {
"start": 1246,
"end": 1840
} | class ____(Protocol):
def __call__(
self,
model: TSDBModel,
keys: list[TSDBKey],
start: datetime,
end: datetime,
rollup: int | None = None,
environment_id: int | None = None,
use_cache: bool = False,
jitter_value: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
conditions: list[SnubaCondition] | None = None,
group_on_time: bool = False,
project_ids: list[int] | None = None,
) -> Mapping[TSDBKey, int]: ...
| TSDBFunction |
python | apache__airflow | providers/slack/tests/unit/slack/utils/test_utils.py | {
"start": 918,
"end": 4145
} | class ____:
@pytest.mark.parametrize("conn_type", ["slack", "slack_incoming_webhook"])
def test_get_extra_field(self, conn_type):
"""Test get arguments from connection extra: prefixed and not."""
extra_config = ConnectionExtraConfig(
conn_type=conn_type,
conn_id="test-conn-id",
extra={"arg1": "foo", f"extra__{conn_type}__arg2": "bar"},
)
assert extra_config.get("arg1") == "foo"
assert extra_config.get("arg2") == "bar"
def test_missing_extra_field(self):
"""Test missing field in extra."""
extra_config = ConnectionExtraConfig(conn_type="slack", conn_id="test-conn-id", extra={})
error_message = (
r"Couldn't find 'extra__slack__arg_missing' or 'arg_missing' "
r"in Connection \('test-conn-id'\) Extra and no default value specified\."
)
with pytest.raises(KeyError, match=error_message):
extra_config.get("arg_missing")
@pytest.mark.parametrize("value", [0, False, "", None], ids=lambda x: f"bool_false_{type(x).__name__}")
def test_default_extra_field(self, value):
"""Test default value for missing field in extra."""
extra_config = ConnectionExtraConfig(conn_type="slack", extra={})
assert extra_config.get("arg_missing", default=value) == value
@pytest.mark.parametrize("conn_type", ["slack", "slack_incoming_webhook"])
def test_both_prefixed_and_not_in_extra_field(self, conn_type):
"""Test resolve field from extra when both specified prefixed and not for single field."""
extra_config = ConnectionExtraConfig(
conn_type=conn_type,
conn_id="test-conn-id",
extra={"arg1": "foo", f"extra__{conn_type}__arg1": "bar"},
)
assert extra_config.get("arg1") == "foo"
@pytest.mark.parametrize("conn_type", ["slack", "slack_incoming_webhook"])
@pytest.mark.parametrize("empty_value", [None, ""])
def test_prefixed_extra_created_in_ui_connections(self, conn_type, empty_value):
"""Test that empty strings or None values in UI ignored."""
extra_config = ConnectionExtraConfig(
conn_type=conn_type,
conn_id="test-conn-id",
extra={
f"extra__{conn_type}__arg_missing": empty_value,
"arg_extra": "bar",
f"extra__{conn_type}__arg_extra": empty_value,
},
)
error_message = (
r"Couldn't find '.*' or '.*' in Connection \('.*'\) Extra and no default value specified\."
)
with pytest.raises(KeyError, match=error_message):
# No fallback should raise an error
extra_config.get("arg_missing")
assert extra_config.get("arg_missing", default="foo") == "foo"
assert extra_config.get("arg_extra") == "bar"
def test_get_parse_int(self):
extra_config = ConnectionExtraConfig(
conn_type="slack",
extra={
"int_arg_1": "42",
"int_arg_2": 9000,
},
)
assert extra_config.getint("int_arg_1") == 42
assert extra_config.getint("int_arg_2") == 9000
| TestConnectionExtra |
python | mlflow__mlflow | dev/tests/test_update_ml_package_versions.py | {
"start": 230,
"end": 6161
} | class ____:
def __init__(self, body):
self.body = json.dumps(body).encode("utf-8")
def read(self):
return self.body
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
@classmethod
def from_versions(cls, versions):
return cls(
{
"releases": {
v: [
{
"filename": v + ".whl",
"upload_time": "2023-10-04T16:38:57",
}
]
for v in versions
}
}
)
@classmethod
def from_version_infos(cls, version_infos: list[VersionInfo]) -> "MockResponse":
return cls(
{
"releases": {
v.version: [
{
"filename": v.version + ".whl",
"upload_time": v.upload_time.isoformat(),
}
]
for v in version_infos
}
}
)
@pytest.fixture(autouse=True)
def change_working_directory(tmp_path, monkeypatch):
"""
Changes the current working directory to a temporary directory to avoid modifying files in the
repository.
"""
monkeypatch.chdir(tmp_path)
def run_test(src, src_expected, mock_responses):
def patch_urlopen(url):
package_name = re.search(r"https://pypi.python.org/pypi/(.+)/json", url).group(1)
return mock_responses[package_name]
versions_yaml = Path("mlflow/ml-package-versions.yml")
versions_yaml.parent.mkdir()
versions_yaml.write_text(src)
with mock.patch("urllib.request.urlopen", new=patch_urlopen):
update_ml_package_versions.update()
assert versions_yaml.read_text() == src_expected
def test_multiple_flavors_are_correctly_updated():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
maximum: "0.0.1"
xgboost:
package_info:
pip_release: xgboost
autologging:
maximum: "0.1.1"
"""
mock_responses = {
"sklearn": MockResponse.from_versions(["0.0.2"]),
"xgboost": MockResponse.from_versions(["0.1.2"]),
}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
maximum: "0.0.2"
xgboost:
package_info:
pip_release: xgboost
autologging:
maximum: "0.1.2"
"""
run_test(src, src_expected, mock_responses)
def test_both_models_and_autologging_are_updated():
src = """
sklearn:
package_info:
pip_release: sklearn
models:
maximum: "0.0.1"
autologging:
maximum: "0.0.1"
"""
mock_responses = {
"sklearn": MockResponse.from_versions(["0.0.2"]),
}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
models:
maximum: "0.0.2"
autologging:
maximum: "0.0.2"
"""
run_test(src, src_expected, mock_responses)
def test_pre_and_dev_versions_are_ignored():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
maximum: "0.0.1"
"""
mock_responses = {
"sklearn": MockResponse.from_versions(
[
# pre-release and dev-release should be filtered out
"0.0.3.rc1", # pre-release
"0.0.3.dev1", # dev-release
"0.0.2.post", # post-release
"0.0.2", # final release
]
),
}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
maximum: "0.0.2.post"
"""
run_test(src, src_expected, mock_responses)
def test_unsupported_versions_are_ignored():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
unsupported: ["0.0.3"]
maximum: "0.0.1"
"""
mock_responses = {"sklearn": MockResponse.from_versions(["0.0.2", "0.0.3"])}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
unsupported: ["0.0.3"]
maximum: "0.0.2"
"""
run_test(src, src_expected, mock_responses)
def test_freeze_field_prevents_updating_maximum_version():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
pin_maximum: True
maximum: "0.0.1"
"""
mock_responses = {"sklearn": MockResponse.from_versions(["0.0.2"])}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
pin_maximum: True
maximum: "0.0.1"
"""
run_test(src, src_expected, mock_responses)
def test_update_min_supported_version():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
minimum: "0.0.1"
maximum: "0.0.8"
"""
mock_responses = {
"sklearn": MockResponse.from_version_infos(
[
VersionInfo("0.0.2", datetime.now() - timedelta(days=1000)),
VersionInfo("0.0.3", datetime.now() - timedelta(days=365)),
VersionInfo("0.0.8", datetime.now() - timedelta(days=180)),
]
)
}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
minimum: "0.0.3"
maximum: "0.0.8"
"""
run_test(src, src_expected, mock_responses)
def test_update_min_supported_version_for_dead_package():
src = """
sklearn:
package_info:
pip_release: sklearn
autologging:
minimum: "0.0.7"
maximum: "0.0.8"
"""
mock_responses = {
"sklearn": MockResponse.from_version_infos(
[
VersionInfo("0.0.7", datetime.now() - timedelta(days=1000)),
VersionInfo("0.0.8", datetime.now() - timedelta(days=800)),
]
)
}
src_expected = """
sklearn:
package_info:
pip_release: sklearn
autologging:
minimum: "0.0.8"
maximum: "0.0.8"
"""
run_test(src, src_expected, mock_responses)
| MockResponse |
python | getsentry__sentry | src/sentry/feedback/usecases/ingest/userreport.py | {
"start": 1055,
"end": 10038
} | class ____(Exception):
pass
def save_userreport(
project: Project,
report: UserReportDict,
source: FeedbackCreationSource,
start_time: datetime | None = None,
) -> UserReport | None:
with metrics.timer("userreport.create_user_report", tags={"referrer": source.value}):
if start_time is None:
start_time = timezone.now()
if is_in_feedback_denylist(project.organization):
metrics.incr(
"user_report.create_user_report.filtered",
tags={"reason": "org.denylist", "referrer": source.value},
)
metrics.incr("feedback.ingest.denylist")
if (
source == FeedbackCreationSource.USER_REPORT_DJANGO_ENDPOINT
or source == FeedbackCreationSource.CRASH_REPORT_EMBED_FORM
):
raise PermissionDenied()
return None
should_filter, metrics_reason, display_reason = validate_user_report(
report, project.id, source=source
)
if should_filter:
metrics.incr(
"user_report.create_user_report.filtered",
tags={"reason": metrics_reason, "referrer": source.value},
)
track_outcome(
org_id=project.organization_id,
project_id=project.id,
key_id=None,
outcome=Outcome.INVALID,
reason=display_reason,
timestamp=start_time,
event_id=None, # Note report["event_id"] is id of the associated event, not the report itself.
category=DataCategory.USER_REPORT_V2,
quantity=1,
)
if (
source == FeedbackCreationSource.USER_REPORT_DJANGO_ENDPOINT
or source == FeedbackCreationSource.CRASH_REPORT_EMBED_FORM
):
raise BadRequest(display_reason)
return None
# XXX(dcramer): enforce case insensitivity by coercing this to a lowercase string
report["event_id"] = report["event_id"].lower()
report["project_id"] = project.id
# Use the associated event to validate and update the report.
event: Event | GroupEvent | None = eventstore.backend.get_event_by_id(
project.id, report["event_id"]
)
if event:
# if the event is more than 30 minutes old, we don't allow updates
# as it might be abusive
if event.datetime < start_time - timedelta(minutes=30):
metrics.incr(
"user_report.create_user_report.filtered",
tags={"reason": "event_too_old", "referrer": source.value},
)
track_outcome(
org_id=project.organization_id,
project_id=project.id,
key_id=None,
outcome=Outcome.INVALID,
reason="Associated event is too old",
timestamp=start_time,
event_id=None,
category=DataCategory.USER_REPORT_V2,
quantity=1,
)
raise Conflict("Feedback for this event cannot be modified.")
report["environment_id"] = event.get_environment().id
if event.group_id:
report["group_id"] = event.group_id
# Save the report.
try:
with atomic_transaction(using=router.db_for_write(UserReport)):
report_instance = UserReport.objects.create(**report)
except IntegrityError:
# There was a duplicate, so just overwrite the existing
# row with the new one. The only way this ever happens is
# if someone is messing around with the API, or doing
# something wrong with the SDK, but this behavior is
# more reasonable than just hard erroring and is more
# expected.
existing_report = UserReport.objects.get(
project_id=report["project_id"], event_id=report["event_id"]
)
# if the existing report was submitted more than 5 minutes ago, we dont
# allow updates as it might be abusive (replay attacks)
if existing_report.date_added < timezone.now() - timedelta(minutes=5):
metrics.incr(
"user_report.create_user_report.filtered",
tags={"reason": "duplicate_report", "referrer": source.value},
)
track_outcome(
org_id=project.organization_id,
project_id=project.id,
key_id=None,
outcome=Outcome.INVALID,
reason="Duplicate report",
timestamp=start_time,
event_id=None,
category=DataCategory.USER_REPORT_V2,
quantity=1,
)
raise Conflict("Feedback for this event cannot be modified.")
existing_report.update(
name=report.get("name", ""),
email=report.get("email", ""),
comments=report["comments"],
)
report_instance = existing_report
metrics.incr(
"user_report.create_user_report.overwrite_duplicate",
tags={"referrer": source.value},
)
else:
if report_instance.group_id:
report_instance.notify()
# Additionally processing if save is successful.
user_feedback_received.send_robust(project=project, sender=save_userreport)
metrics.incr(
"user_report.create_user_report.saved",
tags={"has_event": bool(event), "referrer": source.value},
)
if event and source.value in FeedbackCreationSource.old_feedback_category_values():
shim_to_feedback(report, event, project, source)
# XXX(aliu): the update_user_reports task will still try to shim the report after a period, unless group_id or environment is set.
return report_instance
def validate_user_report(
report: UserReportDict,
project_id: int,
source: FeedbackCreationSource = FeedbackCreationSource.USER_REPORT_ENVELOPE,
) -> tuple[bool, str | None, str | None]:
"""
Validates required fields, field lengths, and garbage messages. Also checks that event_id is a valid UUID. Does not raise errors.
Reformatting: strips whitespace from comments and dashes from event_id.
Returns a tuple of (should_filter, metrics_reason, display_reason). XXX: ensure metrics and outcome reasons have bounded cardinality.
"""
if "comments" not in report:
return True, "missing_comments", "Missing comments" # type: ignore[unreachable]
if "event_id" not in report:
return True, "missing_event_id", "Missing event_id" # type: ignore[unreachable]
report["comments"] = report["comments"].strip()
name, email, comments = (
report.get("name", ""),
report.get("email", ""),
report["comments"],
)
if options.get("feedback.filter_garbage_messages"): # Message-based filter kill-switch.
if not comments:
return True, "empty", "Empty Feedback Messsage"
if comments == UNREAL_FEEDBACK_UNATTENDED_MESSAGE:
return True, "unreal.unattended", "Sent in Unreal Unattended Mode"
max_comment_length = UserReport._meta.get_field("comments").max_length
if max_comment_length and len(comments) > max_comment_length:
metrics.distribution(
"feedback.large_message",
len(comments),
tags={
"entrypoint": "save_userreport",
"referrer": source.value,
},
)
if random.random() < 0.1:
logger.info(
"Feedback message exceeds max size.",
extra={
"project_id": project_id,
"entrypoint": "save_userreport",
"referrer": source.value,
"length": len(comments),
"feedback_message": comments[:100],
},
)
return True, "too_large.message", "Message Too Large"
max_name_length = UserReport._meta.get_field("name").max_length
if max_name_length and len(name) > max_name_length:
return True, "too_large.name", "Name Too Large"
max_email_length = UserReport._meta.get_field("email").max_length
if max_email_length and len(email) > max_email_length:
return True, "too_large.email", "Email Too Large"
try:
# Validates UUID and strips dashes.
report["event_id"] = uuid.UUID(report["event_id"].lower()).hex
except ValueError:
return True, "invalid_event_id", "Invalid Event ID"
return False, None, None
| Conflict |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 60046,
"end": 63747
} | class ____:
"""
Cache for generated code. The cache key is a string representation of the input nodes,
number of stages, number of warps, and call sizes. The cache value is a tuple of the
generated code, extra code, and events.
"""
def __init__(self, *args, **kwargs):
self._cache: dict[str, GeneratedCodeCacheEntry] = {}
def cache_clear(self) -> None:
self._cache.clear()
def __repr__(self):
return repr(self._cache)
def make_key(
self,
input_nodes: tuple[ir.IRNode],
num_stages: int,
num_warps: int,
call_sizes: Sequence[sympy.core.symbol.Symbol],
prefix_args: int,
suffix_args: int,
epilogue_fn: Optional[Callable[..., Any]],
epilogue_fn_hash: Optional[str],
tma_store: bool,
subgraphs: Optional[list[ir.Buffer]], # has to be none to cache
workspace_arg: Optional[WorkspaceArg], # has to be none to cache
layout: ir.Layout,
num_consumer_groups: int,
num_buffers_warp_spec: int,
kwargs: dict[str, Any],
hint_override: Optional[int] = None,
) -> Optional[str]:
def layout_key(layout: ir.Layout) -> str:
assert not isinstance(layout, ir.FlexibleLayout)
return repr(
[
layout.size,
layout.stride,
layout.dtype,
layout.device,
layout.offset,
]
)
def has_flexible_layout() -> bool:
if isinstance(layout, ir.FlexibleLayout):
return True
for input in input_nodes:
if isinstance(input.get_layout(), ir.FlexibleLayout):
return True
return False
if epilogue_fn is identity:
assert epilogue_fn_hash is None
epilogue_fn_hash = "identity"
# we do not cache under those conditions right now.
if (
has_flexible_layout()
or subgraphs is not None
or workspace_arg is not None
or epilogue_fn_hash is None
):
return None
return repr(
{
"input_nodes": [
layout_key(input.get_layout()) for input in input_nodes
],
"num_stages": num_stages,
"num_warps": num_warps,
"prefix_args": prefix_args,
"suffix_args": suffix_args,
"call_sizes": call_sizes,
"layout": layout_key(layout),
"num_consumer_groups": num_consumer_groups,
"num_buffers_warp_spec": num_buffers_warp_spec,
"epilogue_fn_hash": epilogue_fn_hash,
"tma_store": tma_store,
"kwargs": kwargs,
"hint_override": hint_override,
}
)
def get_entry(self, cache_key: Optional[str]) -> Optional[GeneratedCodeCacheEntry]:
if cache_key is None:
return None
entry = self._cache.get(cache_key, None)
if entry is None:
torch._dynamo.utils.counters["inductor"]["generated_module_cache_miss"] += 1
else:
torch._dynamo.utils.counters["inductor"]["generated_module_cache_hit"] += 1
return entry
def put_entry(
self,
cache_key: Optional[str],
code: str,
extra: str,
events: list[Any],
) -> None:
if cache_key is None:
return
entry = GeneratedCodeCacheEntry(code, extra, events)
self._cache.update({cache_key: entry})
| GeneratedCodeCache |
python | numpy__numpy | numpy/matrixlib/tests/test_matrix_linalg.py | {
"start": 1883,
"end": 1945
} | class ____(_TestNorm2D):
array = np.matrix
| _TestNorm2DMatrix |
python | joblib__joblib | benchmarks/bench_compression.py | {
"start": 1236,
"end": 2359
} | class ____:
"""Protect the underlying fileobj against numerous calls to write
This is achieved by internally keeping a list of small chunks and
only flushing to the backing fileobj if passed a large chunk or
after a threshold on the number of small chunks.
"""
def __init__(self, fileobj, max_buffer_size=10 * 1024**2):
self._fileobj = fileobj
self._chunks = chunks = []
# As the `write` method is called many times by the pickler,
# attribute look ups on the self's __dict__ are too expensive
# hence we define a closure here with all the regularly
# accessed parameters
def _write(data):
chunks.append(data)
if len(chunks) > max_buffer_size:
self.flush()
self.write = _write
def flush(self):
self._fileobj.write(b"".join(self._chunks[:]))
del self._chunks[:]
def close(self):
self.flush()
self._fileobj.close()
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
| PickleBufferedWriter |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 7310,
"end": 7647
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'env']
valid_subsets = ['env']
fact_namespace = 'ansible_env'
collector_class = EnvFactCollector
def test_collect(self):
facts_dict = super(TestEnvFacts, self)._test_collect()
self.assertIn('HOME', facts_dict['env'])
| TestEnvFacts |
python | weaviate__weaviate-python-client | weaviate/connect/base.py | {
"start": 665,
"end": 1137
} | class ____(BaseModel):
host: str
port: int
secure: bool
@field_validator("host")
def _check_host(cls, v: str) -> str:
if v == "":
raise ValueError("host must not be empty")
return v
@field_validator("port")
def _check_port(cls, v: int) -> int:
if v < 0 or v > 65535:
raise ValueError("port must be between 0 and 65535")
return v
T = TypeVar("T", bound="ConnectionParams")
| ProtocolParams |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadImpl2.py | {
"start": 2392,
"end": 2637
} | class ____(Generic[T_contra]):
def method(self, x: T_contra) -> int:
assert False
@overload
def func7(x: None) -> int: ...
@overload
def func7(x: ClassD[T]) -> int: ...
def func7(x: ClassD[T] | None) -> int:
assert False
| ClassD |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/jobs.py | {
"start": 35213,
"end": 36093
} | class ____:
def __init__(self, jobs: Jobs) -> None:
self._jobs = jobs
self.create = to_streamed_response_wrapper(
jobs.create,
)
self.retrieve = to_streamed_response_wrapper(
jobs.retrieve,
)
self.list = to_streamed_response_wrapper(
jobs.list,
)
self.cancel = to_streamed_response_wrapper(
jobs.cancel,
)
self.list_events = to_streamed_response_wrapper(
jobs.list_events,
)
self.pause = to_streamed_response_wrapper(
jobs.pause,
)
self.resume = to_streamed_response_wrapper(
jobs.resume,
)
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
return CheckpointsWithStreamingResponse(self._jobs.checkpoints)
| JobsWithStreamingResponse |
python | explosion__spaCy | spacy/schemas.py | {
"start": 2364,
"end": 2445
} | class ____:
extra = "forbid"
arbitrary_types_allowed = True
| ArgSchemaConfig |
python | huggingface__transformers | examples/pytorch/instance-segmentation/run_instance_segmentation.py | {
"start": 5872,
"end": 6333
} | class ____:
class_queries_logits: torch.Tensor
masks_queries_logits: torch.Tensor
def nested_cpu(tensors):
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_cpu(t) for t in tensors)
elif isinstance(tensors, Mapping):
return type(tensors)({k: nested_cpu(t) for k, t in tensors.items()})
elif isinstance(tensors, torch.Tensor):
return tensors.cpu().detach()
else:
return tensors
| ModelOutput |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/optimizer/torch_optimizer.py | {
"start": 743,
"end": 9461
} | class ____(Optimizer):
def __init__(self, policy: TorchPolicy, trainer_settings: TrainerSettings):
super().__init__()
self.policy = policy
self.trainer_settings = trainer_settings
self.update_dict: Dict[str, torch.Tensor] = {}
self.value_heads: Dict[str, torch.Tensor] = {}
self.memory_in: torch.Tensor = None
self.memory_out: torch.Tensor = None
self.m_size: int = 0
self.global_step = torch.tensor(0)
self.bc_module: Optional[BCModule] = None
self.create_reward_signals(trainer_settings.reward_signals)
self.critic_memory_dict: Dict[str, torch.Tensor] = {}
if trainer_settings.behavioral_cloning is not None:
self.bc_module = BCModule(
self.policy,
trainer_settings.behavioral_cloning,
policy_learning_rate=trainer_settings.hyperparameters.learning_rate,
default_batch_size=trainer_settings.hyperparameters.batch_size,
default_num_epoch=3,
)
@property
def critic(self):
raise NotImplementedError
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:
pass
def create_reward_signals(
self, reward_signal_configs: Dict[RewardSignalType, RewardSignalSettings]
) -> None:
"""
Create reward signals
:param reward_signal_configs: Reward signal config.
"""
for reward_signal, settings in reward_signal_configs.items():
# Name reward signals by string in case we have duplicates later
self.reward_signals[reward_signal.value] = create_reward_provider(
reward_signal, self.policy.behavior_spec, settings
)
def _evaluate_by_sequence(
self, tensor_obs: List[torch.Tensor], initial_memory: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], AgentBufferField, torch.Tensor]:
"""
Evaluate a trajectory sequence-by-sequence, assembling the result. This enables us to get the
intermediate memories for the critic.
:param tensor_obs: A List of tensors of shape (trajectory_len, <obs_dim>) that are the agent's
observations for this trajectory.
:param initial_memory: The memory that preceeds this trajectory. Of shape (1,1,<mem_size>), i.e.
what is returned as the output of a MemoryModules.
:return: A Tuple of the value estimates as a Dict of [name, tensor], an AgentBufferField of the initial
memories to be used during value function update, and the final memory at the end of the trajectory.
"""
num_experiences = tensor_obs[0].shape[0]
all_next_memories = AgentBufferField()
# When using LSTM, we need to divide the trajectory into sequences of equal length. Sometimes,
# that division isn't even, and we must pad the leftover sequence.
# When it is added to the buffer, the last sequence will be padded. So if seq_len = 3 and
# trajectory is of length 10, the last sequence is [obs,pad,pad] once it is added to the buffer.
# Compute the number of elements in this sequence that will end up being padded.
leftover_seq_len = num_experiences % self.policy.sequence_length
all_values: Dict[str, List[np.ndarray]] = defaultdict(list)
_mem = initial_memory
# Evaluate other trajectories, carrying over _mem after each
# trajectory
for seq_num in range(num_experiences // self.policy.sequence_length):
seq_obs = []
for _ in range(self.policy.sequence_length):
all_next_memories.append(ModelUtils.to_numpy(_mem.squeeze()))
start = seq_num * self.policy.sequence_length
end = (seq_num + 1) * self.policy.sequence_length
for _obs in tensor_obs:
seq_obs.append(_obs[start:end])
values, _mem = self.critic.critic_pass(
seq_obs, _mem, sequence_length=self.policy.sequence_length
)
for signal_name, _val in values.items():
all_values[signal_name].append(_val)
# Compute values for the potentially truncated last sequence. Note that this
# sequence isn't padded yet, but will be.
seq_obs = []
if leftover_seq_len > 0:
for _obs in tensor_obs:
last_seq_obs = _obs[-leftover_seq_len:]
seq_obs.append(last_seq_obs)
# For the last sequence, the initial memory should be the one at the
# end of this trajectory.
for _ in range(leftover_seq_len):
all_next_memories.append(ModelUtils.to_numpy(_mem.squeeze()))
last_values, _mem = self.critic.critic_pass(
seq_obs, _mem, sequence_length=leftover_seq_len
)
for signal_name, _val in last_values.items():
all_values[signal_name].append(_val)
# Create one tensor per reward signal
all_value_tensors = {
signal_name: torch.cat(value_list, dim=0)
for signal_name, value_list in all_values.items()
}
next_mem = _mem
return all_value_tensors, all_next_memories, next_mem
def update_reward_signals(self, batch: AgentBuffer) -> Dict[str, float]:
update_stats: Dict[str, float] = {}
for reward_provider in self.reward_signals.values():
update_stats.update(reward_provider.update(batch))
return update_stats
def get_trajectory_value_estimates(
self,
batch: AgentBuffer,
next_obs: List[np.ndarray],
done: bool,
agent_id: str = "",
) -> Tuple[Dict[str, np.ndarray], Dict[str, float], Optional[AgentBufferField]]:
"""
Get value estimates and memories for a trajectory, in batch form.
:param batch: An AgentBuffer that consists of a trajectory.
:param next_obs: the next observation (after the trajectory). Used for bootstrapping
if this is not a terminal trajectory.
:param done: Set true if this is a terminal trajectory.
:param agent_id: Agent ID of the agent that this trajectory belongs to.
:returns: A Tuple of the Value Estimates as a Dict of [name, np.ndarray(trajectory_len)],
the final value estimate as a Dict of [name, float], and optionally (if using memories)
an AgentBufferField of initial critic memories to be used during update.
"""
n_obs = len(self.policy.behavior_spec.observation_specs)
if agent_id in self.critic_memory_dict:
memory = self.critic_memory_dict[agent_id]
else:
memory = (
torch.zeros((1, 1, self.critic.memory_size), device=default_device())
if self.policy.use_recurrent
else None
)
# Convert to tensors
current_obs = [
ModelUtils.list_to_tensor(obs) for obs in ObsUtil.from_buffer(batch, n_obs)
]
next_obs = [ModelUtils.list_to_tensor(obs) for obs in next_obs]
next_obs = [obs.unsqueeze(0) for obs in next_obs]
# If we're using LSTM, we want to get all the intermediate memories.
all_next_memories: Optional[AgentBufferField] = None
# To prevent memory leak and improve performance, evaluate with no_grad.
with torch.no_grad():
if self.policy.use_recurrent:
(
value_estimates,
all_next_memories,
next_memory,
) = self._evaluate_by_sequence(current_obs, memory)
else:
value_estimates, next_memory = self.critic.critic_pass(
current_obs, memory, sequence_length=batch.num_experiences
)
# Store the memory for the next trajectory. This should NOT have a gradient.
self.critic_memory_dict[agent_id] = next_memory
next_value_estimate, _ = self.critic.critic_pass(
next_obs, next_memory, sequence_length=1
)
for name, estimate in value_estimates.items():
value_estimates[name] = ModelUtils.to_numpy(estimate)
next_value_estimate[name] = ModelUtils.to_numpy(next_value_estimate[name])
if done:
for k in next_value_estimate:
if not self.reward_signals[k].ignore_done:
next_value_estimate[k] = 0.0
if agent_id in self.critic_memory_dict:
self.critic_memory_dict.pop(agent_id)
return value_estimates, next_value_estimate, all_next_memories
| TorchOptimizer |
python | getsentry__sentry | tests/sentry/sentry_apps/api/serializers/test_sentry_app.py | {
"start": 457,
"end": 3783
} | class ____(TestCase):
def test_published_app(self) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
sentry_app = self.create_sentry_app(
name="Tesla App",
organization=organization,
published=True,
scopes=("org:write", "team:admin"),
)
result = serialize(sentry_app, None, SentryAppSerializer(), access=None)
assert result["name"] == "Tesla App"
assert result["featureData"] == [
{
"featureId": 0,
"description": "Tesla App can **utilize the Sentry API** to pull data or update resources in Sentry (with permissions granted, of course).",
"featureGate": "integrations-api",
}
]
assert result["scopes"] == ["org:write", "team:admin"]
assert result.get("clientSecret") is None
def test_internal_app(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.create_project(organization=org)
sentry_app = self.create_internal_integration(
name="La Croix App", organization=org, scopes=("org:write", "team:admin")
)
result = serialize(sentry_app, None, SentryAppSerializer(), access=None)
assert result["name"] == "La Croix App"
assert result["status"] == "internal"
assert result["featureData"] == []
assert result["scopes"] == ["org:write", "team:admin"]
assert result.get("clientSecret") is None
def test_with_avatar(self) -> None:
sentry_app = self.create_sentry_app(
name="Tesla App", organization=self.organization, published=True, scopes=("org:write",)
)
SentryAppAvatar.objects.create(
sentry_app_id=sentry_app.id,
avatar_type=1, # upload
ident="abc123",
control_file_id=1,
)
result = serialize(sentry_app, None, SentryAppSerializer(), access=None)
assert "avatars" in result
assert result["avatars"][0]["avatarUuid"] == "abc123"
assert result["avatars"][0]["avatarType"] == "upload"
assert result["avatars"][0]["avatarUrl"] == "http://testserver/sentry-app-avatar/abc123/"
def test_without_optional_fields(self) -> None:
sentry_app = self.create_sentry_app(
name="Tesla App", organization=self.organization, published=True, scopes=("org:write",)
)
sentry_app.author = None
sentry_app.overview = None
sentry_app.popularity = None
sentry_app.redirect_url = None
sentry_app.webhook_url = None
sentry_app.date_published = None
sentry_app.owner = None
sentry_app.save()
result = serialize(sentry_app, None, SentryAppSerializer(), access=None)
assert result.get("author") is None
assert result.get("overview") is None
assert result.get("popularity") is None
assert result.get("redirectUrl") is None
assert result.get("webhookUrl") is None
assert result.get("datePublished") is None
assert result.get("clientSecret") is None
assert result.get("clientId") is None
assert result.get("owner") is None
@no_silo_test
| SentryAppSerializerTest |
python | walkccc__LeetCode | solutions/542. 01 Matrix/542-2.py | {
"start": 0,
"end": 644
} | class ____:
def updateMatrix(self, mat: list[list[int]]) -> list[list[int]]:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(mat)
n = len(mat[0])
q = collections.deque()
for i in range(m):
for j in range(n):
if mat[i][j] == 0:
q.append((i, j))
else:
mat[i][j] = math.inf
while q:
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if mat[x][y] <= mat[i][j] + 1:
continue
q.append((x, y))
mat[x][y] = mat[i][j] + 1
return mat
| Solution |
python | pytorch__pytorch | torch/_higher_order_ops/strict_mode.py | {
"start": 1868,
"end": 3831
} | class ____(HigherOrderOperator):
def __init__(self):
super().__init__("strict_mode")
def __call__(self, callable, operands):
return super().__call__(callable, operands)
strict_mode_op = StrictMode()
@strict_mode_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def strict_mode_op_dense(callable, operands):
mode = _get_current_dispatch_mode()
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
return callable(*operands)
strict_mode_op.py_autograd_impl(
autograd_not_implemented(strict_mode_op, deferred_error=True)
)
@strict_mode_op.py_impl(ProxyTorchDispatchMode)
def inner(mode, callable, operands):
return trace_strict_mode(mode, strict_mode_op, callable, operands)
def trace_strict_mode(mode, strict_mode_op, callable, operands):
pre_dispatch = getattr(mode, "pre_dispatch", False)
with disable_proxy_modes_tracing():
graph = make_fx(callable, pre_dispatch=pre_dispatch)(*operands)
graph_name = mode.tracer.get_fresh_qualname("strict_graph_")
mode.tracer.root.register_module(graph_name, graph)
args = (graph, operands)
proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)
out_proxy = mode.tracer.create_proxy(
"call_function", strict_mode_op, proxy_args, {}, name="strict_mode"
)
out = graph(*operands)
return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
@strict_mode_op.py_impl(FakeTensorMode)
def strict_mode_fake_tensor_mode(mode, callable, operands):
with mode:
true_outs = callable(*operands)
return true_outs
@strict_mode_op.py_functionalize_impl
def strict_mode_func(ctx, callable, inputs):
unwrapped_inputs = ctx.unwrap_tensors(inputs)
with ctx.redispatch_to_next():
functional_callable = ctx.functionalize(callable)
cond_return = strict_mode_op(functional_callable, unwrapped_inputs)
return ctx.wrap_tensors(cond_return)
| StrictMode |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 35546,
"end": 37082
} | class ____(FontConstantsBase):
pass
# Maps font family names to the FontConstantBase subclass to use
_font_constant_mapping = {
'DejaVu Sans': DejaVuSansFontConstants,
'DejaVu Sans Mono': DejaVuSansFontConstants,
'DejaVu Serif': DejaVuSerifFontConstants,
'cmb10': ComputerModernFontConstants,
'cmex10': ComputerModernFontConstants,
'cmmi10': ComputerModernFontConstants,
'cmr10': ComputerModernFontConstants,
'cmss10': ComputerModernFontConstants,
'cmsy10': ComputerModernFontConstants,
'cmtt10': ComputerModernFontConstants,
'STIXGeneral': STIXFontConstants,
'STIXNonUnicode': STIXFontConstants,
'STIXSizeFiveSym': STIXFontConstants,
'STIXSizeFourSym': STIXFontConstants,
'STIXSizeThreeSym': STIXFontConstants,
'STIXSizeTwoSym': STIXFontConstants,
'STIXSizeOneSym': STIXFontConstants,
# Map the fonts we used to ship, just for good measure
'Bitstream Vera Sans': DejaVuSansFontConstants,
'Bitstream Vera': DejaVuSansFontConstants,
}
def _get_font_constant_set(state: ParserState) -> type[FontConstantsBase]:
constants = _font_constant_mapping.get(
state.fontset._get_font(state.font).family_name, FontConstantsBase)
# STIX sans isn't really its own fonts, just different code points
# in the STIX fonts, so we have to detect this one separately.
if (constants is STIXFontConstants and
isinstance(state.fontset, StixSansFonts)):
return STIXSansFontConstants
return constants
| DejaVuSansFontConstants |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/sensors/databricks_sql.py | {
"start": 1357,
"end": 5633
} | class ____(BaseSensorOperator):
"""
Sensor that runs a SQL query on Databricks.
:param databricks_conn_id: Reference to :ref:`Databricks
connection id<howto/connection:databricks>` (templated), defaults to
DatabricksSqlHook.default_conn_name.
:param sql_warehouse_name: Optional name of Databricks SQL warehouse. If not specified, ``http_path``
must be provided as described below, defaults to None
:param http_path: Optional string specifying HTTP path of Databricks SQL warehouse or All Purpose cluster.
If not specified, it should be either specified in the Databricks connection's
extra parameters, or ``sql_warehouse_name`` must be specified.
:param session_configuration: An optional dictionary of Spark session parameters. If not specified,
it could be specified in the Databricks connection's extra parameters, defaults to None
:param http_headers: An optional list of (k, v) pairs
that will be set as HTTP headers on every request. (templated).
:param catalog: An optional initial catalog to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to ""
:param schema: An optional initial schema to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to "default"
:param sql: SQL statement to be executed.
:param handler: Handler for DbApiHook.run() to return results, defaults to fetch_all_handler
:param client_parameters: Additional parameters internal to Databricks SQL connector parameters.
"""
template_fields: Sequence[str] = (
"databricks_conn_id",
"sql",
"catalog",
"schema",
"http_headers",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
databricks_conn_id: str = DatabricksSqlHook.default_conn_name,
http_path: str | None = None,
sql_warehouse_name: str | None = None,
session_configuration=None,
http_headers: list[tuple[str, str]] | None = None,
catalog: str = "",
schema: str = "default",
sql: str | Iterable[str],
handler: Callable[[Any], Any] = fetch_all_handler,
client_parameters: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""Create DatabricksSqlSensor object using the specified input arguments."""
self.databricks_conn_id = databricks_conn_id
self._http_path = http_path
self._sql_warehouse_name = sql_warehouse_name
self.session_config = session_configuration
self.http_headers = http_headers
self.catalog = catalog
self.schema = schema
self.sql = sql
self.caller = "DatabricksSqlSensor"
self.client_parameters = client_parameters or {}
self.hook_params = kwargs.pop("hook_params", {})
self.handler = handler
super().__init__(**kwargs)
@cached_property
def hook(self) -> DatabricksSqlHook:
"""Creates and returns a DatabricksSqlHook object."""
return DatabricksSqlHook(
self.databricks_conn_id,
self._http_path,
self._sql_warehouse_name,
self.session_config,
self.http_headers,
self.catalog,
self.schema,
self.caller,
**self.client_parameters,
**self.hook_params,
)
def _get_results(self) -> bool:
"""Use the Databricks SQL hook and run the specified SQL query."""
if not (self._http_path or self._sql_warehouse_name):
message = (
"Databricks SQL warehouse/cluster configuration missing. Please specify either"
" http_path or sql_warehouse_name."
)
raise AirflowException(message)
hook = self.hook
sql_result = hook.run(
self.sql,
handler=self.handler if self.do_xcom_push else None,
)
self.log.debug("SQL result: %s", sql_result)
return bool(sql_result)
def poke(self, context: Context) -> bool:
"""Sensor poke function to get and return results from the SQL sensor."""
return self._get_results()
| DatabricksSqlSensor |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/query/query_transform/base.py | {
"start": 2390,
"end": 2848
} | class ____(BaseQueryTransform):
"""
Identity query transform.
Do nothing to the query.
"""
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def _run(self, query_bundle: QueryBundle, metadata: Dict) -> QueryBundle:
"""Run query transform."""
return query_bundle
| IdentityQueryTransform |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/neural_network.py | {
"start": 222,
"end": 4750
} | class ____():
"""Neural Network. Deep Learning base model.
Parameters:
-----------
optimizer: class
The weight optimizer that will be used to tune the weights in order of minimizing
the loss.
loss: class
Loss function used to measure the model's performance. SquareLoss or CrossEntropy.
validation: tuple
A tuple containing validation data and labels (X, y)
"""
def __init__(self, optimizer, loss, validation_data=None):
self.optimizer = optimizer
self.layers = []
self.errors = {"training": [], "validation": []}
self.loss_function = loss()
self.progressbar = progressbar.ProgressBar(widgets=bar_widgets)
self.val_set = None
if validation_data:
X, y = validation_data
self.val_set = {"X": X, "y": y}
def set_trainable(self, trainable):
""" Method which enables freezing of the weights of the network's layers. """
for layer in self.layers:
layer.trainable = trainable
def add(self, layer):
""" Method which adds a layer to the neural network """
# If this is not the first layer added then set the input shape
# to the output shape of the last added layer
if self.layers:
layer.set_input_shape(shape=self.layers[-1].output_shape())
# If the layer has weights that needs to be initialized
if hasattr(layer, 'initialize'):
layer.initialize(optimizer=self.optimizer)
# Add layer to the network
self.layers.append(layer)
def test_on_batch(self, X, y):
""" Evaluates the model over a single batch of samples """
y_pred = self._forward_pass(X, training=False)
loss = np.mean(self.loss_function.loss(y, y_pred))
acc = self.loss_function.acc(y, y_pred)
return loss, acc
def train_on_batch(self, X, y):
""" Single gradient update over one batch of samples """
y_pred = self._forward_pass(X)
loss = np.mean(self.loss_function.loss(y, y_pred))
acc = self.loss_function.acc(y, y_pred)
# Calculate the gradient of the loss function wrt y_pred
loss_grad = self.loss_function.gradient(y, y_pred)
# Backpropagate. Update weights
self._backward_pass(loss_grad=loss_grad)
return loss, acc
def fit(self, X, y, n_epochs, batch_size):
""" Trains the model for a fixed number of epochs """
for _ in self.progressbar(range(n_epochs)):
batch_error = []
for X_batch, y_batch in batch_iterator(X, y, batch_size=batch_size):
loss, _ = self.train_on_batch(X_batch, y_batch)
batch_error.append(loss)
self.errors["training"].append(np.mean(batch_error))
if self.val_set is not None:
val_loss, _ = self.test_on_batch(self.val_set["X"], self.val_set["y"])
self.errors["validation"].append(val_loss)
return self.errors["training"], self.errors["validation"]
def _forward_pass(self, X, training=True):
""" Calculate the output of the NN """
layer_output = X
for layer in self.layers:
layer_output = layer.forward_pass(layer_output, training)
return layer_output
def _backward_pass(self, loss_grad):
""" Propagate the gradient 'backwards' and update the weights in each layer """
for layer in reversed(self.layers):
loss_grad = layer.backward_pass(loss_grad)
def summary(self, name="Model Summary"):
# Print model name
print (AsciiTable([[name]]).table)
# Network input shape (first layer's input shape)
print ("Input Shape: %s" % str(self.layers[0].input_shape))
# Iterate through network and get each layer's configuration
table_data = [["Layer Type", "Parameters", "Output Shape"]]
tot_params = 0
for layer in self.layers:
layer_name = layer.layer_name()
params = layer.parameters()
out_shape = layer.output_shape()
table_data.append([layer_name, str(params), str(out_shape)])
tot_params += params
# Print network configuration table
print (AsciiTable(table_data).table)
print ("Total Parameters: %d\n" % tot_params)
def predict(self, X):
""" Use the trained model to predict labels of X """
return self._forward_pass(X, training=False)
| NeuralNetwork |
python | modin-project__modin | modin/pandas/window.py | {
"start": 3267,
"end": 8574
} | class ____(ClassLogger):
def __init__(
self,
dataframe,
window=None,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
step=None,
method="single",
):
if step is not None:
raise NotImplementedError("step parameter is not implemented yet.")
self._dataframe = dataframe
self._query_compiler = dataframe._query_compiler
self.rolling_kwargs = {
"window": window,
"min_periods": min_periods,
"center": center,
"win_type": win_type,
"on": on,
"axis": axis,
"closed": closed,
"step": step,
"method": method,
}
self.axis = axis
def _call_qc_method(self, method_name, *args, **kwargs):
"""
Call a query compiler method for the specified rolling aggregation.
Parameters
----------
method_name : str
Name of the aggregation.
*args : tuple
Positional arguments to pass to the query compiler method.
**kwargs : dict
Keyword arguments to pass to the query compiler method.
Returns
-------
BaseQueryCompiler
QueryCompiler holding the result of the aggregation.
"""
qc_method = getattr(self._query_compiler, f"rolling_{method_name}")
return qc_method(self.axis, self.rolling_kwargs, *args, **kwargs)
def _aggregate(self, method_name, *args, **kwargs):
"""
Run the specified rolling aggregation.
Parameters
----------
method_name : str
Name of the aggregation.
*args : tuple
Positional arguments to pass to the aggregation.
**kwargs : dict
Keyword arguments to pass to the aggregation.
Returns
-------
DataFrame or Series
Result of the aggregation.
"""
qc_result = self._call_qc_method(method_name, *args, **kwargs)
return self._dataframe.__constructor__(query_compiler=qc_result)
def count(self):
return self._aggregate("count")
def sem(self, *args, **kwargs):
return self._aggregate("sem", *args, **kwargs)
def sum(self, *args, **kwargs):
return self._aggregate("sum", *args, **kwargs)
def mean(self, *args, **kwargs):
return self._aggregate("mean", *args, **kwargs)
def median(self, **kwargs):
return self._aggregate("median", **kwargs)
def var(self, ddof=1, *args, **kwargs):
return self._aggregate("var", ddof, *args, **kwargs)
def std(self, ddof=1, *args, **kwargs):
return self._aggregate("std", ddof, *args, **kwargs)
def min(self, *args, **kwargs):
return self._aggregate("min", *args, **kwargs)
def max(self, *args, **kwargs):
return self._aggregate("max", *args, **kwargs)
def corr(self, other=None, pairwise=None, *args, **kwargs):
from .dataframe import DataFrame
from .series import Series
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
elif isinstance(other, Series):
other = other._query_compiler.to_pandas().squeeze()
return self._aggregate("corr", other, pairwise, *args, **kwargs)
def cov(self, other=None, pairwise=None, ddof: Optional[int] = 1, **kwargs):
from .dataframe import DataFrame
from .series import Series
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
elif isinstance(other, Series):
other = other._query_compiler.to_pandas().squeeze()
return self._aggregate("cov", other, pairwise, ddof, **kwargs)
def skew(self, **kwargs):
return self._aggregate("skew", **kwargs)
def kurt(self, **kwargs):
return self._aggregate("kurt", **kwargs)
def apply(
self,
func,
raw=False,
engine="cython",
engine_kwargs=None,
args=None,
kwargs=None,
):
func = cast_function_modin2pandas(func)
return self._aggregate("apply", func, raw, engine, engine_kwargs, args, kwargs)
def aggregate(
self,
func,
*args,
**kwargs,
):
from .dataframe import DataFrame
dataframe = DataFrame(
query_compiler=self._call_qc_method(
"aggregate",
func,
*args,
**kwargs,
)
)
if isinstance(self._dataframe, DataFrame):
return dataframe
elif is_list_like(func) and dataframe.columns.nlevels > 1:
dataframe.columns = dataframe.columns.droplevel()
return dataframe
else:
return dataframe.squeeze()
agg = aggregate
def quantile(self, q, interpolation="linear", **kwargs):
return self._aggregate("quantile", q, interpolation, **kwargs)
def rank(
self, method="average", ascending=True, pct=False, numeric_only=False, **kwargs
):
return self._aggregate("rank", method, ascending, pct, numeric_only, **kwargs)
@_inherit_docstrings(Rolling)
| Rolling |
python | PyCQA__pylint | doc/data/messages/t/too-many-ancestors/good.py | {
"start": 241,
"end": 294
} | class ____(Animal):
has_vertebrae = True
| Vertebrate |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/io_test.py | {
"start": 6573,
"end": 8351
} | class ____(IOTest, checkpoint_test_base.CheckpointTestBase):
@combinations.generate(test_base.eager_only_combinations())
def testSaveCheckpointingAPI(self):
dataset = dataset_ops.Dataset.range(40)
checkpoint_args = {"directory": self._checkpoint_prefix, "max_to_keep": 50}
dataset.save(self._save_dir, checkpoint_args=checkpoint_args)
num_checkpoint_files = len(list(os.listdir(self._checkpoint_prefix)))
# By default, we checkpoint every increment. Each checkpoint writes a
# file containing the data and a file containing the index. There is
# also an overall checkpoint file. Thus, we expect (2 * 40) + 1 files.
self.assertEqual(81, num_checkpoint_files)
@combinations.generate(test_base.eager_only_combinations())
def testSaveCheckpointingAPICustomCheckpointInterval(self):
dataset = dataset_ops.Dataset.range(40)
step_counter = variables.Variable(0, trainable=False)
checkpoint_args = {
"checkpoint_interval": 5,
"step_counter": step_counter,
"directory": self._checkpoint_prefix,
"max_to_keep": 10,
}
dataset.save(self._save_dir, checkpoint_args=checkpoint_args)
num_checkpoint_files = len(list(os.listdir(self._checkpoint_prefix)))
# We expect (2 * 8) + 1 files.
self.assertEqual(17, num_checkpoint_files)
@combinations.generate(test_base.eager_only_combinations())
def testSaveCheckpointingAPIIncorrectArgs(self):
dataset = dataset_ops.Dataset.range(42)
checkpoint_args = {
"directory": self._checkpoint_prefix,
"incorrect_arg": "incorrect_arg"
}
with self.assertRaises(TypeError):
dataset.save(
dataset, self._save_dir, checkpoint_args=checkpoint_args)
if __name__ == "__main__":
test.main()
| SaveCheckpointTest |
python | celery__celery | t/integration/test_canvas.py | {
"start": 4791,
"end": 6949
} | class ____:
@flaky
def test_link_error_eager(self):
exception = ExpectedException("Task expected to fail", "test")
result = fail.apply(args=("test",), link_error=return_exception.s())
actual = result.get(timeout=TIMEOUT, propagate=False)
assert actual == exception
@flaky
def test_link_error(self):
exception = ExpectedException("Task expected to fail", "test")
result = fail.apply(args=("test",), link_error=return_exception.s())
actual = result.get(timeout=TIMEOUT, propagate=False)
assert actual == exception
@flaky
def test_link_error_callback_error_callback_retries_eager(self):
exception = ExpectedException("Task expected to fail", "test")
result = fail.apply(
args=("test",),
link_error=retry_once.s(countdown=None)
)
assert result.get(timeout=TIMEOUT, propagate=False) == exception
@flaky
def test_link_error_callback_retries(self, manager):
exception = ExpectedException("Task expected to fail", "test")
result = fail.apply_async(
args=("test",),
link_error=retry_once.s(countdown=None)
)
assert result.get(timeout=TIMEOUT / 10, propagate=False) == exception
@flaky
def test_link_error_using_signature_eager(self):
fail = signature('t.integration.tasks.fail', args=("test",))
return_exception = signature('t.integration.tasks.return_exception')
fail.link_error(return_exception)
exception = ExpectedException("Task expected to fail", "test")
assert (fail.apply().get(timeout=TIMEOUT, propagate=False), True) == (
exception, True)
def test_link_error_using_signature(self, manager):
fail = signature('t.integration.tasks.fail', args=("test",))
return_exception = signature('t.integration.tasks.return_exception')
fail.link_error(return_exception)
exception = ExpectedException("Task expected to fail", "test")
assert (fail.delay().get(timeout=TIMEOUT / 10, propagate=False), True) == (
exception, True)
| test_link_error |
python | django__django | django/db/models/fields/__init__.py | {
"start": 77167,
"end": 77314
} | class ____(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
| SmallIntegerField |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 105992,
"end": 110606
} | class ____(fixtures.DeclarativeMappedTest, AssertsCompiledSQL):
__dialect__ = "default"
run_create_tables = None
@classmethod
def setup_classes(cls):
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.interfaces import PropComparator
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
b_data = association_proxy("bs", "value")
well_behaved_b_data = association_proxy("bs", "well_behaved_value")
fails_on_class_access = association_proxy(
"bs", "fails_on_class_access"
)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
aid = Column(ForeignKey("a.id"))
data = Column(String(50))
@hybrid_property
def well_behaved_value(self):
return self.data
@well_behaved_value.setter
def well_behaved_value(self, value):
self.data = value
@hybrid_property
def value(self):
return self.data
@value.setter
def value(self, value):
self.data = value
@value.comparator
class value(PropComparator):
# comparator has no proxy __getattr__, so we can't
# get to impl to see what we ar proxying towards.
# as of #4690 we assume column-oriented proxying
def __init__(self, cls):
self.cls = cls
@hybrid_property
def well_behaved_w_expr(self):
return self.data
@well_behaved_w_expr.setter
def well_behaved_w_expr(self, value):
self.data = value
@well_behaved_w_expr.expression
def well_behaved_w_expr(cls):
return cast(cls.data, Integer)
@hybrid_property
def fails_on_class_access(self):
return len(self.data)
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
_b = relationship("B")
attr = association_proxy("_b", "well_behaved_w_expr")
def test_msg_fails_on_cls_access(self):
A, B = self.classes("A", "B")
a1 = A(bs=[B(data="b1")])
with expect_raises_message(
exc.InvalidRequestError,
"Association proxy received an unexpected error when trying to "
'retreive attribute "B.fails_on_class_access" from '
r'class "B": .* no len\(\)',
):
a1.fails_on_class_access
def test_get_ambiguous(self):
A, B = self.classes("A", "B")
a1 = A(bs=[B(data="b1")])
eq_(a1.b_data[0], "b1")
def test_get_nonambiguous(self):
A, B = self.classes("A", "B")
a1 = A(bs=[B(data="b1")])
eq_(a1.well_behaved_b_data[0], "b1")
def test_set_ambiguous(self):
A, B = self.classes("A", "B")
a1 = A(bs=[B()])
a1.b_data[0] = "b1"
eq_(a1.b_data[0], "b1")
def test_set_nonambiguous(self):
A, B = self.classes("A", "B")
a1 = A(bs=[B()])
a1.b_data[0] = "b1"
eq_(a1.well_behaved_b_data[0], "b1")
def test_expr_nonambiguous(self):
A, B = self.classes("A", "B")
eq_(
str(A.well_behaved_b_data == 5),
"EXISTS (SELECT 1 \nFROM b, a \nWHERE "
"a.id = b.aid AND b.data = :data_1)",
)
def test_get_classlevel_ambiguous(self):
A, B = self.classes("A", "B")
eq_(
str(A.b_data),
"ColumnAssociationProxyInstance"
"(AssociationProxy('bs', 'value'))",
)
def test_comparator_ambiguous(self):
A, B = self.classes("A", "B")
s = fixture_session()
self.assert_compile(
s.query(A).filter(A.b_data.any()),
"SELECT a.id AS a_id FROM a WHERE EXISTS "
"(SELECT 1 FROM b WHERE a.id = b.aid)",
)
def test_explicit_expr(self):
(C,) = self.classes("C")
s = fixture_session()
self.assert_compile(
s.query(C).filter_by(attr=5),
"SELECT c.id AS c_id, c.b_id AS c_b_id FROM c WHERE EXISTS "
"(SELECT 1 FROM b WHERE b.id = c.b_id AND "
"CAST(b.data AS INTEGER) = :param_1)",
)
| ProxyHybridTest |
python | falconry__falcon | examples/recipes/msgspec_main.py | {
"start": 357,
"end": 613
} | class ____(msgspec.Struct):
text: Annotated[str, msgspec.Meta(max_length=256)]
noteid: uuid.UUID = msgspec.field(default_factory=uuid.uuid4)
created: datetime = msgspec.field(
default_factory=partial(datetime.now, timezone.utc)
)
| Note |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer_swin.py | {
"start": 2564,
"end": 4877
} | class ____(ModelOutput):
r"""
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
`batch, channels, height, width`. Due to padding, their spatial size cannot inferred before the `forward`
method.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
| MaskFormerSwinBaseModelOutput |
python | keras-team__keras | keras/src/ops/nn_test.py | {
"start": 111863,
"end": 121880
} | class ____(testing.TestCase):
def test_logit_recovery_binary_crossentropy(self):
layer = layers.Dense(
4, activation="sigmoid", use_bias=False, kernel_initializer="ones"
)
loss = losses.BinaryCrossentropy()
x = np.array([[1.4, 1.6, 0.8]])
y = np.array([[0.2, 0.6, 0.1, 0.3]])
loss_value = loss(y, layer(x))
self.assertAllClose(loss_value, 2.682124)
model = models.Sequential([layer])
model.compile(loss="binary_crossentropy", optimizer="sgd")
out = model.evaluate(x, y)
self.assertAllClose(out, 2.682124)
def test_softmax_on_axis_with_size_one_warns(self):
x = np.array([[1.0]])
# Applying softmax on the second axis, which has size 1
axis = 1
# Expected warning message
expected_warning_regex = (
r"You are using a softmax over axis 1 "
r"of a tensor of shape \(1, 1\)\. This axis "
r"has size 1\. The softmax operation will always return "
r"the value 1, which is likely not what you intended\. "
r"Did you mean to use a sigmoid instead\?"
)
with self.assertWarnsRegex(UserWarning, expected_warning_regex):
knn.softmax(x, axis)
def test_normalize_order_validation(self):
# Test with a non-integer order
with self.assertRaisesRegex(
ValueError, "Argument `order` must be an int >= 1"
):
knn.normalize(np.array([1, 2, 3]), order="a")
# Test with a negative integer
with self.assertRaisesRegex(
ValueError, "Argument `order` must be an int >= 1"
):
knn.normalize(np.array([1, 2, 3]), order=-1)
# Test with zero
with self.assertRaisesRegex(
ValueError, "Argument `order` must be an int >= 1"
):
knn.normalize(np.array([1, 2, 3]), order=0)
# Test with a floating-point number
with self.assertRaisesRegex(
ValueError, "Argument `order` must be an int >= 1"
):
knn.normalize(np.array([1, 2, 3]), order=2.5)
def test_check_shape_first_dim_mismatch(self):
name1, shape1 = "labels", (2, 3)
name2, shape2 = "logits", (3, 4, 5)
ctc_loss_instance = knn.CTCLoss(mask_index=-1)
with self.assertRaisesRegex(
ValueError, "must have the same first dimension"
):
ctc_loss_instance._check_shape_first_dim(
name1, shape1, name2, shape2
)
def test_invalid_strategy_ctc_decode(self):
inputs = np.array(
[
[
[0.1, 0.4, 0.2, 0.4],
[0.3, 0.3, 0.4, 0.2],
[0.3, 0.2, 0.4, 0.3],
]
]
)
beam_width = 4
top_paths = 2
with self.assertRaisesRegex(ValueError, "Invalid strategy"):
knn.ctc_decode(
inputs,
sequence_lengths=[3, 3, 1],
strategy="invalid",
beam_width=beam_width,
top_paths=top_paths,
)
def test_layer_normalization_rms_scaling_warning(self):
x = np.arange(5, dtype="float32")
with self.assertWarnsRegex(
UserWarning, r"You passed `rms_scaling=True`, which is deprecated"
):
knn.layer_normalization(x, rms_scaling=True)
def test_unfold(self):
if keras.config.backend() in ["openvino"]:
pytest.skip("Backend does not support unfold operation")
# test 1 kernel_size=2
x = ops.arange(8, dtype="float32")
x = ops.reshape(x, [1, 1, 2, 4])
unfold_result = knn.unfold(x, 2)
except_result = ops.convert_to_tensor(
[
[
[0.0, 1.0, 2.0],
[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[5.0, 6.0, 7.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 2 kernel_size=[2,4]
x = ops.arange(16, dtype="float32")
x = ops.reshape(x, [1, 1, 4, 4])
unfold_result = knn.unfold(x, [2, 4])
except_result = ops.convert_to_tensor(
[
[
[0.0, 4.0, 8.0],
[1.0, 5.0, 9.0],
[2.0, 6.0, 10.0],
[3.0, 7.0, 11.0],
[4.0, 8.0, 12.0],
[5.0, 9.0, 13.0],
[6.0, 10.0, 14.0],
[7.0, 11.0, 15.0],
]
],
dtype="float32",
)
self.assertAllClose(unfold_result, except_result)
# test 3 kernel_size=[3,2],stride=[3,2]
x = ops.arange(12, dtype="float32")
x = ops.reshape(x, [1, 1, 3, 4])
unfold_result = knn.unfold(x, [3, 2], stride=[3, 2])
except_result = ops.convert_to_tensor(
[
[
[0.0, 2.0],
[1.0, 3.0],
[4.0, 6.0],
[5.0, 7.0],
[8.0, 10.0],
[9.0, 11.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 4 kernel_size=2,dilation=2,stride=2
x = ops.arange(16, dtype="float32")
x = ops.reshape(x, [1, 1, 4, 4])
unfold_result = knn.unfold(x, 2, 2, stride=2)
except_result = ops.convert_to_tensor([0, 2, 8, 10], dtype="float32")
except_result = ops.reshape(except_result, [1, 4, 1])
self.assertAllClose(unfold_result, except_result)
# test 5 kernel_size=2,padding=1
x = ops.arange(4, dtype="float32")
x = ops.reshape(x, [1, 1, 2, 2])
unfold_result = knn.unfold(x, 1, padding=1)
except_result = ops.convert_to_tensor(
[
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
2.0,
3.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 6 multi channal and kernel_size=2
x = ops.arange(8, dtype="float32")
x = ops.reshape(x, [1, 2, 2, 2])
unfold_result = knn.unfold(x, 2)
except_result = ops.convert_to_tensor(
[[[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0]]]
)
self.assertAllClose(unfold_result, except_result)
# test 7 multi channal and kernel_size=[2,3]
x = ops.arange(12, dtype="float32")
x = ops.reshape(x, [1, 2, 2, 3])
unfold_result = knn.unfold(x, [2, 3])
except_result = ops.convert_to_tensor(
[
[
[0.0],
[1.0],
[2.0],
[3.0],
[4.0],
[5.0],
[6.0],
[7.0],
[8.0],
[9.0],
[10.0],
[11.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 8 multi channal and kernel_size=[2,3],stride=[2,3]
x = ops.arange(12, dtype="float32")
x = ops.reshape(x, [1, 2, 2, 3])
unfold_result = knn.unfold(x, [2, 3], stride=[2, 3])
except_result = ops.convert_to_tensor(
[
[
[0.0],
[1.0],
[2.0],
[3.0],
[4.0],
[5.0],
[6.0],
[7.0],
[8.0],
[9.0],
[10.0],
[11.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 9 multi channal and kernel_size=2,dilation=2
x = ops.arange(32, dtype="float32")
x = ops.reshape(x, [1, 2, 4, 4])
unfold_result = knn.unfold(x, 2, dilation=2)
except_result = ops.convert_to_tensor(
[
[
[0.0, 1.0, 4.0, 5.0],
[2.0, 3.0, 6.0, 7.0],
[8.0, 9.0, 12.0, 13.0],
[10.0, 11.0, 14.0, 15.0],
[16.0, 17.0, 20.0, 21.0],
[18.0, 19.0, 22.0, 23.0],
[24.0, 25.0, 28.0, 29.0],
[26.0, 27.0, 30.0, 31.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
# test 10 multi channal and kernel_size=2,padding=1
x = ops.arange(8, dtype="float32")
x = ops.reshape(x, [1, 2, 2, 2])
unfold_result = knn.unfold(x, 2, padding=1)
except_result = ops.convert_to_tensor(
[
[
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 3.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 3.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 2.0, 3.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 4.0, 5.0, 0.0, 6.0, 7.0],
[0.0, 0.0, 0.0, 4.0, 5.0, 0.0, 6.0, 7.0, 0.0],
[0.0, 4.0, 5.0, 0.0, 6.0, 7.0, 0.0, 0.0, 0.0],
[4.0, 5.0, 0.0, 6.0, 7.0, 0.0, 0.0, 0.0, 0.0],
]
]
)
self.assertAllClose(unfold_result, except_result)
| NNOpsBehaviorTest |
python | python-visualization__folium | folium/plugins/encoded.py | {
"start": 1558,
"end": 2652
} | class ____(_BaseFromEncoded):
"""Create PolyLines directly from the encoded string.
Parameters
----------
encoded: str
The raw encoded string from the Polyline Encoding Algorithm. See:
https://developers.google.com/maps/documentation/utilities/polylinealgorithm
**kwargs:
Polyline options as accepted by leaflet. See:
https://leafletjs.com/reference.html#polyline
Adapted from https://github.com/jieter/Leaflet.encoded
Examples
--------
>>> from folium import Map
>>> from folium.plugins import PolyLineFromEncoded
>>> m = Map()
>>> encoded = r"_p~iF~cn~U_ulLn{vA_mqNvxq`@"
>>> PolyLineFromEncoded(encoded=encoded, color="green").add_to(m)
"""
def __init__(self, encoded: str, **kwargs):
self._name = "PolyLineFromEncoded"
super().__init__(encoded=encoded)
self.options = path_options(line=True, **kwargs)
@property
def _encoding_type(self) -> str:
"""Return the name of folium object created from the encoded."""
return "Polyline"
| PolyLineFromEncoded |
python | huggingface__transformers | tests/trainer/test_data_collator.py | {
"start": 48273,
"end": 72352
} | class ____(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), [[0, 1, 2]] * 8)
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
# Labels can already be tensors
features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["labels"].tolist(), (list(range(8))))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.float32)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
def test_data_collator_with_flattening(self):
features = [
{"input_ids": [10, 11, 12]},
{"input_ids": [20, 21, 22, 23, 24, 25]},
{"input_ids": [30, 31, 32, 33, 34, 35, 36]},
]
data_collator = DataCollatorWithFlattening(return_tensors="np")
batch = data_collator(features)
for unexpected_key in [
"attention_mask",
"cu_seq_lens_k",
"cu_seq_lens_q",
"max_length_k",
"max_length_q",
"seq_idx",
]:
self.assertNotIn(unexpected_key, batch)
self.assertIn("position_ids", batch)
self.assertEqual(batch["input_ids"].shape, (1, 16))
self.assertEqual(
batch["input_ids"][0].tolist(), [10, 11, 12, 20, 21, 22, 23, 24, 25, 30, 31, 32, 33, 34, 35, 36]
)
self.assertEqual(batch["position_ids"].shape, (1, 16))
self.assertEqual(batch["position_ids"][0].tolist(), [0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6])
def test_data_collator_with_flattening_flash_attn_kwargs(self):
features = [
{"input_ids": [10, 11, 12]},
{"input_ids": [20, 21, 22, 23, 24, 25]},
{"input_ids": [30, 31, 32, 33, 34, 35, 36]},
]
data_collator = DataCollatorWithFlattening(return_tensors="np", return_flash_attn_kwargs=True)
batch = data_collator(features)
for unexpected_key in [
"attention_mask",
"seq_idx",
]:
self.assertNotIn(unexpected_key, batch)
for expected_key in [
"position_ids",
"cu_seq_lens_k",
"cu_seq_lens_q",
"max_length_k",
"max_length_q",
]:
self.assertIn(expected_key, batch)
self.assertEqual(batch["input_ids"].shape, (1, 16))
self.assertEqual(
batch["input_ids"][0].tolist(), [10, 11, 12, 20, 21, 22, 23, 24, 25, 30, 31, 32, 33, 34, 35, 36]
)
self.assertEqual(batch["position_ids"].shape, (1, 16))
self.assertEqual(batch["position_ids"][0].tolist(), [0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(batch["cu_seq_lens_k"].shape, (4,))
self.assertEqual(batch["cu_seq_lens_k"].tolist(), [0, 3, 9, 16])
self.assertEqual(batch["cu_seq_lens_q"].shape, (4,))
self.assertEqual(batch["cu_seq_lens_q"].tolist(), [0, 3, 9, 16])
# The flash attn max_length_{k,q} are simple python ints
self.assertEqual(batch["max_length_k"], 7)
self.assertEqual(batch["max_length_q"], 7)
def test_data_collator_with_flattening_seq_idx(self):
features = [
{"input_ids": [10, 11, 12]},
{"input_ids": [20, 21, 22, 23, 24, 25]},
{"input_ids": [30, 31, 32, 33, 34, 35, 36]},
]
data_collator = DataCollatorWithFlattening(return_tensors="np", return_seq_idx=True)
batch = data_collator(features)
for unexpected_key in [
"attention_mask",
"cu_seq_lens_k",
"cu_seq_lens_q",
"max_length_k",
"max_length_q",
]:
self.assertNotIn(unexpected_key, batch)
for expected_key in [
"position_ids",
"seq_idx",
]:
self.assertIn(expected_key, batch)
self.assertEqual(batch["input_ids"].shape, (1, 16))
self.assertEqual(
batch["input_ids"][0].tolist(), [10, 11, 12, 20, 21, 22, 23, 24, 25, 30, 31, 32, 33, 34, 35, 36]
)
self.assertEqual(batch["position_ids"].shape, (1, 16))
self.assertEqual(batch["position_ids"][0].tolist(), [0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(batch["seq_idx"].shape, batch["input_ids"].shape)
self.assertEqual(batch["seq_idx"][0].tolist(), [0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2])
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(
tokenizer, padding="max_length", max_length=10, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
def test_data_collator_for_seq2seq(self):
def create_features():
return [
{"input_ids": list(range(3)), "labels": list(range(3))},
{"input_ids": list(range(6)), "labels": list(range(6))},
]
tokenizer = BertTokenizer(self.vocab_file)
features = create_features()
data_collator = DataCollatorForSeq2Seq(tokenizer, padding=PaddingStrategy.LONGEST, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), list(range(3)) + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["input_ids"][1].tolist(), list(range(6)))
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), list(range(3)) + [-100] * 3)
self.assertEqual(batch["labels"][1].tolist(), list(range(6)))
data_collator = DataCollatorForSeq2Seq(
tokenizer, padding=PaddingStrategy.MAX_LENGTH, max_length=7, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 7))
self.assertEqual(batch["input_ids"][0].tolist(), list(range(3)) + [tokenizer.pad_token_id] * 4)
self.assertEqual(batch["input_ids"][1].tolist(), list(range(6)) + [tokenizer.pad_token_id] * 1)
self.assertEqual(batch["labels"].shape, (2, 7))
self.assertEqual(batch["labels"][0].tolist(), list(range(3)) + [-100] * 4)
self.assertEqual(batch["labels"][1].tolist(), list(range(6)) + [-100] * 1)
data_collator = DataCollatorForSeq2Seq(tokenizer, padding=PaddingStrategy.DO_NOT_PAD, return_tensors="np")
# numpy doesn't have issues handling unequal shapes via `dtype=object`
# with self.assertRaises(ValueError):
# data_collator(features)
batch = data_collator([features[0], features[0]])
self.assertEqual(batch["input_ids"][0].tolist(), features[0]["input_ids"])
self.assertEqual(batch["input_ids"][1].tolist(), features[0]["input_ids"])
self.assertEqual(batch["labels"][0].tolist(), features[0]["labels"])
self.assertEqual(batch["labels"][1].tolist(), features[0]["labels"])
data_collator = DataCollatorForSeq2Seq(
tokenizer, padding=PaddingStrategy.LONGEST, pad_to_multiple_of=8, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
# side effects on labels cause mismatch on longest strategy
features = create_features()
data_collator = DataCollatorForSeq2Seq(
tokenizer, padding=PaddingStrategy.LONGEST, label_pad_token_id=-1, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), list(range(3)) + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["input_ids"][1].tolist(), list(range(6)))
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), list(range(3)) + [-1] * 3)
self.assertEqual(batch["labels"][1].tolist(), list(range(6)))
for feature in features:
feature.pop("labels")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), list(range(3)) + [tokenizer.pad_token_id] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForLanguageModeling(
tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="np"
)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
tokenizer.pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_language_modeling_with_seed(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": list(range(1000))}, {"input_ids": list(range(1000))}]
# check if seed is respected between two different DataCollatorForLanguageModeling instances
data_collator = DataCollatorForLanguageModeling(tokenizer, seed=42, return_tensors="np")
batch_1 = data_collator(features)
self.assertEqual(batch_1["input_ids"].shape, (2, 1000))
self.assertEqual(batch_1["labels"].shape, (2, 1000))
data_collator = DataCollatorForLanguageModeling(tokenizer, seed=42, return_tensors="np")
batch_2 = data_collator(features)
self.assertEqual(batch_2["input_ids"].shape, (2, 1000))
self.assertEqual(batch_2["labels"].shape, (2, 1000))
self.assertTrue(np.all(batch_1["input_ids"] == batch_2["input_ids"]))
self.assertTrue(np.all(batch_1["labels"] == batch_2["labels"]))
data_collator = DataCollatorForLanguageModeling(tokenizer, seed=43, return_tensors="np")
batch_3 = data_collator(features)
self.assertEqual(batch_3["input_ids"].shape, (2, 1000))
self.assertEqual(batch_3["labels"].shape, (2, 1000))
self.assertFalse(np.all(batch_1["input_ids"] == batch_3["input_ids"]))
self.assertFalse(np.all(batch_1["labels"] == batch_3["labels"]))
def test_data_collator_for_whole_word_mask(self):
tokenizer = BertTokenizerFast(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="np")
input_tokens = [f"token_{i}" for i in range(8)]
tokenizer.add_tokens(input_tokens)
features = [tokenizer(" ".join(input_tokens), return_offsets_mapping=True) for _ in range(2)]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
# Features can already be tensors
features = [
tokenizer(" ".join(input_tokens), return_offsets_mapping=True).convert_to_tensors("np") for _ in range(2)
]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
def test_data_collator_for_whole_word_mask_with_seed(self):
tokenizer = BertTokenizerFast(self.vocab_file)
input_tokens = [f"token_{i}" for i in range(998)]
tokenizer.add_tokens(input_tokens)
features = [tokenizer(" ".join(input_tokens), return_offsets_mapping=True) for _ in range(2)]
# check if seed is respected between two different DataCollatorForWholeWordMask instances
data_collator = DataCollatorForWholeWordMask(tokenizer, seed=42, return_tensors="np")
batch_1 = data_collator(features)
self.assertEqual(batch_1["input_ids"].shape, (2, 1000))
self.assertEqual(batch_1["labels"].shape, (2, 1000))
data_collator = DataCollatorForWholeWordMask(tokenizer, seed=42, return_tensors="np")
batch_2 = data_collator(features)
self.assertEqual(batch_2["input_ids"].shape, (2, 1000))
self.assertEqual(batch_2["labels"].shape, (2, 1000))
self.assertTrue(np.all(batch_1["input_ids"] == batch_2["input_ids"]))
self.assertTrue(np.all(batch_1["labels"] == batch_2["labels"]))
data_collator = DataCollatorForWholeWordMask(tokenizer, seed=43, return_tensors="np")
batch_3 = data_collator(features)
self.assertEqual(batch_3["input_ids"].shape, (2, 1000))
self.assertEqual(batch_3["labels"].shape, (2, 1000))
self.assertFalse(np.all(batch_1["input_ids"] == batch_3["input_ids"]))
self.assertFalse(np.all(batch_1["labels"] == batch_3["labels"]))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 5))
self.assertEqual(batch["token_type_ids"].shape, (2, 5))
self.assertEqual(batch["labels"].shape, (2, 5))
self.assertEqual(batch["next_sentence_label"].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["token_type_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
self.assertEqual(batch["next_sentence_label"].shape, (2,))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": np.array([0, 1, 2, 3, 4]),
"token_type_ids": np.array([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 5))
self.assertEqual(batch["token_type_ids"].shape, (2, 5))
self.assertEqual(batch["labels"].shape, (2, 5))
self.assertEqual(batch["sentence_order_label"].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["token_type_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
self.assertEqual(batch["sentence_order_label"].shape, (2,))
| NumpyDataCollatorIntegrationTest |
python | numba__numba | numba/parfors/parfor.py | {
"start": 70200,
"end": 81710
} | class ____:
"""Parfor subpass to convert setitem on Arrays
"""
def __init__(self, pass_states):
"""
Parameters
----------
pass_states : ParforPassStates
"""
self.pass_states = pass_states
self.rewritten = []
def run(self, blocks):
pass_states = self.pass_states
# convert setitem expressions like A[C] = c or A[C] = B[C] to parfor,
# where C is a boolean array.
topo_order = find_topo_order(blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = pass_states.array_analysis.get_equiv_set(label)
for instr in block.body:
if isinstance(instr, (ir.StaticSetItem, ir.SetItem)):
loc = instr.loc
target = instr.target
index = get_index_var(instr)
value = instr.value
target_typ = pass_states.typemap[target.name]
index_typ = pass_states.typemap[index.name]
value_typ = pass_states.typemap[value.name]
# Handle A[boolean_array] = <scalar or array>
if isinstance(target_typ, types.npytypes.Array):
if (isinstance(index_typ, types.npytypes.Array) and
isinstance(index_typ.dtype, types.Boolean) and
target_typ.ndim == index_typ.ndim):
# RHS is a scalar number
if isinstance(value_typ, types.Number):
new_instr = self._setitem_to_parfor(equiv_set,
loc, target, index, value)
self.rewritten.append(
dict(old=instr, new=new_instr,
reason='masked_assign_broadcast_scalar'),
)
instr = new_instr
# RHS is an array
elif isinstance(value_typ, types.npytypes.Array):
val_def = guard(get_definition, pass_states.func_ir,
value.name)
if (isinstance(val_def, ir.Expr) and
val_def.op == 'getitem' and
val_def.index.name == index.name):
new_instr = self._setitem_to_parfor(equiv_set,
loc, target, index, val_def.value)
self.rewritten.append(
dict(old=instr, new=new_instr,
reason='masked_assign_array'),
)
instr = new_instr
else:
# Handle A[:] = x
shape = equiv_set.get_shape(instr)
# Don't converted broadcasted setitems into parfors.
if isinstance(index_typ, types.BaseTuple):
# The sliced dims are those in the index that
# are made of slices. Count the numbers of slices
# in the index tuple.
sliced_dims = len(list(filter(
lambda x: isinstance(x, types.misc.SliceType),
index_typ.types)))
elif isinstance(index_typ, types.misc.SliceType):
# For singular indices there can be a bare slice
# and if so there is one dimension being set.
sliced_dims = 1
else:
sliced_dims = 0
# Only create a parfor for this setitem if we know the
# shape of the output and number of dimensions set is
# equal to the number of dimensions on the right side.
if (shape is not None and
(not isinstance(value_typ, types.npytypes.Array) or
sliced_dims == value_typ.ndim)):
new_instr = self._setitem_to_parfor(equiv_set,
loc, target, index, value, shape=shape)
self.rewritten.append(
dict(old=instr, new=new_instr,
reason='slice'),
)
instr = new_instr
new_body.append(instr)
block.body = new_body
def _setitem_to_parfor(self, equiv_set, loc, target, index, value, shape=None):
"""generate parfor from setitem node with a boolean or slice array indices.
The value can be either a scalar or an array variable, and if a boolean index
is used for the latter case, the same index must be used for the value too.
"""
pass_states = self.pass_states
scope = target.scope
arr_typ = pass_states.typemap[target.name]
el_typ = arr_typ.dtype
index_typ = pass_states.typemap[index.name]
init_block = ir.Block(scope, loc)
if shape:
# Slice index is being used on the target array, we'll have to create
# a sub-array so that the target dimension matches the given shape.
assert(isinstance(index_typ, types.BaseTuple) or
isinstance(index_typ, types.SliceType))
# setitem has a custom target shape
size_vars = shape
# create a new target array via getitem
subarr_var = ir.Var(scope, mk_unique_var("$subarr"), loc)
getitem_call = ir.Expr.getitem(target, index, loc)
subarr_typ = typing.arraydecl.get_array_index_type( arr_typ, index_typ).result
pass_states.typemap[subarr_var.name] = subarr_typ
pass_states.calltypes[getitem_call] = self._type_getitem((arr_typ, index_typ))
init_block.append(ir.Assign(getitem_call, subarr_var, loc))
target = subarr_var
else:
# Otherwise it is a boolean array that is used as index.
assert(isinstance(index_typ, types.ArrayCompatible))
size_vars = equiv_set.get_shape(target)
bool_typ = index_typ.dtype
# generate loopnests and size variables from lhs correlations
loopnests = []
index_vars = []
for size_var in size_vars:
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
pass_states.typemap[index_var.name] = types.uintp
loopnests.append(LoopNest(index_var, 0, size_var, 1))
# generate body
body_label = next_label()
body_block = ir.Block(scope, loc)
index_var, index_var_typ = _make_index_var(
pass_states.typemap, scope, index_vars, body_block)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('setitem', ''), pass_states.flags)
if shape:
# slice subarray
parfor.loop_body = {body_label: body_block}
true_block = body_block
end_label = None
else:
# boolean mask
true_label = next_label()
true_block = ir.Block(scope, loc)
end_label = next_label()
end_block = ir.Block(scope, loc)
parfor.loop_body = {body_label: body_block,
true_label: true_block,
end_label: end_block,
}
mask_var = ir.Var(scope, mk_unique_var("$mask_var"), loc)
pass_states.typemap[mask_var.name] = bool_typ
mask_val = ir.Expr.getitem(index, index_var, loc)
body_block.body.extend([
ir.Assign(mask_val, mask_var, loc),
ir.Branch(mask_var, true_label, end_label, loc)
])
value_typ = pass_states.typemap[value.name]
if isinstance(value_typ, types.npytypes.Array):
value_var = ir.Var(scope, mk_unique_var("$value_var"), loc)
pass_states.typemap[value_var.name] = value_typ.dtype
getitem_call = ir.Expr.getitem(value, index_var, loc)
pass_states.calltypes[getitem_call] = signature(
value_typ.dtype, value_typ, index_var_typ)
true_block.body.append(ir.Assign(getitem_call, value_var, loc))
else:
value_var = value
setitem_node = ir.SetItem(target, index_var, value_var, loc)
pass_states.calltypes[setitem_node] = signature(
types.none, pass_states.typemap[target.name], index_var_typ, el_typ)
true_block.body.append(setitem_node)
if end_label:
true_block.body.append(ir.Jump(end_label, loc))
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from setitem")
parfor.dump()
return parfor
def _type_getitem(self, args):
fnty = operator.getitem
return self.pass_states.typingctx.resolve_function_type(fnty, tuple(args), {})
def _make_index_var(typemap, scope, index_vars, body_block, force_tuple=False):
""" When generating a SetItem call to an array in a parfor, the general
strategy is to generate a tuple if the array is more than 1 dimension.
If it is 1 dimensional then you can use a simple variable. This routine
is also used when converting pndindex to parfor but pndindex requires a
tuple even if the iteration space is 1 dimensional. The pndindex use of
this function will use force_tuple to make the output index a tuple even
if it is one dimensional.
"""
ndims = len(index_vars)
loc = body_block.loc
if ndims > 1 or force_tuple:
tuple_var = ir.Var(scope, mk_unique_var(
"$parfor_index_tuple_var"), loc)
typemap[tuple_var.name] = types.containers.UniTuple(
types.uintp, ndims)
tuple_call = ir.Expr.build_tuple(list(index_vars), loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
body_block.body.append(tuple_assign)
return tuple_var, types.containers.UniTuple(types.uintp, ndims)
elif ndims == 1:
return index_vars[0], types.uintp
else:
raise errors.UnsupportedRewriteError(
"Parfor does not handle arrays of dimension 0",
loc=loc,
)
def _mk_parfor_loops(typemap, size_vars, scope, loc):
"""
Create loop index variables and build LoopNest objects for a parfor.
"""
loopnests = []
index_vars = []
for size_var in size_vars:
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
typemap[index_var.name] = types.uintp
loopnests.append(LoopNest(index_var, 0, size_var, 1))
return index_vars, loopnests
| ConvertSetItemPass |
python | optuna__optuna | optuna/study/_study_summary.py | {
"start": 306,
"end": 4210
} | class ____:
"""Basic attributes and aggregated results of a :class:`~optuna.study.Study`.
See also :func:`optuna.study.get_all_study_summaries`.
Attributes:
study_name:
Name of the :class:`~optuna.study.Study`.
direction:
:class:`~optuna.study.StudyDirection` of the :class:`~optuna.study.Study`.
.. note::
This attribute is only available during single-objective optimization.
directions:
A sequence of :class:`~optuna.study.StudyDirection` objects.
best_trial:
:class:`optuna.trial.FrozenTrial` with best objective value in the
:class:`~optuna.study.Study`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with
:func:`optuna.study.Study.set_user_attr`.
system_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally
set by Optuna.
.. warning::
Deprecated in v3.1.0. ``system_attrs`` argument will be removed in the future.
The removal of this feature is currently scheduled for v5.0.0,
but this schedule is subject to change.
See https://github.com/optuna/optuna/releases/tag/v3.1.0.
n_trials:
The number of trials ran in the :class:`~optuna.study.Study`.
datetime_start:
Datetime where the :class:`~optuna.study.Study` started.
"""
def __init__(
self,
study_name: str,
direction: StudyDirection | None,
best_trial: trial.FrozenTrial | None,
user_attrs: dict[str, Any],
system_attrs: dict[str, Any],
n_trials: int,
datetime_start: datetime.datetime | None,
study_id: int,
*,
directions: Sequence[StudyDirection] | None = None,
):
self.study_name = study_name
if direction is None and directions is None:
raise ValueError("Specify one of `direction` and `directions`.")
elif directions is not None:
self._directions = list(directions)
elif direction is not None:
self._directions = [direction]
else:
raise ValueError("Specify only one of `direction` and `directions`.")
self.best_trial = best_trial
self.user_attrs = user_attrs
self._system_attrs = system_attrs
self.n_trials = n_trials
self.datetime_start = datetime_start
self._study_id = study_id
def __eq__(self, other: Any) -> bool:
if not isinstance(other, StudySummary):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other: Any) -> bool:
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id < other._study_id
def __le__(self, other: Any) -> bool:
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id <= other._study_id
@property
def direction(self) -> StudyDirection:
if len(self._directions) > 1:
raise RuntimeError(
"This attribute is not available during multi-objective optimization."
)
return self._directions[0]
@property
def directions(self) -> Sequence[StudyDirection]:
return self._directions
@property
def system_attrs(self) -> dict[str, Any]:
    """Internally-set Optuna attributes (deprecated accessor; emits FutureWarning)."""
    optuna_warn(
        "`system_attrs` has been deprecated in v3.1.0. "
        "The removal of this feature is currently scheduled for v5.0.0, "
        "but this schedule is subject to change. "
        "See https://github.com/optuna/optuna/releases/tag/v3.1.0.",
        FutureWarning,
    )
    return self._system_attrs
| StudySummary |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/literals3.py | {
"start": 97,
"end": 202
} | class ____(Enum):
SOME_ENUM_VALUE1 = "1"
SOME_ENUM_VALUE2 = "2"
SOME_ENUM_VALUE3 = "3"
| SomeEnum |
python | pypa__pipenv | pipenv/utils/markers.py | {
"start": 567,
"end": 23940
class ____:
    """Container for the PEP 508 environment-marker fields a Pipfile entry may set."""

    # Each attribute mirrors a PEP 508 marker variable; None means "unconstrained".
    os_name: Optional[str] = None
    sys_platform: Optional[str] = None
    platform_machine: Optional[str] = None
    platform_python_implementation: Optional[str] = None
    platform_release: Optional[str] = None
    platform_system: Optional[str] = None
    platform_version: Optional[str] = None
    python_version: Optional[str] = None
    python_full_version: Optional[str] = None
    implementation_name: Optional[str] = None
    implementation_version: Optional[str] = None

    @classmethod
    def make_marker(cls, marker_string):
        """Parse *marker_string* into a Marker, re-raising parse errors as RequirementError."""
        try:
            marker = Marker(marker_string)
        except InvalidMarker:
            raise RequirementError(
                f"Invalid requirement: Invalid marker {marker_string!r}"
            )
        return marker

    @classmethod
    def from_pipfile(cls, name, pipfile):
        """Combine all marker-related keys of a Pipfile entry into one "and"-joined Marker.

        Returns None (implicitly) when the combined string fails to parse.
        """
        attr_fields = list(fields(cls))
        # Only keys matching our declared marker fields are picked up.
        found_keys = [k.name for k in attr_fields if k.name in pipfile]
        marker_strings = [f"{k} {pipfile[k]}" for k in found_keys]
        if pipfile.get("markers"):
            marker_strings.append(pipfile.get("markers"))
        if pipfile.get("sys_platform"):
            # NOTE(review): no comparison operator is inserted here, producing e.g.
            # "sys_platform 'linux'" — confirm this is parseable; an invalid string
            # makes make_marker raise and this method fall through to None.
            marker_strings.append(f"sys_platform '{pipfile['sys_platform']}'")
        if pipfile.get("platform_machine"):
            # NOTE(review): same operator-less form as sys_platform above.
            marker_strings.append(f"platform_machine '{pipfile['platform_machine']}'")
        markers = set()
        for marker in marker_strings:
            markers.add(marker)
        combined_marker = None
        try:
            combined_marker = cls.make_marker(" and ".join(sorted(markers)))
        except RequirementError:
            # Best-effort: unparseable combinations yield no marker at all.
            pass
        else:
            return combined_marker
def is_instance(item, cls):
    # type: (Any, Type) -> bool
    """Return True when *item* is an instance of *cls*, or when its class merely
    shares *cls*'s name (tolerates the same class imported twice)."""
    return isinstance(item, cls) or item.__class__.__name__ == cls.__name__
def _tuplize_version(version):
# type: (str) -> Union[Tuple[()], Tuple[int, ...], Tuple[int, int, str]]
output = []
for idx, part in enumerate(version.split(".")):
if part == "*":
break
if idx in (0, 1):
# Only convert the major and minor identifiers into integers (if present),
# the patch identifier can include strings like 'b' marking a beta: ex 3.11.0b1
part = int(part)
output.append(part)
return tuple(output)
def _format_version(version) -> str:
if not isinstance(version, str):
return ".".join(str(i) for i in version)
return version
# Prefer [x,y) ranges.
REPLACE_RANGES = {">": ">=", "<=": "<"}
def _format_pyspec(specifier):
# type: (Union[str, Specifier]) -> Specifier
if isinstance(specifier, str):
if not specifier.startswith(tuple(Specifier._operators.keys())):
specifier = f"=={specifier}"
specifier = Specifier(specifier)
version = getattr(specifier, "version", specifier).rstrip()
if version:
if version.startswith("*"):
# don't parse invalid identifiers
return specifier
if version.endswith("*"):
if version.endswith(".*"):
version = version[:-2]
version = version.rstrip("*")
specifier = Specifier(f"{specifier.operator}{version}")
try:
op = REPLACE_RANGES[specifier.operator]
except KeyError:
return specifier
curr_tuple = _tuplize_version(version)
try:
next_tuple = (curr_tuple[0], curr_tuple[1] + 1)
except IndexError:
next_tuple = (curr_tuple[0], 1)
if (
next_tuple[0] not in MAX_VERSIONS
or not next_tuple[1] <= MAX_VERSIONS[next_tuple[0]]
):
if (
specifier.operator == "<"
and next_tuple[0] in MAX_VERSIONS
and curr_tuple[1] <= MAX_VERSIONS[next_tuple[0]]
):
op = "<="
next_tuple = (next_tuple[0], curr_tuple[1])
else:
return specifier
specifier = Specifier(f"{op}{_format_version(next_tuple)}")
return specifier
def _get_specs(specset):
if specset is None:
return
if is_instance(specset, Specifier):
new_specset = SpecifierSet()
specs = set()
specs.add(specset)
new_specset._specs = frozenset(specs)
specset = new_specset
if isinstance(specset, str):
specset = SpecifierSet(specset)
result = []
for spec in set(specset):
version = spec.version
op = spec.operator
if op in ("in", "not in"):
versions = version.split(",")
op = "==" if op == "in" else "!="
result += [(op, _tuplize_version(ver.strip())) for ver in versions]
else:
result.append((spec.operator, _tuplize_version(spec.version)))
return sorted(result, key=operator.itemgetter(1))
# TODO: Rename this to something meaningful
def _group_by_op(specs):
# type: (Union[Set[Specifier], SpecifierSet]) -> Iterator
specs = [_get_specs(x) for x in list(specs)]
flattened = [
((op, len(version) > 2), version) for spec in specs for op, version in spec
]
specs = sorted(flattened)
grouping = itertools.groupby(specs, key=operator.itemgetter(0))
return grouping
# TODO: rename this to something meaningful
def normalize_specifier_set(specs):
# type: (Union[str, SpecifierSet]) -> Optional[Set[Specifier]]
"""Given a specifier set, a string, or an iterable, normalize the
specifiers.
.. note:: This function exists largely to deal with ``pyzmq`` which handles
the ``requires_python`` specifier incorrectly, using ``3.7*`` rather than
the correct form of ``3.7.*``. This workaround can likely go away if
we ever introduce enforcement for metadata standards on PyPI.
:param Union[str, SpecifierSet] specs: Supplied specifiers to normalize
:return: A new set of specifiers or specifierset
:rtype: Union[Set[Specifier], :class:`~packaging.specifiers.SpecifierSet`]
"""
if not specs:
return None
if isinstance(specs, set):
return specs
# when we aren't dealing with a string at all, we can normalize this as usual
elif not isinstance(specs, str):
return {_format_pyspec(spec) for spec in specs}
spec_list = []
for spec in specs.split(","):
spec = spec.strip()
if spec.endswith(".*"):
spec = spec[:-2]
spec = spec.rstrip("*")
spec_list.append(spec)
return normalize_specifier_set(SpecifierSet(",".join(spec_list)))
# TODO: Check if this is used by anything public otherwise make it private
# And rename it to something meaningful
def get_sorted_version_string(version_set):
    # type: (Set[AnyStr]) -> AnyStr
    """Format every version in *version_set* and join them, sorted, with ", "."""
    return ", ".join(sorted(_format_version(member) for member in version_set))
# TODO: Rename this to something meaningful
# TODO: Add a deprecation decorator and deprecate this -- i'm sure it's used
# in other libraries
def cleanup_pyspecs(specs, joiner="or"):
specs = normalize_specifier_set(specs)
# for != operator we want to group by version
# if all are consecutive, join as a list
results = {}
translation_map = {
# if we are doing an or operation, we need to use the min for >=
# this way OR(>=2.6, >=2.7, >=3.6) picks >=2.6
# if we do an AND operation we need to use MAX to be more selective
(">", ">="): {
"or": lambda x: _format_version(min(x)),
"and": lambda x: _format_version(max(x)),
},
# we use inverse logic here so we will take the max value if we are
# using OR but the min value if we are using AND
("<", "<="): {
"or": lambda x: _format_version(max(x)),
"and": lambda x: _format_version(min(x)),
},
# leave these the same no matter what operator we use
("!=", "==", "~=", "==="): {
"or": get_sorted_version_string,
"and": get_sorted_version_string,
},
}
op_translations = {
"!=": lambda x: "not in" if len(x) > 1 else "!=",
"==": lambda x: "in" if len(x) > 1 else "==",
}
translation_keys = list(translation_map.keys())
for op_and_version_type, versions in _group_by_op(tuple(specs)):
op = op_and_version_type[0]
versions = [version[1] for version in versions]
versions = sorted(dict.fromkeys(versions)) # remove duplicate entries
op_key = next(iter(k for k in translation_keys if op in k), None)
version_value = versions
if op_key is not None:
version_value = translation_map[op_key][joiner](versions)
if op in op_translations:
op = op_translations[op](versions)
results[(op, op_and_version_type[1])] = version_value
return sorted([(k[0], v) for k, v in results.items()], key=operator.itemgetter(1))
# TODO: Rename this to something meaningful
def fix_version_tuple(version_tuple):
    # type: (Tuple[AnyStr, AnyStr]) -> Tuple[AnyStr, AnyStr]
    """Clamp an ``(op, version)`` pair against the known maximum Python versions."""
    op, version = version_tuple
    highest_major = max(MAX_VERSIONS.keys())
    if version[0] > highest_major:
        # Beyond any known major: snap to the newest version we know about.
        return (op, (highest_major, MAX_VERSIONS[highest_major]))
    if version[0] not in MAX_VERSIONS:
        # Unknown (e.g. future) major versions pass through untouched on purpose.
        return (op, version)
    ceiling = MAX_VERSIONS[version[0]]
    if op == "<" and version[1] > ceiling and version[1] - 1 <= ceiling:
        # "< X.(max+1)" is equivalent to "<= X.max"; prefer the closed bound.
        return ("<=", (version[0], version[1] - 1))
    return (op, version)
def _ensure_marker(marker):
    # type: (Union[str, Marker]) -> Marker
    """Coerce *marker* into a Marker instance (no-op when it already is one)."""
    return marker if is_instance(marker, Marker) else Marker(str(marker))
def gen_marker(mkr):
    # type: (List[str]) -> Marker
    """Wrap a raw marker element in a real Marker by rewriting a dummy instance."""
    shell = Marker("python_version == '1'")
    # Replace the dummy's single parsed element with the supplied one.
    shell._markers[:] = [mkr]
    return shell
def _strip_extra(elements):
    """Remove the "extra == ..." operands (and their joining "and") from the list, in place."""
    return _strip_marker_elem("extra", elements)
def _strip_pyversion(elements):
    """Remove every "python_version ..." operand from the marker list, in place."""
    return _strip_marker_elem("python_version", elements)
def _strip_marker_elem(elem_name, elements):
    """Remove the supplied element from the marker.

    This is not a comprehensive implementation, but relies on an
    important characteristic of metadata generation: The element's
    operand is always associated with an "and" operator. This means that
    we can simply remove the operand and the "and" operator associated
    with it.

    Returns True when *elements* is empty afterwards (the stripped operand
    was the marker's only content).
    """
    extra_indexes = []
    # "extra" clauses are only ever joined with "and"; other variables may
    # follow either joiner.
    preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
    for i, element in enumerate(elements):
        if isinstance(element, list):
            # Nested sub-expression: strip recursively and drop it entirely
            # if stripping emptied it.
            cancelled = _strip_marker_elem(elem_name, element)
            if cancelled:
                extra_indexes.append(i)
        elif isinstance(element, tuple) and element[0].value == elem_name:
            extra_indexes.append(i)
    # Delete from the back so earlier indexes remain valid.
    for i in reversed(extra_indexes):
        del elements[i]
        if i > 0 and elements[i - 1] in preceding_operators:
            # Remove the "and" before it.
            del elements[i - 1]
        elif elements:
            # This shouldn't ever happen, but is included for completeness.
            # If there is not an "and" before this element, try to remove the
            # operator after it.
            del elements[0]
    return not elements
def _get_stripped_marker(marker, strip_func):
    """Return *marker* cleaned in place by *strip_func*, or None if nothing remains."""
    if not marker:
        return None
    marker = _ensure_marker(marker)
    # strip_func mutates the element list in place.
    strip_func(marker._markers)
    return marker if marker._markers else None
def get_without_extra(marker):
    """Return *marker* with its ``extra == ...`` clause stripped.

    Relies on packaging's marker internals. Yields None when the extra
    clause was the only content of the marker.
    """
    return _get_stripped_marker(marker, _strip_extra)
def get_without_pyversion(marker):
    """Return *marker* with its ``python_version`` clauses stripped.

    Yields None when those clauses were the marker's only content.
    """
    return _get_stripped_marker(marker, _strip_pyversion)
def _markers_collect_extras(markers, collection):
# Optimization: the marker element is usually appended at the end.
for el in reversed(markers):
if isinstance(el, tuple) and el[0].value == "extra" and el[1].value == "==":
collection.add(el[2].value)
elif isinstance(el, list):
_markers_collect_extras(el, collection)
def _markers_collect_pyversions(markers, collection):
local_collection = []
marker_format_str = "{0}"
for el in reversed(markers):
if isinstance(el, tuple) and el[0].value == "python_version":
new_marker = str(gen_marker(el))
local_collection.append(marker_format_str.format(new_marker))
elif isinstance(el, list):
_markers_collect_pyversions(el, local_collection)
if local_collection:
# local_collection = "{0}".format(" ".join(local_collection))
collection.extend(local_collection)
def _markers_contains_extra(markers):
    """True when the marker element tree references ``extra`` anywhere."""
    return _markers_contains_key(markers, "extra")
def _markers_contains_pyversion(markers):
    """True when the marker element tree references ``python_version`` anywhere."""
    return _markers_contains_key(markers, "python_version")
def _markers_contains_key(markers, key):
for element in reversed(markers):
if isinstance(element, tuple) and element[0].value == key:
return True
elif isinstance(element, list) and _markers_contains_key(element, key):
return True
return False
def get_contained_extras(marker):
    """Collect "extra == ..." operands from a marker.

    Returns the set of extra names mentioned; empty set for a falsy marker.
    """
    if not marker:
        return set()
    found = set()
    _markers_collect_extras(_ensure_marker(marker)._markers, found)
    return found
def get_contained_pyversions(marker):
"""Collect all `python_version` operands from a marker."""
collection = []
if not marker:
return set()
marker = _ensure_marker(marker)
# Collect the (Variable, Op, Value) tuples and string joiners from the marker
_markers_collect_pyversions(marker._markers, collection)
marker_str = " and ".join(sorted(collection))
if not marker_str:
return set()
# Use the distlib dictionary parser to create a dictionary 'trie' which is a bit
# easier to reason about
marker_dict = markers.parse_marker(marker_str)[0]
version_set = set()
pyversions, _ = parse_marker_dict(marker_dict)
if isinstance(pyversions, set):
version_set.update(pyversions)
elif pyversions is not None:
version_set.add(pyversions)
# Each distinct element in the set was separated by an "and" operator in the marker
# So we will need to reduce them with an intersection here rather than a union
# in order to find the boundaries
versions = set()
if version_set:
versions = reduce(lambda x, y: x & y, version_set)
return versions
def contains_extra(marker):
    """Check whether a marker contains an "extra == ..." operand."""
    if not marker:
        return False
    return _markers_contains_extra(_ensure_marker(marker)._markers)
def contains_pyversion(marker):
    """Check whether a marker contains a python_version operand."""
    if not marker:
        return False
    return _markers_contains_pyversion(_ensure_marker(marker)._markers)
def _split_specifierset_str(specset_str, prefix="=="):
# type: (str, str) -> Set[Specifier]
"""Take a specifierset string and split it into a list to join for
specifier sets.
:param str specset_str: A string containing python versions, often comma separated
:param str prefix: A prefix to use when generating the specifier set
:return: A list of :class:`Specifier` instances generated with the provided prefix
:rtype: Set[Specifier]
"""
specifiers = set()
if "," not in specset_str and " " in specset_str:
values = [v.strip() for v in specset_str.split()]
else:
values = [v.strip() for v in specset_str.split(",")]
if prefix == "!=" and any(v in values for v in DEPRECATED_VERSIONS):
values += DEPRECATED_VERSIONS[:]
for value in sorted(values):
specifiers.add(Specifier(f"{prefix}{value}"))
return specifiers
def _get_specifiers_from_markers(marker_item):
"""Given a marker item, get specifiers from the version marker.
:param :class:`~packaging.markers.Marker` marker_sequence: A marker describing a version constraint
:return: A set of specifiers corresponding to the marker constraint
:rtype: Set[Specifier]
"""
specifiers = set()
if isinstance(marker_item, tuple):
variable, op, value = marker_item
if variable.value != "python_version":
return specifiers
if op.value == "in":
specifiers.update(_split_specifierset_str(value.value, prefix="=="))
elif op.value == "not in":
specifiers.update(_split_specifierset_str(value.value, prefix="!="))
else:
specifiers.add(Specifier(f"{op.value}{value.value}"))
elif isinstance(marker_item, list):
parts = get_specset(marker_item)
if parts:
specifiers.update(parts)
return specifiers
def get_specset(marker_list):
# type: (List) -> Optional[SpecifierSet]
specset = set()
_last_str = "and"
for marker_parts in marker_list:
if isinstance(marker_parts, str):
_last_str = marker_parts # noqa
else:
specset.update(_get_specifiers_from_markers(marker_parts))
specifiers = SpecifierSet()
specifiers._specs = frozenset(specset)
return specifiers
# TODO: Refactor this (reduce complexity)
def parse_marker_dict(marker_dict):
op = marker_dict["op"]
lhs = marker_dict["lhs"]
rhs = marker_dict["rhs"]
# This is where the spec sets for each side land if we have an "or" operator
side_spec_list = []
side_markers_list = []
finalized_marker = ""
# And if we hit the end of the parse tree we use this format string to make a marker
format_string = "{lhs} {op} {rhs}"
specset = SpecifierSet()
specs = set()
# Essentially we will iterate over each side of the parsed marker if either one is
# A mapping instance (i.e. a dictionary) and recursively parse and reduce the specset
# Union the "and" specs, intersect the "or"s to find the most appropriate range
if any(isinstance(side, Mapping) for side in (lhs, rhs)):
for side in (lhs, rhs):
side_specs = set()
side_markers = set()
if isinstance(side, Mapping):
merged_side_specs, merged_side_markers = parse_marker_dict(side)
side_specs.update(merged_side_specs)
side_markers.update(merged_side_markers)
else:
marker = _ensure_marker(side)
marker_parts = getattr(marker, "_markers", [])
if marker_parts[0][0].value == "python_version":
side_specs |= set(get_specset(marker_parts))
else:
side_markers.add(str(marker))
side_spec_list.append(side_specs)
side_markers_list.append(side_markers)
if op == "and":
# When we are "and"-ing things together, it probably makes the most sense
# to reduce them here into a single PySpec instance
specs = reduce(lambda x, y: set(x) | set(y), side_spec_list)
markers = reduce(lambda x, y: set(x) | set(y), side_markers_list)
if not specs and not markers:
return specset, finalized_marker
if markers and isinstance(markers, (tuple, list, Set)):
finalized_marker = Marker(" and ".join([m for m in markers if m]))
elif markers:
finalized_marker = str(markers)
specset._specs = frozenset(specs)
return specset, finalized_marker
# Actually when we "or" things as well we can also just turn them into a reduced
# set using this logic now
sides = reduce(lambda x, y: set(x) & set(y), side_spec_list)
finalized_marker = " or ".join(
[normalize_marker_str(m) for m in side_markers_list]
)
specset._specs = frozenset(sorted(sides))
return specset, finalized_marker
else:
# At the tip of the tree we are dealing with strings all around and they just need
# to be smashed together
specs = set()
if lhs == "python_version":
format_string = "{lhs}{op}{rhs}"
marker = Marker(format_string.format(**marker_dict))
marker_parts = getattr(marker, "_markers", [])
_set = get_specset(marker_parts)
if _set:
specs |= set(_set)
specset._specs = frozenset(specs)
return specset, finalized_marker
def _contains_micro_version(version_string):
return re.search(r"\d+\.\d+\.\d+", version_string) is not None
def merge_markers(m1, m2):
    # type: (Marker, Marker) -> Optional[Marker]
    """Conjoin two markers with "and"; when either is falsy, return the other
    (or None when both are falsy)."""
    if not all((m1, m2)):
        # At most one real marker: hand it back (or None if neither is set).
        return next(iter(v for v in (m1, m2) if v), None)
    _markers = [str(_ensure_marker(marker)) for marker in (m1, m2)]
    marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m])
    return _ensure_marker(normalize_marker_str(marker_str))
def normalize_marker_str(marker) -> str:
    """Canonicalize a marker string: pull out python_version constraints, reduce
    them via cleanup_pyspecs, and re-join with the remaining clauses.

    NOTE(review): annotated ``-> str`` but returns ``None`` for a falsy input
    marker — callers must tolerate None.
    """
    marker_str = ""
    if not marker:
        return None
    if not is_instance(marker, Marker):
        marker = _ensure_marker(marker)
    # Split the python_version constraints away from everything else.
    pyversion = get_contained_pyversions(marker)
    marker = get_without_pyversion(marker)
    if pyversion:
        parts = cleanup_pyspecs(pyversion)
        marker_str = " and ".join([format_pyversion(pv) for pv in parts])
    if marker:
        if marker_str:
            marker_str = f"{marker_str!s} and {marker!s}"
        else:
            marker_str = f"{marker!s}"
    # Markers conventionally use single quotes.
    return marker_str.replace('"', "'")
def marker_from_specifier(spec) -> Marker:
    """Translate a python-version specifier string (e.g. ``>=3.6,<4``) into a
    PEP 508 marker.

    NOTE(review): returns ``None`` (despite the ``-> Marker`` annotation) for
    empty specs and for the wildcard forms "any"/"<any>"/"*".
    """
    if not any(spec.startswith(k) for k in Specifier._operators):
        if spec.strip().lower() in ["any", "<any>", "*"]:
            return None
        # Bare versions are treated as exact pins.
        spec = f"=={spec}"
    elif spec.startswith("==") and spec.count("=") > 3:
        # Collapse runs of four or more '=' (malformed pins) down to "==".
        spec = "=={}".format(spec.lstrip("="))
    if not spec:
        return None
    marker_segments = [
        format_pyversion(marker_segment) for marker_segment in cleanup_pyspecs(spec)
    ]
    marker_str = " and ".join(marker_segments).replace('"', "'")
    return Marker(marker_str)
def format_pyversion(parts):
    """Render an ``(op, version)`` pair as a PEP 508 marker fragment."""
    op, val = parts
    # Values with a micro component (X.Y.Z) must compare against
    # python_full_version; two-part values use python_version.
    if re.search(r"\d+\.\d+\.\d+", val):
        variable = "python_full_version"
    else:
        variable = "python_version"
    return f"{variable} {op} '{val}'"
| PipenvMarkers |
python | getsentry__sentry | src/sentry/workflow_engine/utils/dictpath.py | {
"start": 1957,
"end": 3729
class ____[T]:
    """Successful traversal result: wraps the resolved value plus the key path
    that reached it (the path is only used for error messages on narrowing)."""

    def __init__(self, path: list[str], v: T) -> None:
        # Keys traversed to reach the value; reused when narrowing fails.
        self._path = path
        self._v = v

    def failed(self) -> bool:
        # A success result never reports failure.
        return False

    def get(self, fallback: T | None = None) -> T:
        # `fallback` is ignored on success; the real value is always present.
        return self._v

    def get_or_none(self) -> T | None:
        return self._v

    def is_type[V](self, t: type[V]) -> Result[V]:
        """Narrow to type *t*, or produce a failure result naming the mismatch."""
        v = self._v
        if not isinstance(v, t):
            return _failure(self._path, f"Expected {_tname(t)}, got {_tname(type(v))}")
        return cast(Result[V], self)

    def list_of[V](self, t: type[V]) -> Result[list[V]]:
        """Narrow to list[V]; every element must be an instance of *t*."""
        rr = self.is_type(list)
        if rr.failed():
            return rr
        v = rr.get()
        if not all(isinstance(item, t) for item in v):
            # NOTE(review): the message reports the list's own type rather than
            # the offending element's type — confirm this is intended.
            return _failure(self._path, f"Expected list of {_tname(t)}, got {_tname(type(v))}")
        return cast(Result[list[V]], self)
def _success[T](path: list[str], v: T) -> Result[T]:
    """Build a success Result wrapping *v* as resolved at *path*."""
    return _SuccessResultImpl[T](path, v)
def walk(data: object, *path: str) -> Result[object]:
    """
    Traverse nested dicts along *path* and return a Result.

    Example:
        >>> walk({"a": {"b": "c"}}, "a", "b").get()
        "c"
        >>> walk({"a": {"b": "c"}}, "e", "f", "g").get()
        ValueError: e.f.g: not found!
        >>> walk({"a": {"b": "c"}}, "a", "b", "c").is_type(int).get()
        ValueError: a.b.c: Expected int, got str
    """
    node = data
    visited: list[str] = []
    for key in path:
        visited.append(key)
        if not isinstance(node, dict):
            return _failure(visited, "was not a dict!")
        if key not in node:
            return _failure(visited, "not found!")
        node = node[key]
    return _success(visited, node)
| _SuccessResultImpl |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 16169,
"end": 17081
class ____(DisplayedTransform):
    "Normalize/denorm batch of `TensorImage`"
    # `parameters` names the tensors to move with the model; order=99 runs late
    # in the transform pipeline.
    parameters,order = L('mean', 'std'),99
    def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
    @classmethod
    def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
    def setups(self, dl:DataLoader):
        # Lazily derive statistics from one batch when none were supplied;
        # +1e-7 guards against division by a zero std.
        if self.mean is None or self.std is None:
            x,*_ = dl.one_batch()
            self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
    def encodes(self, x:TensorImage): return (x-self.mean) / self.std
    def decodes(self, x:TensorImage):
        # Match the stats' device to the batch before denormalizing.
        f = to_cpu if x.device.type=='cpu' else noop
        return (x*f(self.std) + f(self.mean))
    _docs=dict(encodes="Normalize batch", decodes="Denormalize batch", setups="Calculate mean/std statistics from DataLoader if not provided")
| Normalize |
python | pandas-dev__pandas | asv_bench/benchmarks/reindex.py | {
"start": 1731,
"end": 2162
class ____:
    """ASV benchmark: time Series.reindex with pad/backfill filling over
    datetime and period indexes."""

    # Benchmark matrix: fill method x index constructor.
    params = [["pad", "backfill"], [date_range, period_range]]
    param_names = ["method", "constructor"]
    def setup(self, method, constructor):
        N = 100000
        self.idx = constructor("1/1/2000", periods=N, freq="1min")
        # Take every other element so reindexing has gaps to fill.
        self.ts = Series(np.random.randn(N), index=self.idx)[::2]
    def time_reindex_method(self, method, constructor):
        self.ts.reindex(self.idx, method=method)
python | gevent__gevent | src/gevent/testing/flaky.py | {
"start": 1795,
"end": 1931
class ____(FlakyTest):
    """
    Use this when the flaky test is definitely caused by an
    unexpected timeout (as opposed to some other source of flakiness).
    """
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate_generic.py | {
"start": 356,
"end": 11658
} | class ____(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(481, 840)
self.averageGroup = QtWidgets.QGroupBox(Form)
self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182))
self.averageGroup.setCheckable(True)
self.averageGroup.setChecked(False)
self.averageGroup.setObjectName("averageGroup")
self.gridLayout_5 = QtWidgets.QGridLayout(self.averageGroup)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setSpacing(0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.avgParamList = QtWidgets.QListWidget(self.averageGroup)
self.avgParamList.setObjectName("avgParamList")
self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
self.decimateGroup = QtWidgets.QFrame(Form)
self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171))
self.decimateGroup.setObjectName("decimateGroup")
self.gridLayout_4 = QtWidgets.QGridLayout(self.decimateGroup)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.clipToViewCheck = QtWidgets.QCheckBox(self.decimateGroup)
self.clipToViewCheck.setObjectName("clipToViewCheck")
self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3)
self.maxTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
self.maxTracesCheck.setObjectName("maxTracesCheck")
self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2)
self.downsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
self.downsampleCheck.setObjectName("downsampleCheck")
self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3)
self.peakRadio = QtWidgets.QRadioButton(self.decimateGroup)
self.peakRadio.setChecked(True)
self.peakRadio.setObjectName("peakRadio")
self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2)
self.maxTracesSpin = QtWidgets.QSpinBox(self.decimateGroup)
self.maxTracesSpin.setObjectName("maxTracesSpin")
self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1)
self.forgetTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
self.forgetTracesCheck.setObjectName("forgetTracesCheck")
self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3)
self.meanRadio = QtWidgets.QRadioButton(self.decimateGroup)
self.meanRadio.setObjectName("meanRadio")
self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2)
self.subsampleRadio = QtWidgets.QRadioButton(self.decimateGroup)
self.subsampleRadio.setObjectName("subsampleRadio")
self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2)
self.autoDownsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
self.autoDownsampleCheck.setChecked(True)
self.autoDownsampleCheck.setObjectName("autoDownsampleCheck")
self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Policy.Maximum, QtWidgets.QSizePolicy.Policy.Minimum)
self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1)
self.downsampleSpin = QtWidgets.QSpinBox(self.decimateGroup)
self.downsampleSpin.setMinimum(1)
self.downsampleSpin.setMaximum(100000)
self.downsampleSpin.setProperty("value", 1)
self.downsampleSpin.setObjectName("downsampleSpin")
self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1)
self.transformGroup = QtWidgets.QFrame(Form)
self.transformGroup.setGeometry(QtCore.QRect(10, 10, 171, 101))
self.transformGroup.setObjectName("transformGroup")
self.gridLayout = QtWidgets.QGridLayout(self.transformGroup)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.logXCheck = QtWidgets.QCheckBox(self.transformGroup)
self.logXCheck.setObjectName("logXCheck")
self.gridLayout.addWidget(self.logXCheck, 2, 0, 1, 1)
self.derivativeCheck = QtWidgets.QCheckBox(self.transformGroup)
self.derivativeCheck.setObjectName("derivativeCheck")
self.gridLayout.addWidget(self.derivativeCheck, 4, 0, 1, 1)
self.phasemapCheck = QtWidgets.QCheckBox(self.transformGroup)
self.phasemapCheck.setObjectName("phasemapCheck")
self.gridLayout.addWidget(self.phasemapCheck, 5, 0, 1, 1)
self.fftCheck = QtWidgets.QCheckBox(self.transformGroup)
self.fftCheck.setObjectName("fftCheck")
self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
self.logYCheck = QtWidgets.QCheckBox(self.transformGroup)
self.logYCheck.setObjectName("logYCheck")
self.gridLayout.addWidget(self.logYCheck, 3, 0, 1, 1)
self.subtractMeanCheck = QtWidgets.QCheckBox(self.transformGroup)
self.subtractMeanCheck.setObjectName("subtractMeanCheck")
self.gridLayout.addWidget(self.subtractMeanCheck, 1, 0, 1, 1)
self.pointsGroup = QtWidgets.QGroupBox(Form)
self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
self.pointsGroup.setCheckable(True)
self.pointsGroup.setObjectName("pointsGroup")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pointsGroup)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.autoPointsCheck = QtWidgets.QCheckBox(self.pointsGroup)
self.autoPointsCheck.setChecked(True)
self.autoPointsCheck.setObjectName("autoPointsCheck")
self.verticalLayout_5.addWidget(self.autoPointsCheck)
self.gridGroup = QtWidgets.QFrame(Form)
self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
self.gridGroup.setObjectName("gridGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.gridGroup)
self.gridLayout_2.setObjectName("gridLayout_2")
self.xGridCheck = QtWidgets.QCheckBox(self.gridGroup)
self.xGridCheck.setObjectName("xGridCheck")
self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
self.yGridCheck = QtWidgets.QCheckBox(self.gridGroup)
self.yGridCheck.setObjectName("yGridCheck")
self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
self.gridAlphaSlider = QtWidgets.QSlider(self.gridGroup)
self.gridAlphaSlider.setMaximum(255)
self.gridAlphaSlider.setProperty("value", 128)
self.gridAlphaSlider.setOrientation(QtCore.Qt.Orientation.Horizontal)
self.gridAlphaSlider.setObjectName("gridAlphaSlider")
self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
self.label = QtWidgets.QLabel(self.gridGroup)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
self.alphaGroup = QtWidgets.QGroupBox(Form)
self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
self.alphaGroup.setCheckable(True)
self.alphaGroup.setObjectName("alphaGroup")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.alphaGroup)
self.horizontalLayout.setObjectName("horizontalLayout")
self.autoAlphaCheck = QtWidgets.QCheckBox(self.alphaGroup)
self.autoAlphaCheck.setChecked(False)
self.autoAlphaCheck.setObjectName("autoAlphaCheck")
self.horizontalLayout.addWidget(self.autoAlphaCheck)
self.alphaSlider = QtWidgets.QSlider(self.alphaGroup)
self.alphaSlider.setMaximum(1000)
self.alphaSlider.setProperty("value", 1000)
self.alphaSlider.setOrientation(QtCore.Qt.Orientation.Horizontal)
self.alphaSlider.setObjectName("alphaSlider")
self.horizontalLayout.addWidget(self.alphaSlider)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "PyQtGraph"))
self.averageGroup.setToolTip(_translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available)."))
self.averageGroup.setTitle(_translate("Form", "Average"))
self.clipToViewCheck.setToolTip(_translate("Form", "Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced."))
self.clipToViewCheck.setText(_translate("Form", "Clip to View"))
self.maxTracesCheck.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed."))
self.maxTracesCheck.setText(_translate("Form", "Max Traces:"))
self.downsampleCheck.setText(_translate("Form", "Downsample"))
self.peakRadio.setToolTip(_translate("Form", "Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower."))
self.peakRadio.setText(_translate("Form", "Peak"))
self.maxTracesSpin.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed."))
self.forgetTracesCheck.setToolTip(_translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden)."))
self.forgetTracesCheck.setText(_translate("Form", "Forget hidden traces"))
self.meanRadio.setToolTip(_translate("Form", "Downsample by taking the mean of N samples."))
self.meanRadio.setText(_translate("Form", "Mean"))
self.subsampleRadio.setToolTip(_translate("Form", "Downsample by taking the first of N samples. This method is fastest and least accurate."))
self.subsampleRadio.setText(_translate("Form", "Subsample"))
self.autoDownsampleCheck.setToolTip(_translate("Form", "Automatically downsample data based on the visible range. This assumes X values are uniformly spaced."))
self.autoDownsampleCheck.setText(_translate("Form", "Auto"))
self.downsampleSpin.setToolTip(_translate("Form", "Downsample data before plotting. (plot every Nth sample)"))
self.downsampleSpin.setSuffix(_translate("Form", "x"))
self.logXCheck.setText(_translate("Form", "Log X"))
self.derivativeCheck.setText(_translate("Form", "dy/dx"))
self.phasemapCheck.setText(_translate("Form", "Y vs. Y\'"))
self.fftCheck.setText(_translate("Form", "Power Spectrum (FFT)"))
self.logYCheck.setText(_translate("Form", "Log Y"))
self.subtractMeanCheck.setText(_translate("Form", "Subtract Mean"))
self.pointsGroup.setTitle(_translate("Form", "Points"))
self.autoPointsCheck.setText(_translate("Form", "Auto"))
self.xGridCheck.setText(_translate("Form", "Show X Grid"))
self.yGridCheck.setText(_translate("Form", "Show Y Grid"))
self.label.setText(_translate("Form", "Opacity"))
self.alphaGroup.setTitle(_translate("Form", "Alpha"))
self.autoAlphaCheck.setText(_translate("Form", "Auto"))
| Ui_Form |
python | django__django | django/contrib/gis/db/models/sql/conversion.py | {
"start": 204,
"end": 1366
} | class ____(models.FloatField):
"Wrapper for Area values."
def __init__(self, geo_field):
super().__init__()
self.geo_field = geo_field
def get_prep_value(self, value):
if not isinstance(value, Area):
raise ValueError("AreaField only accepts Area measurement objects.")
return value
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return
area_att = connection.ops.get_area_att_for_field(self.geo_field)
return getattr(value, area_att) if area_att else value
def from_db_value(self, value, expression, connection):
if value is None:
return
# If the database returns a Decimal, convert it to a float as expected
# by the Python geometric objects.
if isinstance(value, Decimal):
value = float(value)
# If the units are known, convert value into area measure.
area_att = connection.ops.get_area_att_for_field(self.geo_field)
return Area(**{area_att: value}) if area_att else value
def get_internal_type(self):
return "AreaField"
| AreaField |
python | huggingface__transformers | tests/models/gemma3n/test_modeling_gemma3n.py | {
"start": 2034,
"end": 4510
} | class ____:
def __init__(
self,
parent,
batch_size=2,
num_channels=32, # feature_size / input_feat_size
sampling_rate=16_000,
raw_audio_length=8_000,
is_training=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.sampling_rate = sampling_rate
self.raw_audio_length = raw_audio_length
self.is_training = is_training
def get_feature_extractor_config(self):
return {
"feature_size": self.num_channels,
"sampling_rate": self.sampling_rate,
"padding_value": 0.0,
"return_attention_mask": True,
"frame_length_ms": 32.0,
"hop_length_ms": 10.0,
"dither": 0.0, # Important for determinism
}
def get_audio_encoder_config(self):
return Gemma3nAudioConfig(
input_feat_size=self.num_channels,
hidden_size=32,
conf_num_attention_heads=4,
conf_num_hidden_layers=2,
sscp_conv_channel_size=(16, 8),
conf_conv_kernel_size=3,
conf_attention_chunk_size=4,
conf_attention_context_left=5,
)
def prepare_config_and_inputs_for_common(self):
# Prepare inputs for the audio encoder
feature_extractor_config = self.get_feature_extractor_config()
audio_encoder_config = self.get_audio_encoder_config()
np.random.seed(0)
raw_speech_1 = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.raw_audio_length)).astype(np.float32)
raw_speech_2 = np.random.randn(self.raw_audio_length // 2).astype(np.float32)
raw_speech = [raw_speech_1, raw_speech_2]
feature_extractor = Gemma3nAudioFeatureExtractor(**feature_extractor_config)
audio_inputs = feature_extractor(raw_speech, return_tensors="pt")
input_features = audio_inputs["input_features"]
# The encoder expects a padding mask (True for padding), while the feature extractor
# returns an attention mask (True for valid tokens). We must invert it.
input_features_mask = ~audio_inputs["input_features_mask"].to(torch.bool)
inputs_dict = {
"audio_mel": input_features,
"audio_mel_mask": input_features_mask,
}
return audio_encoder_config, inputs_dict
@unittest.skip("Skipped for now!")
@require_torch
| Gemma3nAudioModelTester |
python | ZoranPandovski__al-go-rithms | data_structures/Graphs/graph/Python/topological_sort.py | {
"start": 104,
"end": 1633
} | class ____:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
# A recursive function used by topologicalSort
def topologicalSortUtil(self,v,visited,stack):
# Mark the current node as visited.
visited[v] = True
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
# Push current vertex to stack which stores result
stack.insert(0,v)
# The function to do Topological Sort. It uses recursive
# topologicalSortUtil()
def topologicalSort(self):
# Mark all the vertices as not visited
visited = [False]*self.V
stack =[]
# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
# Print contents of stack
print(stack)
def main():
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print("Following is a Topological Sort of the given graph")
g.topologicalSort()
if __name__ == '__main__':
main() | Graph |
python | pytest-dev__pluggy | src/pluggy/_hooks.py | {
"start": 22481,
"end": 24611
} | class ____:
"""A hook implementation in a :class:`HookCaller`."""
__slots__ = (
"function",
"argnames",
"kwargnames",
"plugin",
"opts",
"plugin_name",
"wrapper",
"hookwrapper",
"optionalhook",
"tryfirst",
"trylast",
)
def __init__(
self,
plugin: _Plugin,
plugin_name: str,
function: _HookImplFunction[object],
hook_impl_opts: HookimplOpts,
) -> None:
""":meta private:"""
#: The hook implementation function.
self.function: Final = function
argnames, kwargnames = varnames(self.function)
#: The positional parameter names of ``function```.
self.argnames: Final = argnames
#: The keyword parameter names of ``function```.
self.kwargnames: Final = kwargnames
#: The plugin which defined this hook implementation.
self.plugin: Final = plugin
#: The :class:`HookimplOpts` used to configure this hook implementation.
self.opts: Final = hook_impl_opts
#: The name of the plugin which defined this hook implementation.
self.plugin_name: Final = plugin_name
#: Whether the hook implementation is a :ref:`wrapper <hookwrapper>`.
self.wrapper: Final = hook_impl_opts["wrapper"]
#: Whether the hook implementation is an :ref:`old-style wrapper
#: <old_style_hookwrappers>`.
self.hookwrapper: Final = hook_impl_opts["hookwrapper"]
#: Whether validation against a hook specification is :ref:`optional
#: <optionalhook>`.
self.optionalhook: Final = hook_impl_opts["optionalhook"]
#: Whether to try to order this hook implementation :ref:`first
#: <callorder>`.
self.tryfirst: Final = hook_impl_opts["tryfirst"]
#: Whether to try to order this hook implementation :ref:`last
#: <callorder>`.
self.trylast: Final = hook_impl_opts["trylast"]
def __repr__(self) -> str:
return f"<HookImpl plugin_name={self.plugin_name!r}, plugin={self.plugin!r}>"
@final
| HookImpl |
python | sympy__sympy | sympy/polys/puiseux.py | {
"start": 8041,
"end": 27500
} | class ____(Generic[Er]):
"""Puiseux polynomial. Represents a truncated Puiseux series.
See the :class:`PuiseuxRing` class for more information.
>>> from sympy import QQ
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x, y = puiseux_ring('x, y', QQ)
>>> p = 5*x**2 + 7*y**3
>>> p
7*y**3 + 5*x**2
The internal representation of a Puiseux polynomial wraps a normal
polynomial. To support negative powers the polynomial is considered to be
divided by a monomial.
>>> p2 = 1/x + 1/y**2
>>> p2.monom # x*y**2
(1, 2)
>>> p2.poly
x + y**2
>>> (y**2 + x) / (x*y**2) == p2
True
To support fractional powers the polynomial is considered to be a function
of ``x**(1/nx), y**(1/ny), ...``. The representation keeps track of a
monomial and a list of exponent denominators so that the polynomial can be
used to represent both negative and fractional powers.
>>> p3 = x**QQ(1,2) + y**QQ(2,3)
>>> p3.ns
(2, 3)
>>> p3.poly
x + y**2
See Also
========
sympy.polys.puiseux.PuiseuxRing
sympy.polys.rings.PolyElement
"""
ring: PuiseuxRing[Er]
poly: PolyElement[Er]
monom: MonI | None
ns: MonI | None
def __new__(cls, poly: PolyElement[Er], ring: PuiseuxRing[Er]) -> PuiseuxPoly[Er]:
return cls._new(ring, poly, None, None)
@classmethod
def _new(
cls,
ring: PuiseuxRing[Er],
poly: PolyElement[Er],
monom: MonI | None,
ns: MonI | None,
) -> PuiseuxPoly[Er]:
poly, monom, ns = cls._normalize(poly, monom, ns)
return cls._new_raw(ring, poly, monom, ns)
@classmethod
def _new_raw(
cls,
ring: PuiseuxRing[Er],
poly: PolyElement[Er],
monom: MonI | None,
ns: MonI | None,
) -> PuiseuxPoly[Er]:
obj = object.__new__(cls)
obj.ring = ring
obj.poly = poly
obj.monom = monom
obj.ns = ns
return obj
def __eq__(self, other: object) -> bool:
if isinstance(other, PuiseuxPoly):
return (
self.poly == other.poly
and self.monom == other.monom
and self.ns == other.ns
)
elif self.monom is None and self.ns is None:
return self.poly.__eq__(other)
else:
return NotImplemented
@classmethod
def _normalize(
cls,
poly: PolyElement[Er],
monom: MonI | None,
ns: MonI | None,
) -> tuple[PolyElement[Er], MonI | None, MonI | None]:
if monom is None and ns is None:
return poly, None, None
if monom is not None:
degs: MonI = tuple([max(d, 0) for d in poly.tail_degrees()]) # type: ignore
if all(di >= mi for di, mi in zip(degs, monom)):
poly = _div_poly_monom(poly, monom)
monom = None
elif any(degs):
poly = _div_poly_monom(poly, degs)
monom = _div_monom(monom, degs)
if ns is not None:
factors_d, [poly_d] = poly.deflate()
degrees = poly.degrees()
monom_d = monom if monom is not None else [0] * len(degrees)
ns_new = []
monom_new = []
inflations = []
for fi, ni, di, mi in zip(factors_d, ns, degrees, monom_d):
if di == 0:
g = gcd(ni, mi)
else:
g = gcd(fi, ni, mi)
ns_new.append(ni // g)
monom_new.append(mi // g)
inflations.append(fi // g)
if any(infl > 1 for infl in inflations):
poly_d = poly_d.inflate(inflations)
poly = poly_d
if monom is not None:
monom = tuple(monom_new)
if all(n == 1 for n in ns_new):
ns = None
else:
ns = tuple(ns_new)
return poly, monom, ns
@classmethod
def _monom_fromint(
cls,
monom: MonI,
dmonom: MonI | None,
ns: MonI | None,
) -> MonQ:
if dmonom is not None and ns is not None:
return tuple(QQ(mi - di, ni) for mi, di, ni in zip(monom, dmonom, ns))
elif dmonom is not None:
return tuple(QQ(mi - di) for mi, di in zip(monom, dmonom))
elif ns is not None:
return tuple(QQ(mi, ni) for mi, ni in zip(monom, ns))
else:
return tuple(QQ(mi) for mi in monom)
@classmethod
def _monom_toint(
cls,
monom: MonQ,
dmonom: MonI | None,
ns: MonI | None,
) -> MonI:
if dmonom is not None and ns is not None:
return tuple(
int((mi * ni).numerator + di) for mi, di, ni in zip(monom, dmonom, ns)
)
elif dmonom is not None:
return tuple(int(mi.numerator + di) for mi, di in zip(monom, dmonom))
elif ns is not None:
return tuple(int((mi * ni).numerator) for mi, ni in zip(monom, ns))
else:
return tuple(int(mi.numerator) for mi in monom)
def itermonoms(self) -> Iterator[MonQ]:
"""Iterate over the monomials of a Puiseux polynomial.
>>> from sympy import QQ
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x, y = puiseux_ring('x, y', QQ)
>>> p = 5*x**2 + 7*y**3
>>> list(p.itermonoms())
[(2, 0), (0, 3)]
>>> p[(2, 0)]
5
"""
monom, ns = self.monom, self.ns
for m in self.poly.itermonoms():
yield self._monom_fromint(m, monom, ns)
def monoms(self) -> list[MonQ]:
"""Return a list of the monomials of a Puiseux polynomial."""
return list(self.itermonoms())
def __iter__(self) -> Iterator[MonQ]:
return self.itermonoms()
def __getitem__(self, monom: MonQ) -> Er:
monomq = self._monom_toint(monom, self.monom, self.ns)
return self.poly[monomq]
def __len__(self) -> int:
return len(self.poly)
def iterterms(self) -> Iterator[tuple[MonQ, Er]]:
"""Iterate over the terms of a Puiseux polynomial.
>>> from sympy import QQ
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x, y = puiseux_ring('x, y', QQ)
>>> p = 5*x**2 + 7*y**3
>>> list(p.iterterms())
[((2, 0), 5), ((0, 3), 7)]
"""
monom, ns = self.monom, self.ns
for m, coeff in self.poly.iterterms():
mq = self._monom_fromint(m, monom, ns)
yield mq, coeff
def terms(self) -> list[tuple[MonQ, Er]]:
"""Return a list of the terms of a Puiseux polynomial."""
return list(self.iterterms())
@property
def is_term(self) -> bool:
"""Return True if the Puiseux polynomial is a single term."""
return self.poly.is_term
def to_dict(self) -> dict[MonQ, Er]:
"""Return a dictionary representation of a Puiseux polynomial."""
return dict(self.iterterms())
@classmethod
def from_dict(
cls,
terms: Map[MonQ, Er | Expr | int],
ring: PuiseuxRing[Er],
) -> PuiseuxPoly[Er]:
"""Create a Puiseux polynomial from a dictionary of terms.
>>> from sympy import QQ
>>> from sympy.polys.puiseux import puiseux_ring, PuiseuxPoly
>>> R, x = puiseux_ring('x', QQ)
>>> PuiseuxPoly.from_dict({(QQ(1,2),): QQ(3)}, R)
3*x**(1/2)
>>> R.from_dict({(QQ(1,2),): QQ(3)})
3*x**(1/2)
"""
ns = [1] * ring.ngens
mon: list[MPQ | int] = [0] * ring.ngens
for mo in terms:
ns = [lcm(n, m.denominator) for n, m in zip(ns, mo)]
mon = [min(m, n) for m, n in zip(mo, mon)]
if not any(mon):
monom = None
else:
monom = tuple(-int((m * n).numerator) for m, n in zip(mon, ns))
if all(n == 1 for n in ns):
ns_final = None
else:
ns_final = tuple(ns)
def conv(m: MonQ) -> MonI:
return cls._monom_toint(m, monom, ns_final)
terms_p = {conv(m): coeff for m, coeff in terms.items()}
poly = ring.poly_ring.from_dict(terms_p)
return cls._new(ring, poly, monom, ns_final)
def as_expr(self) -> Expr:
"""Convert a Puiseux polynomial to :class:`~sympy.core.expr.Expr`.
>>> from sympy import QQ, Expr
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x = puiseux_ring('x', QQ)
>>> p = 5*x**2 + 7*x**3
>>> p.as_expr()
7*x**3 + 5*x**2
>>> isinstance(_, Expr)
True
"""
ring = self.ring
dom = ring.domain
symbols = ring.symbols
terms = []
for monom, coeff in self.iterterms():
coeff_expr = dom.to_sympy(coeff)
monoms_expr = []
for i, m in enumerate(monom):
monoms_expr.append(symbols[i] ** m)
terms.append(Mul(coeff_expr, *monoms_expr))
return Add(*terms)
def __repr__(self) -> str:
def format_power(base: str, exp: MPQ) -> str:
if exp == 1:
return base
elif exp >= 0 and int(exp) == exp:
return f"{base}**{exp}"
else:
return f"{base}**({exp})"
ring = self.ring
dom = ring.domain
syms = [str(s) for s in ring.symbols]
terms_str = []
for monom, coeff in sorted(self.terms()):
monom_str = "*".join(format_power(s, e) for s, e in zip(syms, monom) if e)
if coeff == dom.one:
if monom_str:
terms_str.append(monom_str)
else:
terms_str.append("1")
elif not monom_str:
terms_str.append(str(coeff))
else:
terms_str.append(f"{coeff}*{monom_str}")
return " + ".join(terms_str)
def _unify(
self, other: PuiseuxPoly[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], MonI | None, MonI | None]:
"""Bring two Puiseux polynomials to a common monom and ns."""
poly1, monom1, ns1 = self.poly, self.monom, self.ns
poly2, monom2, ns2 = other.poly, other.monom, other.ns
if monom1 == monom2 and ns1 == ns2:
return poly1, poly2, monom1, ns1
if ns1 == ns2:
ns = ns1
elif ns1 is not None and ns2 is not None:
ns = tuple(lcm(n1, n2) for n1, n2 in zip(ns1, ns2))
f1 = [n // n1 for n, n1 in zip(ns, ns1)]
f2 = [n // n2 for n, n2 in zip(ns, ns2)]
poly1 = poly1.inflate(f1)
poly2 = poly2.inflate(f2)
if monom1 is not None:
monom1 = tuple(m * f for m, f in zip(monom1, f1))
if monom2 is not None:
monom2 = tuple(m * f for m, f in zip(monom2, f2))
elif ns2 is not None:
ns = ns2
poly1 = poly1.inflate(ns)
if monom1 is not None:
monom1 = tuple(m * n for m, n in zip(monom1, ns))
elif ns1 is not None:
ns = ns1
poly2 = poly2.inflate(ns)
if monom2 is not None:
monom2 = tuple(m * n for m, n in zip(monom2, ns))
else:
assert False
if monom1 == monom2:
monom = monom1
elif monom1 is not None and monom2 is not None:
monom = tuple(max(m1, m2) for m1, m2 in zip(monom1, monom2))
poly1 = _mul_poly_monom(poly1, _div_monom(monom, monom1))
poly2 = _mul_poly_monom(poly2, _div_monom(monom, monom2))
elif monom2 is not None:
monom = monom2
poly1 = _mul_poly_monom(poly1, monom2)
elif monom1 is not None:
monom = monom1
poly2 = _mul_poly_monom(poly2, monom1)
else:
assert False
return poly1, poly2, monom, ns
def __pos__(self) -> PuiseuxPoly[Er]:
return self
def __neg__(self) -> PuiseuxPoly[Er]:
return self._new_raw(self.ring, -self.poly, self.monom, self.ns)
def __add__(self, other: PuiseuxPoly[Er] | Er | int) -> PuiseuxPoly[Er]:
if isinstance(other, PuiseuxPoly):
if self.ring != other.ring:
raise ValueError("Cannot add Puiseux polynomials from different rings")
return self._add(other)
domain = self.ring.domain
if isinstance(other, int):
return self._add_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._add_ground(other)
else:
return NotImplemented
def __radd__(self, other: Er | int) -> PuiseuxPoly[Er]:
domain = self.ring.domain
if isinstance(other, int):
return self._add_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._add_ground(other)
else:
return NotImplemented
def __sub__(self, other: PuiseuxPoly[Er] | Er | int) -> PuiseuxPoly[Er]:
if isinstance(other, PuiseuxPoly):
if self.ring != other.ring:
raise ValueError(
"Cannot subtract Puiseux polynomials from different rings"
)
return self._sub(other)
domain = self.ring.domain
if isinstance(other, int):
return self._sub_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._sub_ground(other)
else:
return NotImplemented
def __rsub__(self, other: Er | int) -> PuiseuxPoly[Er]:
domain = self.ring.domain
if isinstance(other, int):
return self._rsub_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._rsub_ground(other)
else:
return NotImplemented
def __mul__(self, other: PuiseuxPoly[Er] | Er | int) -> PuiseuxPoly[Er]:
if isinstance(other, PuiseuxPoly):
if self.ring != other.ring:
raise ValueError(
"Cannot multiply Puiseux polynomials from different rings"
)
return self._mul(other)
domain = self.ring.domain
if isinstance(other, int):
return self._mul_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._mul_ground(other)
else:
return NotImplemented
def __rmul__(self, other: Er | int) -> PuiseuxPoly[Er]:
domain = self.ring.domain
if isinstance(other, int):
return self._mul_ground(domain.convert_from(QQ(other), QQ))
elif domain.of_type(other):
return self._mul_ground(other)
else:
return NotImplemented
def __pow__(self, other: int | MPQ) -> PuiseuxPoly[Er]:
if isinstance(other, int):
if other >= 0:
return self._pow_pint(other)
else:
return self._pow_nint(-other)
elif QQ.of_type(other):
return self._pow_rational(other)
else:
return NotImplemented
def __truediv__(self, other: PuiseuxPoly[Er] | Er | int) -> PuiseuxPoly[Er]:
if isinstance(other, PuiseuxPoly):
if self.ring != other.ring:
raise ValueError(
"Cannot divide Puiseux polynomials from different rings"
)
return self._mul(other._inv())
domain = self.ring.domain
if isinstance(other, int):
return self._mul_ground(domain.convert_from(QQ(1, other), QQ))
elif domain.of_type(other):
return self._div_ground(other)
else:
return NotImplemented
def __rtruediv__(self, other: Er | int) -> PuiseuxPoly[Er]:
if isinstance(other, int):
return self._inv()._mul_ground(self.ring.domain.convert_from(QQ(other), QQ))
elif self.ring.domain.of_type(other):
return self._inv()._mul_ground(other)
else:
return NotImplemented
def _add(self, other: PuiseuxPoly[Er]) -> PuiseuxPoly[Er]:
poly1, poly2, monom, ns = self._unify(other)
return self._new(self.ring, poly1 + poly2, monom, ns)
def _add_ground(self, ground: Er) -> PuiseuxPoly[Er]:
return self._add(self.ring.ground_new(ground))
def _sub(self, other: PuiseuxPoly[Er]) -> PuiseuxPoly[Er]:
poly1, poly2, monom, ns = self._unify(other)
return self._new(self.ring, poly1 - poly2, monom, ns)
def _sub_ground(self, ground: Er) -> PuiseuxPoly[Er]:
return self._sub(self.ring.ground_new(ground))
def _rsub_ground(self, ground: Er) -> PuiseuxPoly[Er]:
return self.ring.ground_new(ground)._sub(self)
def _mul(self, other: PuiseuxPoly[Er]) -> PuiseuxPoly[Er]:
poly1, poly2, monom, ns = self._unify(other)
if monom is not None:
monom = tuple(2 * e for e in monom)
return self._new(self.ring, poly1 * poly2, monom, ns)
def _mul_ground(self, ground: Er) -> PuiseuxPoly[Er]:
return self._new_raw(self.ring, self.poly * ground, self.monom, self.ns)
def _div_ground(self, ground: Er) -> PuiseuxPoly[Er]:
return self._new_raw(self.ring, self.poly / ground, self.monom, self.ns)
def _pow_pint(self, n: int) -> PuiseuxPoly[Er]:
assert n >= 0
monom = self.monom
if monom is not None:
monom = tuple(m * n for m in monom)
return self._new(self.ring, self.poly**n, monom, self.ns)
def _pow_nint(self, n: int) -> PuiseuxPoly[Er]:
return self._inv()._pow_pint(n)
def _pow_rational(self, n: MPQ) -> PuiseuxPoly[Er]:
if not self.is_term:
raise ValueError("Only monomials can be raised to a rational power")
[(monom, coeff)] = self.terms()
domain = self.ring.domain
if not domain.is_one(coeff):
raise ValueError("Only monomials can be raised to a rational power")
monom = tuple(m * n for m in monom)
return self.ring.from_dict({monom: domain.one})
def _inv(self) -> PuiseuxPoly[Er]:
if not self.is_term:
raise ValueError("Only terms can be inverted")
[(monom, coeff)] = self.terms()
domain = self.ring.domain
if not domain.is_Field and not domain.is_one(coeff):
raise ValueError("Cannot invert non-unit coefficient")
monom = tuple(-m for m in monom)
coeff = 1 / coeff # type: ignore
return self.ring.from_dict({monom: coeff})
def diff(self, x: PuiseuxPoly[Er]) -> PuiseuxPoly[Er]:
"""Differentiate a Puiseux polynomial with respect to a variable.
>>> from sympy import QQ
>>> from sympy.polys.puiseux import puiseux_ring
>>> R, x, y = puiseux_ring('x, y', QQ)
>>> p = 5*x**2 + 7*y**3
>>> p.diff(x)
10*x
>>> p.diff(y)
21*y**2
"""
ring = self.ring
i = ring.index(x)
g: dict[MonQ, Er] = {}
for expv, coeff in self.iterterms():
n = expv[i]
if n:
e = list(expv)
e[i] -= 1
g[tuple(e)] = coeff * n # type: ignore
return ring.from_dict(g)
| PuiseuxPoly |
python | huggingface__transformers | src/transformers/models/vitpose/modeling_vitpose.py | {
"start": 6288,
"end": 7734
} | class ____(nn.Module):
"""
Classic decoding head consisting of a 2 deconvolutional blocks, followed by a 1x1 convolution layer,
turning the feature maps into heatmaps.
"""
def __init__(self, config: VitPoseConfig):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(
config.backbone_config.hidden_size, 256, kernel_size=4, stride=2, padding=1, bias=False
)
self.batchnorm1 = nn.BatchNorm2d(256)
self.relu1 = nn.ReLU()
self.deconv2 = nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1, bias=False)
self.batchnorm2 = nn.BatchNorm2d(256)
self.relu2 = nn.ReLU()
self.conv = nn.Conv2d(256, config.num_labels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None):
hidden_state = self.deconv1(hidden_state)
hidden_state = self.batchnorm1(hidden_state)
hidden_state = self.relu1(hidden_state)
hidden_state = self.deconv2(hidden_state)
hidden_state = self.batchnorm2(hidden_state)
hidden_state = self.relu2(hidden_state)
heatmaps = self.conv(hidden_state)
if flip_pairs is not None:
heatmaps = flip_back(heatmaps, flip_pairs)
return heatmaps
@auto_docstring(
custom_intro="""
The VitPose model with a pose estimation head on top.
"""
)
| VitPoseClassicDecoder |
python | django__django | tests/model_forms/models.py | {
"start": 14533,
"end": 14699
} | class ____(models.Model):
number = models.ForeignKey("Number", on_delete=models.CASCADE)
die = models.ForeignKey("Dice", on_delete=models.CASCADE)
| NumbersToDice |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 217,
"end": 383
} | class ____(object):
Red = 1
# \brief color Green
# Green is bit_flag with value (1u << 1)
Green = 2
# \brief color Blue (1u << 3)
Blue = 8
| Color |
python | dateutil__dateutil | src/dateutil/parser/_parser.py | {
"start": 49747,
"end": 58000
} | class ____(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
used_idxs = list()
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
for ii in range(j):
used_idxs.append(ii)
i = j
if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
"0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
used_idxs.append(i)
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr, (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i]) * 3600 +
int(l[i + 2]) * 60) * signal)
used_idxs.append(i)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2]) * 3600 * signal)
else:
return None
used_idxs.append(i)
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';':
l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789+-"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
used_idxs.append(i)
i += 2
if l[i] == '-':
value = int(l[i + 1]) * -1
used_idxs.append(i)
i += 1
else:
value = int(l[i])
used_idxs.append(i)
i += 2
if value:
x.week = value
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
used_idxs.append(i)
i += 2
x.time = int(l[i])
used_idxs.append(i)
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
used_idxs.append(i)
i += 1
else:
signal = 1
used_idxs.append(i)
res.dstoffset = (res.stdoffset + int(l[i]) * signal)
# This was a made-up format that is not in normal use
warn(('Parsed time zone "%s"' % tzstr) +
'is in a non-standard dateutil-specific format, which ' +
'is now deprecated; support for parsing this format ' +
'will be removed in future versions. It is recommended ' +
'that you switch to a standard format like the GNU ' +
'TZ variable format.', tz.DeprecatedTzFormatWarning)
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
used_idxs.append(i)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
used_idxs.append(i)
i += 1
x.month = int(l[i])
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i]) + 1
used_idxs.append(i)
i += 1
if i < len_l and l[i] == '/':
used_idxs.append(i)
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
used_idxs.append(i)
i += 2
if i + 1 < len_l and l[i + 1] == ':':
used_idxs.append(i)
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2]) * 3600)
else:
return None
used_idxs.append(i)
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
unused_idxs = set(range(len_l)).difference(used_idxs)
res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
| _tzparser |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/vtk_m/package.py | {
"start": 220,
"end": 1072
} | class ____(CMakePackage):
"""This is a fake vtk-m package used to demonstrate virtual package providers
with dependencies."""
homepage = "http://www.spack-fake-vtk-m.org"
url = "http://www.spack-fake-vtk-m.org/downloads/vtk-m-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("cuda", default=False, description="Build with CUDA")
variant(
"cuda_arch",
description="CUDA architecture",
default="none",
values=("70", "none"),
multi=False,
when="+cuda",
)
variant("rocm", default=False, description="Enable ROCm support")
variant(
"amdgpu_target",
default="none",
description="AMD GPU architecture",
values=("gfx900", "none"),
multi=False,
when="+rocm",
)
depends_on("cmake@3.18:")
| VtkM |
python | kubernetes-client__python | kubernetes/client/models/v1_group_version_for_discovery.py | {
"start": 383,
"end": 5076
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group_version': 'str',
'version': 'str'
}
attribute_map = {
'group_version': 'groupVersion',
'version': 'version'
}
def __init__(self, group_version=None, version=None, local_vars_configuration=None): # noqa: E501
"""V1GroupVersionForDiscovery - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group_version = None
self._version = None
self.discriminator = None
self.group_version = group_version
self.version = version
@property
def group_version(self):
"""Gets the group_version of this V1GroupVersionForDiscovery. # noqa: E501
groupVersion specifies the API group and version in the form \"group/version\" # noqa: E501
:return: The group_version of this V1GroupVersionForDiscovery. # noqa: E501
:rtype: str
"""
return self._group_version
@group_version.setter
def group_version(self, group_version):
"""Sets the group_version of this V1GroupVersionForDiscovery.
groupVersion specifies the API group and version in the form \"group/version\" # noqa: E501
:param group_version: The group_version of this V1GroupVersionForDiscovery. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and group_version is None: # noqa: E501
raise ValueError("Invalid value for `group_version`, must not be `None`") # noqa: E501
self._group_version = group_version
@property
def version(self):
"""Gets the version of this V1GroupVersionForDiscovery. # noqa: E501
version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion. # noqa: E501
:return: The version of this V1GroupVersionForDiscovery. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1GroupVersionForDiscovery.
version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion. # noqa: E501
:param version: The version of this V1GroupVersionForDiscovery. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GroupVersionForDiscovery):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GroupVersionForDiscovery):
return True
return self.to_dict() != other.to_dict()
| V1GroupVersionForDiscovery |
python | pytorch__pytorch | test/distributed/test_symmetric_memory.py | {
"start": 43593,
"end": 45962
} | class ____(MultiProcContinuousTest):
def _init_process(self) -> None:
torch.cuda.set_device(self.device)
enable_symm_mem_for_group(dist.group.WORLD.group_name)
torch.manual_seed(42 + self.rank)
torch._inductor.config._collective.auto_select = True
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
@skip("Fails with 'one_shot_all_reduce' not found in AOT graph, TODO: fix")
@skip_if_rocm_multiprocess # requires registered-buffer support
@skip_if_lt_x_gpu(2)
@fresh_cache()
def test_lowering_one_shot_all_reduce(self):
self._init_process()
arg = torch.rand(4, 4, device=self.device)
def func_0(x):
x = x + 1
x = torch.ops._c10d_functional.all_reduce(x, "sum", "0")
return torch.ops._c10d_functional.wait_tensor(x)
compiled_0 = torch.compile(func_0, fullgraph=True)
code_0 = run_and_get_triton_code(compiled_0, arg)
self.assertIn("one_shot_all_reduce", code_0)
self.assertNotIn("return (buf0", code_0)
# All-reduce on a slice view
def func_1(x):
x = x + 1
x = x[2:]
x = torch.ops._c10d_functional.all_reduce(x, "sum", "0")
return torch.ops._c10d_functional.wait_tensor(x)
compiled_1 = torch.compile(func_1, fullgraph=True)
code_1 = run_and_get_triton_code(compiled_1, arg)
self.assertIn("one_shot_all_reduce", code_1)
self.assertNotIn("return (buf0", code_1)
# All-reduce on input
def func_2(x):
x = torch.ops._c10d_functional.all_reduce(x, "sum", "0")
return torch.ops._c10d_functional.wait_tensor(x)
compiled_2 = torch.compile(func_2, fullgraph=True)
code_2 = run_and_get_triton_code(compiled_2, arg)
self.assertNotIn("one_shot_all_reduce", code_2)
# All-reduce on matmul output
def func_3(x):
x = x @ x
x = torch.ops._c10d_functional.all_reduce(x, "sum", "0")
return torch.ops._c10d_functional.wait_tensor(x)
compiled_3 = torch.compile(func_3, fullgraph=True)
code_3 = run_and_get_triton_code(compiled_3, arg)
self.assertIn("one_shot_all_reduce", code_3)
self.assertNotIn("return (buf0", code_3)
| LoweringTest |
python | sqlalchemy__sqlalchemy | test/orm/test_naturalpks.py | {
"start": 41986,
"end": 50349
} | class ____(fixtures.MappedTest):
"""Test cascades of pk->pk/fk on joined table inh."""
# mssql doesn't allow ON UPDATE on self-referential keys
__unsupported_on__ = ("mssql",)
__requires__ = ("skip_mysql_on_windows",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table(
"person",
metadata,
Column("name", String(50), primary_key=True),
Column("type", String(50), nullable=False),
test_needs_fk=True,
)
Table(
"engineer",
metadata,
Column(
"name",
String(50),
ForeignKey("person.name", **fk_args),
primary_key=True,
),
Column("primary_language", String(50)),
Column(
"boss_name", String(50), ForeignKey("manager.name", **fk_args)
),
test_needs_fk=True,
)
Table(
"manager",
metadata,
Column(
"name",
String(50),
ForeignKey("person.name", **fk_args),
primary_key=True,
),
Column("paperwork", String(50)),
test_needs_fk=True,
)
Table(
"owner",
metadata,
Column(
"name",
String(50),
ForeignKey("manager.name", **fk_args),
primary_key=True,
),
Column("owner_name", String(50)),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Owner(Manager):
pass
def _mapping_fixture(self, threelevel, passive_updates):
Person, Manager, Engineer, Owner = self.classes(
"Person", "Manager", "Engineer", "Owner"
)
person, manager, engineer, owner = self.tables(
"person", "manager", "engineer", "owner"
)
self.mapper_registry.map_imperatively(
Person,
person,
polymorphic_on=person.c.type,
polymorphic_identity="person",
passive_updates=passive_updates,
)
self.mapper_registry.map_imperatively(
Engineer,
engineer,
inherits=Person,
polymorphic_identity="engineer",
properties={
"boss": relationship(
Manager,
primaryjoin=manager.c.name == engineer.c.boss_name,
passive_updates=passive_updates,
)
},
)
self.mapper_registry.map_imperatively(
Manager, manager, inherits=Person, polymorphic_identity="manager"
)
if threelevel:
self.mapper_registry.map_imperatively(
Owner, owner, inherits=Manager, polymorphic_identity="owner"
)
@testing.requires.on_update_cascade
def test_pk_passive(self):
self._test_pk(True)
@testing.requires.non_updating_cascade
def test_pk_nonpassive(self):
self._test_pk(False)
@testing.requires.on_update_cascade
def test_fk_passive(self):
self._test_fk(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.requires.non_updating_cascade
def test_fk_nonpassive(self):
self._test_fk(False)
@testing.requires.on_update_cascade
def test_pk_threelevel_passive(self):
self._test_pk_threelevel(True)
@testing.requires.non_updating_cascade
def test_pk_threelevel_nonpassive(self):
self._test_pk_threelevel(False)
@testing.requires.on_update_cascade
def test_fk_threelevel_passive(self):
self._test_fk_threelevel(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.requires.non_updating_cascade
def test_fk_threelevel_nonpassive(self):
self._test_fk_threelevel(False)
def _test_pk(self, passive_updates):
(Engineer,) = self.classes("Engineer")
self._mapping_fixture(False, passive_updates)
sess = fixture_session()
e1 = Engineer(name="dilbert", primary_language="java")
sess.add(e1)
sess.commit()
e1.name = "wally"
e1.primary_language = "c++"
sess.commit()
eq_(
sess.execute(self.tables.engineer.select()).fetchall(),
[("wally", "c++", None)],
)
eq_(e1.name, "wally")
e1.name = "dogbert"
sess.commit()
eq_(e1.name, "dogbert")
eq_(
sess.execute(self.tables.engineer.select()).fetchall(),
[("dogbert", "c++", None)],
)
def _test_fk(self, passive_updates):
Manager, Engineer = self.classes("Manager", "Engineer")
self._mapping_fixture(False, passive_updates)
sess = fixture_session()
m1 = Manager(name="dogbert", paperwork="lots")
e1, e2 = (
Engineer(name="dilbert", primary_language="java", boss=m1),
Engineer(name="wally", primary_language="c++", boss=m1),
)
sess.add_all([e1, e2, m1])
sess.commit()
eq_(e1.boss_name, "dogbert")
eq_(e2.boss_name, "dogbert")
eq_(
sess.execute(
self.tables.engineer.select().order_by(Engineer.name)
).fetchall(),
[("dilbert", "java", "dogbert"), ("wally", "c++", "dogbert")],
)
sess.expire_all()
m1.name = "pointy haired"
e1.primary_language = "scala"
e2.primary_language = "cobol"
sess.commit()
eq_(e1.boss_name, "pointy haired")
eq_(e2.boss_name, "pointy haired")
eq_(
sess.execute(
self.tables.engineer.select().order_by(Engineer.name)
).fetchall(),
[
("dilbert", "scala", "pointy haired"),
("wally", "cobol", "pointy haired"),
],
)
def _test_pk_threelevel(self, passive_updates):
(Owner,) = self.classes("Owner")
self._mapping_fixture(True, passive_updates)
sess = fixture_session()
o1 = Owner(name="dogbert", owner_name="dog")
sess.add(o1)
sess.commit()
o1.name = "pointy haired"
o1.owner_name = "pointy"
sess.commit()
eq_(
sess.execute(self.tables.manager.select()).fetchall(),
[("pointy haired", None)],
)
eq_(
sess.execute(self.tables.owner.select()).fetchall(),
[("pointy haired", "pointy")],
)
eq_(o1.name, "pointy haired")
o1.name = "catbert"
sess.commit()
eq_(o1.name, "catbert")
eq_(
sess.execute(self.tables.manager.select()).fetchall(),
[("catbert", None)],
)
eq_(
sess.execute(self.tables.owner.select()).fetchall(),
[("catbert", "pointy")],
)
def _test_fk_threelevel(self, passive_updates):
Owner, Engineer = self.classes("Owner", "Engineer")
self._mapping_fixture(True, passive_updates)
sess = fixture_session()
m1 = Owner(name="dogbert", paperwork="lots", owner_name="dog")
e1, e2 = (
Engineer(name="dilbert", primary_language="java", boss=m1),
Engineer(name="wally", primary_language="c++", boss=m1),
)
sess.add_all([e1, e2, m1])
sess.commit()
eq_(e1.boss_name, "dogbert")
eq_(e2.boss_name, "dogbert")
sess.expire_all()
m1.name = "pointy haired"
e1.primary_language = "scala"
e2.primary_language = "cobol"
sess.commit()
eq_(e1.boss_name, "pointy haired")
eq_(e2.boss_name, "pointy haired")
eq_(
sess.execute(self.tables.manager.select()).fetchall(),
[("pointy haired", "lots")],
)
eq_(
sess.execute(self.tables.owner.select()).fetchall(),
[("pointy haired", "dog")],
)
| JoinedInheritanceTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_storage_transfer_service.py | {
"start": 11030,
"end": 16377
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_job_create_gcs(self, mock_hook):
mock_hook.return_value.create_transfer_job.return_value = VALID_TRANSFER_JOB_GCS
body = deepcopy(VALID_TRANSFER_JOB_GCS)
del body["name"]
op = CloudDataTransferServiceCreateJobOperator(
body=body,
task_id=TASK_ID,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_transfer_job.assert_called_once_with(body=VALID_TRANSFER_JOB_GCS_RAW)
assert result == VALID_TRANSFER_JOB_GCS
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_storage_transfer_service.AwsBaseHook")
def test_job_create_aws(self, aws_hook, mock_hook):
mock_hook.return_value.create_transfer_job.return_value = VALID_TRANSFER_JOB_AWS
aws_hook.return_value.get_credentials.return_value = Credentials(
TEST_AWS_ACCESS_KEY_ID, TEST_AWS_ACCESS_SECRET, None
)
body = deepcopy(VALID_TRANSFER_JOB_AWS)
del body["name"]
op = CloudDataTransferServiceCreateJobOperator(
body=body,
task_id=TASK_ID,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_transfer_job.assert_called_once_with(body=VALID_TRANSFER_JOB_AWS_RAW)
assert result == VALID_TRANSFER_JOB_AWS
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_storage_transfer_service.AwsBaseHook")
def test_job_create_aws_with_role_arn(self, aws_hook, mock_hook):
mock_hook.return_value.create_transfer_job.return_value = VALID_TRANSFER_JOB_AWS_ROLE_ARN
body = deepcopy(VALID_TRANSFER_JOB_AWS_ROLE_ARN)
del body["name"]
op = CloudDataTransferServiceCreateJobOperator(
body=body,
task_id=TASK_ID,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_transfer_job.assert_called_once_with(
body=VALID_TRANSFER_JOB_AWS_WITH_ROLE_ARN_RAW
)
assert result == VALID_TRANSFER_JOB_AWS_ROLE_ARN
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_storage_transfer_service.AwsBaseHook")
def test_job_create_multiple(self, aws_hook, gcp_hook):
aws_hook.return_value.get_credentials.return_value = Credentials(
TEST_AWS_ACCESS_KEY_ID, TEST_AWS_ACCESS_SECRET, None
)
gcp_hook.return_value.create_transfer_job.return_value = VALID_TRANSFER_JOB_AWS
body = deepcopy(VALID_TRANSFER_JOB_AWS)
op = CloudDataTransferServiceCreateJobOperator(body=body, task_id=TASK_ID)
result = op.execute(context=mock.MagicMock())
assert result == VALID_TRANSFER_JOB_AWS
op = CloudDataTransferServiceCreateJobOperator(body=body, task_id=TASK_ID)
result = op.execute(context=mock.MagicMock())
assert result == VALID_TRANSFER_JOB_AWS
# Setting all the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all
# fields
@pytest.mark.db_test
@pytest.mark.parametrize(
("body", "excepted"),
[(VALID_TRANSFER_JOB_JINJA, VALID_TRANSFER_JOB_JINJA_RENDERED)],
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_templates(self, _, create_task_instance_of_operator, body, excepted, session):
dag_id = "TestGcpStorageTransferJobCreateOperator"
ti = create_task_instance_of_operator(
CloudDataTransferServiceCreateJobOperator,
dag_id=dag_id,
body=body,
gcp_conn_id="{{ dag.dag_id }}",
aws_conn_id="{{ dag.dag_id }}",
task_id="task-id",
)
session.add(ti)
session.commit()
ti.render_templates()
assert excepted == getattr(ti.task, "body")
assert dag_id == getattr(ti.task, "gcp_conn_id")
assert dag_id == getattr(ti.task, "aws_conn_id")
| TestGcpStorageTransferJobCreateOperator |
python | pyca__cryptography | tests/hazmat/primitives/test_ciphers.py | {
"start": 1876,
"end": 2548
} | class ____:
@pytest.mark.parametrize("mode", (modes.CBC, modes.CTR, CFB, CFB8, OFB))
def test_invalid_key_size_with_mode(self, mode, backend):
with pytest.raises(ValueError):
ciphers.Cipher(AES(b"0" * 64), mode(b"0" * 16), backend)
def test_xts_tweak_not_bytes(self):
with pytest.raises(TypeError):
modes.XTS(32) # type: ignore[arg-type]
def test_xts_tweak_too_small(self):
with pytest.raises(ValueError):
modes.XTS(b"0")
def test_xts_wrong_key_size(self, backend):
with pytest.raises(ValueError):
ciphers.Cipher(AES(b"0" * 16), modes.XTS(b"0" * 16), backend)
| TestAESXTS |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 27655,
"end": 28057
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the precondition given in one or more of the
request-header fields evaluated to false when it was tested on the
server.
code: 412, title: Precondition Failed
"""
code = 412
title = 'Precondition Failed'
explanation = 'Request precondition failed.'
| HTTPPreconditionFailed |
python | kubernetes-client__python | kubernetes/client/models/v1_service_port.py | {
"start": 383,
"end": 11951
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'app_protocol': 'str',
'name': 'str',
'node_port': 'int',
'port': 'int',
'protocol': 'str',
'target_port': 'object'
}
attribute_map = {
'app_protocol': 'appProtocol',
'name': 'name',
'node_port': 'nodePort',
'port': 'port',
'protocol': 'protocol',
'target_port': 'targetPort'
}
def __init__(self, app_protocol=None, name=None, node_port=None, port=None, protocol=None, target_port=None, local_vars_configuration=None): # noqa: E501
"""V1ServicePort - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._app_protocol = None
self._name = None
self._node_port = None
self._port = None
self._protocol = None
self._target_port = None
self.discriminator = None
if app_protocol is not None:
self.app_protocol = app_protocol
if name is not None:
self.name = name
if node_port is not None:
self.node_port = node_port
self.port = port
if protocol is not None:
self.protocol = protocol
if target_port is not None:
self.target_port = target_port
@property
def app_protocol(self):
"""Gets the app_protocol of this V1ServicePort. # noqa: E501
The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
:return: The app_protocol of this V1ServicePort. # noqa: E501
:rtype: str
"""
return self._app_protocol
@app_protocol.setter
def app_protocol(self, app_protocol):
"""Sets the app_protocol of this V1ServicePort.
The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
:param app_protocol: The app_protocol of this V1ServicePort. # noqa: E501
:type: str
"""
self._app_protocol = app_protocol
@property
def name(self):
"""Gets the name of this V1ServicePort. # noqa: E501
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. # noqa: E501
:return: The name of this V1ServicePort. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ServicePort.
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. # noqa: E501
:param name: The name of this V1ServicePort. # noqa: E501
:type: str
"""
self._name = name
@property
def node_port(self):
"""Gets the node_port of this V1ServicePort. # noqa: E501
The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport # noqa: E501
:return: The node_port of this V1ServicePort. # noqa: E501
:rtype: int
"""
return self._node_port
@node_port.setter
def node_port(self, node_port):
"""Sets the node_port of this V1ServicePort.
The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport # noqa: E501
:param node_port: The node_port of this V1ServicePort. # noqa: E501
:type: int
"""
self._node_port = node_port
@property
def port(self):
"""Gets the port of this V1ServicePort. # noqa: E501
The port that will be exposed by this service. # noqa: E501
:return: The port of this V1ServicePort. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1ServicePort.
The port that will be exposed by this service. # noqa: E501
:param port: The port of this V1ServicePort. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def protocol(self):
"""Gets the protocol of this V1ServicePort. # noqa: E501
The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. # noqa: E501
:return: The protocol of this V1ServicePort. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this V1ServicePort.
The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. # noqa: E501
:param protocol: The protocol of this V1ServicePort. # noqa: E501
:type: str
"""
self._protocol = protocol
@property
def target_port(self):
"""Gets the target_port of this V1ServicePort. # noqa: E501
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service # noqa: E501
:return: The target_port of this V1ServicePort. # noqa: E501
:rtype: object
"""
return self._target_port
@target_port.setter
def target_port(self, target_port):
"""Sets the target_port of this V1ServicePort.
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service # noqa: E501
:param target_port: The target_port of this V1ServicePort. # noqa: E501
:type: object
"""
self._target_port = target_port
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServicePort):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServicePort):
return True
return self.to_dict() != other.to_dict()
| V1ServicePort |
python | kamyu104__LeetCode-Solutions | Python/dungeon-game.py | {
"start": 37,
"end": 636
} | class ____(object):
# @param dungeon, a list of lists of integers
# @return a integer
def calculateMinimumHP(self, dungeon):
DP = [float("inf") for _ in dungeon[0]]
DP[-1] = 1
for i in reversed(xrange(len(dungeon))):
DP[-1] = max(DP[-1] - dungeon[i][-1], 1)
for j in reversed(xrange(len(dungeon[i]) - 1)):
min_HP_on_exit = min(DP[j], DP[j + 1])
DP[j] = max(min_HP_on_exit - dungeon[i][j], 1)
return DP[0]
# Time: O(m * n logk), where k is the possible maximum sum of loses
# Space: O(m + n)
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 14927,
"end": 17913
} | class ____(_LiteralRoundTripFixture, fixtures.TestBase):
__requires__ = ("datetime_interval",)
__backend__ = True
datatype = Interval
data = datetime.timedelta(days=1, seconds=4)
def test_literal(self, literal_round_trip):
literal_round_trip(self.datatype, [self.data], [self.data])
def test_select_direct_literal_interval(self, connection):
row = connection.execute(select(literal(self.data))).first()
eq_(row, (self.data,))
def test_arithmetic_operation_literal_interval(self, connection):
now = datetime.datetime.now().replace(microsecond=0)
# Able to subtract
row = connection.execute(
select(literal(now) - literal(self.data))
).scalar()
eq_(row, now - self.data)
# Able to Add
row = connection.execute(
select(literal(now) + literal(self.data))
).scalar()
eq_(row, now + self.data)
@testing.fixture
def arithmetic_table_fixture(cls, metadata, connection):
class Decorated(TypeDecorator):
impl = cls.datatype
cache_ok = True
it = Table(
"interval_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("interval_data", cls.datatype),
Column("date_data", DateTime),
Column("decorated_interval_data", Decorated),
)
it.create(connection)
return it
def test_arithmetic_operation_table_interval_and_literal_interval(
self, connection, arithmetic_table_fixture
):
interval_table = arithmetic_table_fixture
data = datetime.timedelta(days=2, seconds=5)
connection.execute(
interval_table.insert(), {"id": 1, "interval_data": data}
)
# Subtraction Operation
value = connection.execute(
select(interval_table.c.interval_data - literal(self.data))
).scalar()
eq_(value, data - self.data)
# Addition Operation
value = connection.execute(
select(interval_table.c.interval_data + literal(self.data))
).scalar()
eq_(value, data + self.data)
def test_arithmetic_operation_table_date_and_literal_interval(
self, connection, arithmetic_table_fixture
):
interval_table = arithmetic_table_fixture
now = datetime.datetime.now().replace(microsecond=0)
connection.execute(
interval_table.insert(), {"id": 1, "date_data": now}
)
# Subtraction Operation
value = connection.execute(
select(interval_table.c.date_data - literal(self.data))
).scalar()
eq_(value, (now - self.data))
# Addition Operation
value = connection.execute(
select(interval_table.c.date_data + literal(self.data))
).scalar()
eq_(value, (now + self.data))
| IntervalTest |
python | streamlit__streamlit | lib/tests/streamlit/runtime/caching/cache_resource_api_test.py | {
"start": 1761,
"end": 7899
} | class ____(unittest.TestCase):
def setUp(self) -> None:
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
def tearDown(self):
st.cache_resource.clear()
# Some of these tests reach directly into _cache_info and twiddle it.
# Reset default values on teardown.
@patch.object(st, "exception")
def test_mutate_return(self, exception):
"""Mutating a cache_resource return value is legal, and *will* affect
future accessors of the data."""
@st.cache_resource
def f():
return [0, 1]
r1 = f()
r1[0] = 1
r2 = f()
exception.assert_not_called()
assert r1 == [1, 1]
assert r2 == [1, 1]
def test_cached_member_function_with_hash_func(self):
"""@st.cache_resource can be applied to class member functions
with corresponding hash_func.
"""
class TestClass:
@st.cache_resource(
hash_funcs={
"tests.streamlit.runtime.caching.cache_resource_api_test."
"CacheResourceTest.test_cached_member_function_with_hash_func."
"<locals>.TestClass": id
}
)
def member_func(self):
return "member func!"
@classmethod
@st.cache_resource
def class_method(cls):
return "class method!"
@staticmethod
@st.cache_resource
def static_method():
return "static method!"
obj = TestClass()
assert obj.member_func() == "member func!"
assert obj.class_method() == "class method!"
assert obj.static_method() == "static method!"
    def test_function_name_does_not_use_hashfuncs(self):
        """Hash funcs should only be used on arguments to a function,
        and not when computing the key for a function's unique MemCache.
        """
        str_hash_func = Mock(return_value=None)

        @st.cache_resource(hash_funcs={str: str_hash_func})
        def foo(string_arg):
            return []

        # If our str hash_func is called multiple times, it's probably because
        # it's being used to compute the function's function_key (as opposed to
        # the value_key). It should only be used to compute the value_key!
        foo("ahoy")
        str_hash_func.assert_called_once_with("ahoy")
    def test_user_hash_error(self):
        """A broken user-supplied hash_func surfaces as UserHashError with a
        message that names the offending function and the hashed object."""

        class MyObj:
            # we specify __repr__ here, to avoid `MyObj object at 0x1347a3f70`
            # in error message
            def __repr__(self):
                return "MyObj class"

        def bad_hash_func(x):
            x += 10  # Throws a TypeError since x has type MyObj.
            return x

        @st.cache_resource(hash_funcs={MyObj: bad_hash_func})
        def user_hash_error_func(x):
            pass

        with pytest.raises(UserHashError) as ctx:
            my_obj = MyObj()
            user_hash_error_func(my_obj)

        # NOTE(review): the expected text below is compared verbatim against
        # str(ctx.value); do not edit its wording or spacing.
        expected_message = """unsupported operand type(s) for +=: 'MyObj' and 'int'
This error is likely due to a bug in `bad_hash_func()`, which is a
user-defined hash function that was passed into the `@st.cache_resource` decorator of
`user_hash_error_func()`.
`bad_hash_func()` failed when hashing an object of type
`tests.streamlit.runtime.caching.cache_resource_api_test.CacheResourceTest.test_user_hash_error.<locals>.MyObj`. If you don't know where that object is coming from,
try looking at the hash chain below for an object that you do recognize, then
pass that to `hash_funcs` instead:
```
Object of type tests.streamlit.runtime.caching.cache_resource_api_test.CacheResourceTest.test_user_hash_error.<locals>.MyObj: MyObj class
```
If you think this is actually a Streamlit bug, please
[file a bug report here](https://github.com/streamlit/streamlit/issues/new/choose)."""  # noqa: E501

        assert str(ctx.value) == expected_message
def test_cached_st_function_clear_args(self):
self.x = 0
@st.cache_resource()
def foo(y):
self.x += y
return self.x
assert foo(1) == 1
foo.clear(2)
assert foo(1) == 1
foo.clear(1)
assert foo(1) == 2
    def test_cached_class_method_clear_args(self):
        """clear() with positional or keyword args evicts only the matching
        cached entry of a cached method."""
        self.x = 0

        class ExampleClass:
            # `_self` is underscore-prefixed — presumably so it is excluded
            # from cache-key hashing; TODO confirm against cache_resource docs.
            @st.cache_resource()
            def foo(_self, y):
                self.x += y
                return self.x

        example_instance = ExampleClass()
        # Calling method foo produces the side effect of incrementing self.x
        # and returning it as the result.
        # calling foo(1) should return 1
        assert example_instance.foo(1) == 1
        # calling foo.clear(2) should clear the cache for the argument 2,
        # and keep the cache for the argument 1, therefore calling foo(1) should return
        # cached value 1
        example_instance.foo.clear(2)
        assert example_instance.foo(1) == 1
        # calling foo.clear(1) should clear the cache for the argument 1,
        # therefore calling foo(1) should return the new value 2
        example_instance.foo.clear(1)
        assert example_instance.foo(1) == 2
        # Try the same with a keyword argument:
        example_instance.foo.clear(y=1)
        assert example_instance.foo(1) == 3
    def test_cached_class_method_clear(self):
        """clear() with no arguments evicts every cached entry of a cached
        method."""
        self.x = 0

        class ExampleClass:
            # Underscore prefix on `_self` — presumably excluded from cache-key
            # hashing; TODO confirm against cache_resource docs.
            @st.cache_resource()
            def foo(_self, y):
                self.x += y
                return self.x

        example_instance = ExampleClass()
        # Calling method foo produces the side effect of incrementing self.x
        # and returning it as the result.
        # calling foo(1) should return 1
        assert example_instance.foo(1) == 1
        example_instance.foo.clear()
        # calling foo.clear() should clear all cached values:
        # So the call to foo() should return the new value 2
        assert example_instance.foo(1) == 2
| CacheResourceTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.