language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/forms_tests/widget_tests/test_telinput.py | {
"start": 66,
"end": 270
} | class ____(WidgetTest):
widget = TelInput()
def test_render(self):
self.check_html(
self.widget, "telephone", "", html='<input type="tel" name="telephone">'
)
| TelInputTest |
python | astropy__astropy | astropy/utils/tests/test_decorators.py | {
"start": 572,
"end": 749
} | class ____(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
| NewDeprecationWarning |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 6478,
"end": 6764
} | class ____(resource.Resource):
"""
A testing resource which renders itself as the value of the Content-Length
header from the request.
"""
def render(self, request):
return request.requestHeaders.getRawHeaders(b"content-length")[0]
| ContentLengthHeaderResource |
python | PyCQA__pylint | tests/functional/g/genexp_in_class_scope.py | {
"start": 115,
"end": 187
} | class ____:
var1 = []
var2 = list(value*2 for value in var1)
| MyClass |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 155885,
"end": 156887
} | class ____(TypedDict, total=False):
"""
:class:`altair.LinearGradient` ``TypedDict`` wrapper.
Parameters
----------
gradient
The type of gradient. Use ``"linear"`` for a linear gradient.
stops
An array of gradient stops defining the gradient color sequence.
id
x1
The starting x-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
x2
The ending x-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``1``
y1
The starting y-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
y2
The ending y-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
"""
gradient: Literal["linear"]
stops: Sequence[GradientStopKwds]
id: str
x1: float
x2: float
y1: float
y2: float
| LinearGradientKwds |
python | getsentry__sentry | tests/sentry/sentry_apps/services/test_model.py | {
"start": 1514,
"end": 3090
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(name="foo")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(slug="boop", organization=self.org)
self.sentry_app = self.create_sentry_app(
name="foo", organization=self.org, webhook_url="https://example.com", scopes=()
)
self.install = self.create_sentry_app_installation(
slug="foo", organization=self.org, user=self.user
)
def test_serializes_correct_fields_helper(self) -> None:
install = serialize_sentry_app_installation(self.install, self.install.sentry_app)
assert (
f"this {install.sentry_app.application} is so skibidi"
== f"this id={install.sentry_app.application_id} is so skibidi"
)
assert f"this {install.sentry_app} is so skibidi".lower().find("client_id") == -1
assert f"this {install.sentry_app} is so toilet".lower().find("client_secret") == -1
def test_serializes_correct_fields_(self) -> None:
install = app_service.get_many(filter=dict(installation_ids=[self.install.id]))[0]
assert (
f"this {install.sentry_app.application} is so skibidi"
== f"this id={install.sentry_app.application_id} is so skibidi"
)
assert f"this {install.sentry_app} is so skibidi".lower().find("client_id") == -1
assert f"this {install.sentry_app} is so toilet".lower().find("client_secret") == -1
| TestRpcApiApplication |
python | huggingface__transformers | src/transformers/core_model_loading.py | {
"start": 15742,
"end": 17427
} | class ____(WeightTransform):
# Special case of WeightTransform that only renames keys without any conversion.
def convert(
self,
layer_name: str,
model=None,
config=None,
hf_quantizer=None,
missing_keys: Optional[MutableSet[str]] = None,
misc: Optional[MutableMapping[str, str]] = None,
):
# Collect the tensor if using threading
for pattern, futures in self.collected_tensors.items():
self.collected_tensors[pattern] = (
futures if isinstance(futures[0], torch.Tensor) else [future.result() for future in futures]
)
# Perform renaming op (for a simple WeightRenaming, `self.source_patterns` and `self.target_patterns` can
# only be of length 1, and are actually the full key names - we also have only 1 single related tensor)
target_key = self.target_patterns[0]
collected_tensors = {target_key: self.collected_tensors[self.source_patterns[0]]}
if hf_quantizer is not None and self.quantization_operation is not None:
with log_to_misc(layer_name, misc, (self.collected_tensors, layer_name), self.quantization_operation):
collected_tensors = self.quantization_operation.convert(
collected_tensors,
source_patterns=self.source_patterns,
target_patterns=self.target_patterns,
full_layer_name=target_key,
model=model,
config=config,
missing_keys=missing_keys,
)
return collected_tensors, misc
@dataclass(slots=True)
| WeightRenaming |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 18763,
"end": 19342
} | class ____(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
tup, idx = args
ret = None
if not isinstance(tup, types.LiteralStrKeyDict):
return
if isinstance(idx, str):
if idx in tup.fields:
lookup = tup.fields.index(idx)
else:
raise errors.NumbaKeyError(f"Key '{idx}' is not in dict.")
ret = tup.types[lookup]
if ret is not None:
sig = signature(ret, *args)
return sig
@infer
| StaticGetItemLiteralStrKeyDict |
python | tensorflow__tensorflow | tensorflow/python/platform/flags.py | {
"start": 1975,
"end": 4086
} | class ____:
"""Wrapper class for absl.flags.FLAGS.
The difference is that tf.flags.FLAGS implicitly parses flags with sys.argv
when accessing the FLAGS values before it's explicitly parsed,
while absl.flags.FLAGS raises an exception.
"""
def __init__(self, flags_object):
self.__dict__['__wrapped'] = flags_object
def __getattribute__(self, name):
if name == '__dict__':
return super().__getattribute__(name)
return self.__dict__['__wrapped'].__getattribute__(name)
def __getattr__(self, name):
wrapped = self.__dict__['__wrapped']
# To maintain backwards compatibility, implicitly parse flags when reading
# a flag.
if not wrapped.is_parsed():
wrapped(_sys.argv)
return wrapped.__getattr__(name)
def __setattr__(self, name, value):
return self.__dict__['__wrapped'].__setattr__(name, value)
def __delattr__(self, name):
return self.__dict__['__wrapped'].__delattr__(name)
def __dir__(self):
return self.__dict__['__wrapped'].__dir__()
def __getitem__(self, name):
return self.__dict__['__wrapped'].__getitem__(name)
def __setitem__(self, name, flag):
return self.__dict__['__wrapped'].__setitem__(name, flag)
def __len__(self):
return self.__dict__['__wrapped'].__len__()
def __iter__(self):
return self.__dict__['__wrapped'].__iter__()
def __str__(self):
return self.__dict__['__wrapped'].__str__()
def __call__(self, *args, **kwargs):
return self.__dict__['__wrapped'].__call__(*args, **kwargs)
# pylint: disable=invalid-name,used-before-assignment
# absl.flags APIs use `default` as the name of the default value argument.
# Allow the following functions continue to accept `default_value`.
DEFINE_string = _wrap_define_function(DEFINE_string)
DEFINE_boolean = _wrap_define_function(DEFINE_boolean)
DEFINE_bool = DEFINE_boolean
DEFINE_float = _wrap_define_function(DEFINE_float)
DEFINE_integer = _wrap_define_function(DEFINE_integer)
# pylint: enable=invalid-name,used-before-assignment
FLAGS = _FlagValuesWrapper(FLAGS) # pylint: disable=used-before-assignment
| _FlagValuesWrapper |
python | numba__numba | numba/cuda/stubs.py | {
"start": 4551,
"end": 4835
} | class ____(Stub):
'''
vote_sync_intrinsic(mask, mode, predictate)
Nvvm intrinsic for performing a reduce and broadcast across a warp
docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-vote
'''
_description_ = '<vote_sync()>'
| vote_sync_intrinsic |
python | doocs__leetcode | solution/2100-2199/2117.Abbreviating the Product of a Range/Solution.py | {
"start": 0,
"end": 939
} | class ____:
def abbreviateProduct(self, left: int, right: int) -> str:
cnt2 = cnt5 = 0
for x in range(left, right + 1):
while x % 2 == 0:
cnt2 += 1
x //= 2
while x % 5 == 0:
cnt5 += 1
x //= 5
c = cnt2 = cnt5 = min(cnt2, cnt5)
pre = suf = 1
gt = False
for x in range(left, right + 1):
suf *= x
while cnt2 and suf % 2 == 0:
suf //= 2
cnt2 -= 1
while cnt5 and suf % 5 == 0:
suf //= 5
cnt5 -= 1
if suf >= 1e10:
gt = True
suf %= int(1e10)
pre *= x
while pre > 1e5:
pre /= 10
if gt:
return str(int(pre)) + "..." + str(suf % int(1e5)).zfill(5) + "e" + str(c)
return str(suf) + "e" + str(c)
| Solution |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-dashscope-rerank/llama_index/postprocessor/dashscope_rerank/base.py | {
"start": 635,
"end": 3255
} | class ____(BaseNodePostprocessor):
model: str = Field(description="Dashscope rerank model name.")
top_n: int = Field(description="Top N nodes to return.")
_api_key: Optional[str] = PrivateAttr()
def __init__(
self,
top_n: int = 3,
model: str = "gte-rerank",
return_documents: bool = False,
api_key: Optional[str] = None,
):
try:
api_key = api_key or os.environ["DASHSCOPE_API_KEY"]
except IndexError:
raise ValueError(
"Must pass in dashscope api key or "
"specify via DASHSCOPE_API_KEY environment variable "
)
super().__init__(top_n=top_n, model=model, return_documents=return_documents)
self._api_key = api_key
@classmethod
def class_name(cls) -> str:
return "DashScopeRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
model_name=self.model,
top_n=self.top_n,
query=query_bundle,
nodes=nodes,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = dashscope.TextReRank.call(
model=self.model,
top_n=self.top_n,
query=query_bundle.query_str,
documents=texts,
api_key=self._api_key,
)
new_nodes = []
for result in results.output.results:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.relevance_score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(
ReRankEndEvent(
nodes=new_nodes,
)
)
return new_nodes
| DashScopeRerank |
python | plotly__plotly.py | plotly/graph_objs/splom/marker/_colorbar.py | {
"start": 233,
"end": 61588
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom.marker"
_path_str = "splom.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.splom.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.splom.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.splom.marker.c
olorbar.tickformatstopdefaults), sets the default property
values to use for elements of
splom.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.splom.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
    @property
    def _prop_descriptions(self) -> str:
        # Plain-text reference for every settable colorbar property.
        # This string is a runtime value (it is interpolated into generated
        # constructor docstrings), so its content must not be edited by hand.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.splom.marker.co
            lorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.splom.
            marker.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            splom.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.splom.marker.colorbar.Titl
            e` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.splom.marker.co
lorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.splom.
marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
splom.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.splom.marker.colorbar.Titl
e` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | openai__openai-python | src/openai/types/batch_create_params.py | {
"start": 313,
"end": 2053
} | class ____(TypedDict, total=False):
completion_window: Required[Literal["24h"]]
"""The time frame within which the batch should be processed.
Currently only `24h` is supported.
"""
endpoint: Required[
Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"]
]
"""The endpoint to be used for all requests in the batch.
Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
`/v1/completions`, and `/v1/moderations` are supported. Note that
`/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding
inputs across all requests in the batch.
"""
input_file_id: Required[str]
"""The ID of an uploaded file that contains requests for the new batch.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
"""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
output_expires_after: OutputExpiresAfter
"""
The expiration policy for the output and/or error file that are generated for a
batch.
"""
| BatchCreateParams |
python | dask__distributed | distributed/diagnostics/plugin.py | {
"start": 13578,
"end": 16823
} | class ____(SchedulerPlugin):
"""Scheduler plugin to install software on the cluster
This accepts an function that installs software on the scheduler and
all workers. You can also optionally ask for the worker to restart
after performing this installation.
.. note::
This will increase the time it takes to start up
each worker. If possible, we recommend including the
software in the worker environment or image. This is
primarily intended for experimentation and debugging.
Parameters
----------
install_fn
Callable used to install the software; must be idempotent.
restart_workers
Whether or not to restart the worker after installing the packages
Only functions if the worker has an attached nanny process
See Also
--------
CondaInstall
PipInstall
"""
idempotent = True
_lock: ClassVar[asyncio.Lock | None] = None
_install_fn: Callable[[], None]
name: str
restart_workers: bool
_scheduler: Scheduler
def __init__(
self,
install_fn: Callable[[], None],
restart_workers: bool,
):
self._install_fn = install_fn
self.restart_workers = restart_workers
self.name = f"{self.__class__.__name__}-{uuid.uuid4()}"
async def start(self, scheduler: Scheduler) -> None:
from distributed.core import clean_exception
from distributed.protocol.serialize import Serialized, deserialize
self._scheduler = scheduler
if InstallPlugin._lock is None:
InstallPlugin._lock = asyncio.Lock()
async with InstallPlugin._lock:
self._install_fn()
if self.restart_workers:
nanny_plugin = _InstallNannyPlugin(self._install_fn, self.name)
responses = await scheduler.register_nanny_plugin(
comm=None,
plugin=dumps(nanny_plugin),
name=self.name,
idempotent=True,
)
else:
worker_plugin = _InstallWorkerPlugin(self._install_fn, self.name)
responses = await scheduler.register_worker_plugin(
comm=None,
plugin=dumps(worker_plugin),
name=self.name,
idempotent=True,
)
for response in responses.values():
if response["status"] == "error":
response = { # type: ignore[unreachable]
k: deserialize(v.header, v.frames)
for k, v in response.items()
if isinstance(v, Serialized)
}
_, exc, tb = clean_exception(**response)
raise exc.with_traceback(tb)
async def close(self) -> None:
assert InstallPlugin._lock is not None
async with InstallPlugin._lock:
if self.restart_workers:
await self._scheduler.unregister_nanny_plugin(comm=None, name=self.name)
else:
await self._scheduler.unregister_worker_plugin(
comm=None, name=self.name
)
| InstallPlugin |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 89535,
"end": 104446
} | class ____(DefaultHandler):
"""A ops handler that proxies calls to `kernel` and its
handler and returns `CSEVariable`s with correct shape and dtype.
"""
name = "CSEProxy"
def __init__(self, kernel: Kernel[Any], parent_handler: OpsHandler[Any]):
super().__init__()
from ..bounds import ValueRangeAnalysis
self.vr_analysis = ValueRangeAnalysis()
self.kernel = kernel
self.parent_handler = parent_handler
def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
bounds = self._bound_variable(name, *args, **kwargs)
value = getattr(self.parent_handler, name)(*args, **kwargs)
dtype_handler = DtypePropagationOpsHandler()
shape_handler = ShapePropagationOpsHandler()
backend = get_current_backend()
shape_op = getattr(shape_handler, name)
output_dtype = None
output_shape = None
if name == "masked" and backend == "triton":
output_dtype = value.dtype
output_shape = value.shape
elif name == "masked" and backend == "cpp":
output_dtype = V.interpreter.current_node.meta.get(
OptimizationContext.key, None
).dtype
# TODO: fix me
output_shape = None
elif backend in ("triton", "cpp", "mps"):
dtype_op = getattr(dtype_handler, name)
output_dtype = dtype_op(*args, **kwargs)
output_shape = shape_op(*args, **kwargs)
if backend in ("triton", "cpp"):
# maybe there are some exceptions on mps?
assert output_dtype is not None
output_idx = 0
def do_cse(v: Union[str, CSEVariable]) -> CSEVariable:
# we tree_map over the output, so we need to fetch corresponding dtype
nonlocal output_idx
var_dtype: Optional[torch.dtype] = (
output_dtype[output_idx]
if isinstance(output_dtype, (list, tuple))
else output_dtype
)
var_shape: BlockShapeType = (
output_shape[output_idx] # type: ignore[assignment]
if isinstance(output_shape, (list, tuple))
and len(output_shape) > 0
and isinstance(output_shape[0], (list, tuple))
else output_shape
)
output_idx += 1
# some cpp op implementations don't set the dtype
if isinstance(v, CSEVariable):
if backend == "cpp" and v.dtype is None:
v.dtype = var_dtype
if v.shape is None:
v.shape = var_shape
csevar = V.kernel.cse.generate(
V.kernel.compute,
v,
bounds=bounds,
dtype=output_dtype,
shape=output_shape,
)
csevar.update_on_args(name, args, kwargs)
if (
config.test_configs.runtime_triton_dtype_assert
or config.test_configs.static_cpp_dtype_assert
):
assert var_dtype is not None
check_dtype(V.kernel.compute, csevar, var_dtype)
if config.test_configs.runtime_triton_shape_assert:
assert output_shape is not None
check_shape(V.kernel.compute, csevar, output_shape)
if config.runtime_triton_nan_asserts:
check_nan(V.kernel.compute, csevar)
return csevar
return pytree.tree_map(do_cse, value)
def _bound_variable(self, name: str, *args: Any, **kwargs: Any) -> ValueRanges[Any]:
"""
If the variable comes from an FX node, we forward the bound we have already computed
Else, if the variable when codegen'ing another op, we try to compute its bounds
"""
from ..bounds import ValueRangeAnalysis
from ..select_algorithm import TritonTemplateKernel
from .cuda.cuda_kernel import CUDATemplateKernel
if isinstance(V.kernel, TritonTemplateKernel):
return ValueRanges.unknown()
if isinstance(V.kernel, CUDATemplateKernel):
return ValueRanges.unknown()
if isinstance(V.interpreter, NullHandler):
return ValueRanges.unknown()
fx_node = V.interpreter.current_node
if fx_node.target == name and self.kernel.node_to_bounds is not None:
assert isinstance(self.kernel.node_to_bounds, dict), type(
self.kernel.node_to_bounds
)
return self.kernel.node_to_bounds.get(fx_node, ValueRanges.unknown())
elif config.compute_all_bounds and hasattr(ValueRangeAnalysis, name):
# These create lots of inner strings. We would need to compute the bounds at the ops
# We will also likely not get much from computing VRs on these nodes
if any(s in fx_node.target for s in ("set_indirect", "reduction", "scan")):
return ValueRanges.unknown()
# We assume that the inputs come from `ops.` and are not strings. If you want to generate
# intermediary strings, wrap them in CSE variables with properly initialised bounds.
# If there is no FX bound but we know how to compute one we do so
assert not kwargs
def arg_to_bound(x: Any) -> Any:
if isinstance(x, CSEVariable):
return x.bounds
elif isinstance(x, sympy.Expr):
return bound_sympy(x)
else:
return x
arg_bounds = list(map(arg_to_bound, args))
return getattr(self.vr_analysis, name)(*arg_bounds)
return ValueRanges.unknown()
def indirect_indexing(
self,
var: CSEVariable,
size: Union[sympy.Expr, int],
check: bool = True,
wrap_neg: bool = True,
) -> sympy.Symbol:
if isinstance(size, int):
size = sympy.Integer(size)
assert isinstance(size, sympy.Expr), (type(size), size)
# Skip CSE since this doesn't return an expression
if var.bounds.lower < 0:
if wrap_neg:
stm = ops.add(var, ops.index_expr(size, torch.long))
# Mixed negative and non-negative
if var.bounds.upper >= 0:
lt = ops.lt(var, 0)
stm = ops.where(lt, stm, var)
else:
stm = var
# Propagate bounds as we know how to compute them properly
new_bounds = ValueRanges.unknown()
if var.bounds != ValueRanges.unknown() and isinstance(size, sympy.Number):
# Take the negative part of the bound and add size to it
# Then take union of that and the positive part
# This is a tighter bound than that of a generic ops.where, as we have info on the cond
neg_bounds = var.bounds & ValueRanges(-int_oo, -1)
new_bounds = ValueRanges(
neg_bounds.lower + size, neg_bounds.upper + size
)
# We don't have a good way of representing the empty range
if var.bounds.upper >= 0:
pos = var.bounds & ValueRanges(0, int_oo)
new_bounds = new_bounds | pos
var = self.kernel.cse.generate(
self.kernel.compute,
stm,
bounds=new_bounds,
dtype=var.dtype,
shape=var.shape,
)
sympy_var = self.parent_handler.indirect_indexing(var, size, check)
if generate_assert(check):
assert_lower = not (var.bounds.lower >= 0)
# value ranges cannot x < s when x and s are symbols
assert_upper = not isinstance(size, sympy.Number) or not (
var.bounds.upper < size
)
self.kernel.check_bounds(sympy_var, size, assert_lower, assert_upper)
return sympy_var
def check_bounds(
self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool
) -> None:
return self.kernel.check_bounds(expr, size, lower, upper)
def load(self, name: str, index: sympy.Expr) -> CSEVariable:
if name in self.kernel.cse.invalidated_stores:
# A load from an invalidated store requires us to
# keep the actual buffer around
V.kernel.must_keep_buffers.add(name)
if free_symbol_is_type(index, SymT.TMP):
return self.kernel.indirect_load(name, index)
store_cache = self.kernel.cse.store_cache
if name in store_cache:
return store_cache[name]
out = self.kernel.load(name, index)
# count load that is not in the store_cache, and also not in the
# cse cache.
if out.use_count == 1:
self.kernel.num_load += 1
return out
def _update_store_cache(self, name: str, value: CSEVariable) -> None:
self.kernel.cse.store_cache[name] = value
if self.kernel.current_node and name in V.graph.name_to_buffer:
buf = self.kernel.current_node.get_output(name)
for other_name in buf.get_mutations():
self.kernel.cse.store_cache[other_name] = value
def store(
self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None
) -> None:
self.kernel.store_buffer_names.add(name)
if mode is None:
self._update_store_cache(name, value)
if name not in V.graph.removed_buffers:
self.kernel.store(name, index, value, mode=mode)
self.kernel.num_store += 1
def device_assert_async(self, cond: CSEVariable, msg: str) -> None:
self.kernel.device_assert_async(cond, msg)
# pyrefly: ignore [bad-override]
def partial_accumulate(self, *args: Any) -> None:
self.kernel.partial_accumulate(*args)
def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable) -> None:
self.kernel.store_buffer_names.add(name)
self._update_store_cache(name, value)
if name not in V.graph.removed_buffers:
self.kernel.num_store += 1
return self.kernel.store_reduction(name, index, value)
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: Union[CSEVariable, tuple[CSEVariable, ...]],
) -> Union[CSEVariable, tuple[CSEVariable, ...]]:
self.kernel.num_reduction += 1
return self.kernel.reduction(dtype, src_dtype, reduction_type, value)
def scan(
self,
dtypes: tuple[torch.dtype, ...],
combine_fn: Callable[
[tuple[CSEVariable, ...], tuple[CSEVariable, ...]],
tuple[CSEVariable, ...],
],
values: tuple[CSEVariable, ...],
) -> tuple[CSEVariable, ...]:
return self.kernel.scan(dtypes, combine_fn, values)
def sort(
self,
dtypes: tuple[torch.dtype, ...],
values: tuple[CSEVariable, ...],
stable: bool,
descending: bool,
) -> tuple[CSEVariable, ...]:
return self.kernel.sort(dtypes, values, stable, descending)
def bucketize(
self,
values: CSEVariable,
boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr],
boundary_indices: CSEVariable,
indexing_dtype: torch.dtype,
right: bool,
sorter: Optional[tuple[str, sympy.Expr]] = None,
sorter_indices: Optional[CSEVariable] = None,
) -> CSEVariable:
"""
[Note: Inductor bucketize op]
Inputs:
-------
values: the values to be bucketized.
boundaries: a tuple containing
(a) the name of the boundaries tensor (which must be sorted, unless
the sorting tensor is present),
(b) the length of the tensor in the last dimension (i.e. the length of
one set of boundaries),
(c) the number of elements in the underlying storage (i.e. the length
of the flattened tensor, ignoring striding), and
(d) the stride of the tensor in the last dimension.
boundary_indices: indices into a flattened version of the boundaries
tensor, of the same size and shape as "values". Each index points to
the first element in the set of boundaries to be used for the
corresponding value.
indexing_dtype: the dtype to use when indexing into the boundaries
tensor. This must be int64 or int32. This additionally specifies the
dtype of the return value.
right: see "Details" below.
sorter: an optional tuple containing
(a) the name of an optional sorting tensor, used to access unsorted
boundaries without reordering the boundaries tensor, and
(b) the stride of the tensor in the last dimension.
The values in the sorting tensor are used as indices into the *last*
dimension of the boundaries tensor, with all other indices matching.
The size of the sorting and boundaries tensors must be equivalent.
sorter_indices: must be present if the sorting array is present; see
"boundary_indices" for the equivalent definition for the boundaries
tensor.
Output:
-------
The buckets each value belongs in, within a given set of boundaries. 0
indicates a position before the first boundary, and len(boundaries_set)
represents a position after the last boundary.
Details:
--------
Given a value and a set of boundaries, calculate the bucket that each
value belongs to. This works differently in 1-D and N-D cases.
for values [[-1, 0, 1, 2], [3, 4, 5, 9]], boundaries [0, 4, 4, 8], right=True
return = [[ 0, 1, 1, 1], [1, 3, 3, 4]].
for values [[-1, 0, 1, 2], [3, 4, 5, 9]], boundaries [[0, 4], [4, 8]], right=True
return = [[ 0, 1, 1, 1], [0, 1, 1, 2]]
Note that in the N-D boundaries case, the shape of "values" and
"boundaries" must match in every dimension _except_ the last.
When right == False, bucket i refers to range (boundaries[i], boundaries[i+1]].
When right == True, bucket i refers to range [boundaries[i], boundaries[i+1]).
Boundaries must be non-decreasing, or a sorter must be provided which
would re-index offsets in a non-decreasing order (e.g. the second output
of torch.sort(offsets)). Otherwise, the result is undefined.
"""
return self.kernel.bucketize(
values,
boundaries,
boundary_indices,
indexing_dtype,
right,
sorter,
sorter_indices,
)
| CSEProxy |
python | paramiko__paramiko | tests/test_sftp.py | {
"start": 3597,
"end": 30045
} | class ____:
def test_file(self, sftp):
"""
verify that we can create a file.
"""
f = sftp.open(sftp.FOLDER + "/test", "w")
try:
assert f.stat().st_size == 0
finally:
f.close()
sftp.remove(sftp.FOLDER + "/test")
def test_close(self, sftp):
"""
Verify that SFTP session close() causes a socket error on next action.
"""
sftp.close()
with pytest.raises(socket.error, match="Socket is closed"):
sftp.open(sftp.FOLDER + "/test2", "w")
def test_sftp_can_be_used_as_context_manager(self, sftp):
"""
verify that the sftp session is closed when exiting the context manager
"""
with sftp:
pass
with pytest.raises(socket.error, match="Socket is closed"):
sftp.open(sftp.FOLDER + "/test2", "w")
def test_write(self, sftp):
"""
verify that a file can be created and written, and the size is correct.
"""
try:
with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
f.write(ARTICLE)
assert sftp.stat(sftp.FOLDER + "/duck.txt").st_size == 1486
finally:
sftp.remove(sftp.FOLDER + "/duck.txt")
def test_sftp_file_can_be_used_as_context_manager(self, sftp):
"""
verify that an opened file can be used as a context manager
"""
try:
with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
f.write(ARTICLE)
assert sftp.stat(sftp.FOLDER + "/duck.txt").st_size == 1486
finally:
sftp.remove(sftp.FOLDER + "/duck.txt")
def test_append(self, sftp):
"""
verify that a file can be opened for append, and tell() still works.
"""
try:
with sftp.open(sftp.FOLDER + "/append.txt", "w") as f:
f.write("first line\nsecond line\n")
assert f.tell() == 23
with sftp.open(sftp.FOLDER + "/append.txt", "a+") as f:
f.write("third line!!!\n")
assert f.tell() == 37
assert f.stat().st_size == 37
f.seek(-26, f.SEEK_CUR)
assert f.readline() == "second line\n"
finally:
sftp.remove(sftp.FOLDER + "/append.txt")
def test_rename(self, sftp):
"""
verify that renaming a file works.
"""
try:
with sftp.open(sftp.FOLDER + "/first.txt", "w") as f:
f.write("content!\n")
sftp.rename(
sftp.FOLDER + "/first.txt", sftp.FOLDER + "/second.txt"
)
with pytest.raises(IOError, match="No such file"):
sftp.open(sftp.FOLDER + "/first.txt", "r")
with sftp.open(sftp.FOLDER + "/second.txt", "r") as f:
f.seek(-6, f.SEEK_END)
assert u(f.read(4)) == "tent"
finally:
# TODO: this is gross, make some sort of 'remove if possible' / 'rm
# -f' a-like, jeez
try:
sftp.remove(sftp.FOLDER + "/first.txt")
except:
pass
try:
sftp.remove(sftp.FOLDER + "/second.txt")
except:
pass
def testa_posix_rename(self, sftp):
"""Test posix-rename@openssh.com protocol extension."""
try:
# first check that the normal rename works as specified
with sftp.open(sftp.FOLDER + "/a", "w") as f:
f.write("one")
sftp.rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
with sftp.open(sftp.FOLDER + "/a", "w") as f:
f.write("two")
with pytest.raises(IOError): # actual message seems generic
sftp.rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
# now check with the posix_rename
sftp.posix_rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
with sftp.open(sftp.FOLDER + "/b", "r") as f:
data = u(f.read())
err = "Contents of renamed file not the same as original file"
assert "two" == data, err
finally:
try:
sftp.remove(sftp.FOLDER + "/a")
except:
pass
try:
sftp.remove(sftp.FOLDER + "/b")
except:
pass
def test_folder(self, sftp):
"""
create a temporary folder, verify that we can create a file in it, then
remove the folder and verify that we can't create a file in it anymore.
"""
sftp.mkdir(sftp.FOLDER + "/subfolder")
sftp.open(sftp.FOLDER + "/subfolder/test", "w").close()
sftp.remove(sftp.FOLDER + "/subfolder/test")
sftp.rmdir(sftp.FOLDER + "/subfolder")
# shouldn't be able to create that file if dir removed
with pytest.raises(IOError, match="No such file"):
sftp.open(sftp.FOLDER + "/subfolder/test")
def test_listdir(self, sftp):
"""
verify that a folder can be created, a bunch of files can be placed in
it, and those files show up in sftp.listdir.
"""
try:
sftp.open(sftp.FOLDER + "/duck.txt", "w").close()
sftp.open(sftp.FOLDER + "/fish.txt", "w").close()
sftp.open(sftp.FOLDER + "/tertiary.py", "w").close()
x = sftp.listdir(sftp.FOLDER)
assert len(x) == 3
assert "duck.txt" in x
assert "fish.txt" in x
assert "tertiary.py" in x
assert "random" not in x
finally:
sftp.remove(sftp.FOLDER + "/duck.txt")
sftp.remove(sftp.FOLDER + "/fish.txt")
sftp.remove(sftp.FOLDER + "/tertiary.py")
def test_listdir_iter(self, sftp):
"""
listdir_iter version of above test
"""
try:
sftp.open(sftp.FOLDER + "/duck.txt", "w").close()
sftp.open(sftp.FOLDER + "/fish.txt", "w").close()
sftp.open(sftp.FOLDER + "/tertiary.py", "w").close()
x = [x.filename for x in sftp.listdir_iter(sftp.FOLDER)]
assert len(x) == 3
assert "duck.txt" in x
assert "fish.txt" in x
assert "tertiary.py" in x
assert "random" not in x
finally:
sftp.remove(sftp.FOLDER + "/duck.txt")
sftp.remove(sftp.FOLDER + "/fish.txt")
sftp.remove(sftp.FOLDER + "/tertiary.py")
@requireNonAsciiLocale()
def test_listdir_in_locale(self, sftp):
"""Test listdir under a locale that uses non-ascii text."""
sftp.open(sftp.FOLDER + "/canard.txt", "w").close()
try:
folder_contents = sftp.listdir(sftp.FOLDER)
assert ["canard.txt"] == folder_contents
finally:
sftp.remove(sftp.FOLDER + "/canard.txt")
def test_setstat(self, sftp):
"""
verify that the setstat functions (chown, chmod, utime, truncate) work.
"""
try:
with sftp.open(sftp.FOLDER + "/special", "w") as f:
f.write("x" * 1024)
stat = sftp.stat(sftp.FOLDER + "/special")
sftp.chmod(sftp.FOLDER + "/special", (stat.st_mode & ~o777) | o600)
stat = sftp.stat(sftp.FOLDER + "/special")
expected_mode = o600
if sys.platform == "win32":
# chmod not really functional on windows
expected_mode = o666
if sys.platform == "cygwin":
# even worse.
expected_mode = o644
assert stat.st_mode & o777 == expected_mode
assert stat.st_size == 1024
mtime = stat.st_mtime - 3600
atime = stat.st_atime - 1800
sftp.utime(sftp.FOLDER + "/special", (atime, mtime))
stat = sftp.stat(sftp.FOLDER + "/special")
assert stat.st_mtime == mtime
if sys.platform not in ("win32", "cygwin"):
assert stat.st_atime == atime
# can't really test chown, since we'd have to know a valid uid.
sftp.truncate(sftp.FOLDER + "/special", 512)
stat = sftp.stat(sftp.FOLDER + "/special")
assert stat.st_size == 512
finally:
sftp.remove(sftp.FOLDER + "/special")
def test_fsetstat(self, sftp):
"""
verify that the fsetstat functions (chown, chmod, utime, truncate)
work on open files.
"""
try:
with sftp.open(sftp.FOLDER + "/special", "w") as f:
f.write("x" * 1024)
with sftp.open(sftp.FOLDER + "/special", "r+") as f:
stat = f.stat()
f.chmod((stat.st_mode & ~o777) | o600)
stat = f.stat()
expected_mode = o600
if sys.platform == "win32":
# chmod not really functional on windows
expected_mode = o666
if sys.platform == "cygwin":
# even worse.
expected_mode = o644
assert stat.st_mode & o777 == expected_mode
assert stat.st_size == 1024
mtime = stat.st_mtime - 3600
atime = stat.st_atime - 1800
f.utime((atime, mtime))
stat = f.stat()
assert stat.st_mtime == mtime
if sys.platform not in ("win32", "cygwin"):
assert stat.st_atime == atime
# can't really test chown, since we'd have to know a valid uid.
f.truncate(512)
stat = f.stat()
assert stat.st_size == 512
finally:
sftp.remove(sftp.FOLDER + "/special")
def test_readline_seek(self, sftp):
"""
create a text file and write a bunch of text into it. then count the
lines in the file, and seek around to retrieve particular lines. this
should verify that read buffering and 'tell' work well together, and
that read buffering is reset on 'seek'.
"""
try:
with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
f.write(ARTICLE)
with sftp.open(sftp.FOLDER + "/duck.txt", "r+") as f:
line_number = 0
loc = 0
pos_list = []
for line in f:
line_number += 1
pos_list.append(loc)
loc = f.tell()
assert f.seekable()
f.seek(pos_list[6], f.SEEK_SET)
assert f.readline(), "Nouzilly == France.\n"
f.seek(pos_list[17], f.SEEK_SET)
assert f.readline()[:4] == "duck"
f.seek(pos_list[10], f.SEEK_SET)
expected = "duck types were equally resistant to exogenous insulin compared with chicken.\n" # noqa
assert f.readline() == expected
finally:
sftp.remove(sftp.FOLDER + "/duck.txt")
def test_write_seek(self, sftp):
"""
Create a text file, seek back, change it, and verify.
"""
try:
with sftp.open(sftp.FOLDER + "/testing.txt", "w") as f:
f.write("hello kitty.\n")
f.seek(-5, f.SEEK_CUR)
f.write("dd")
assert sftp.stat(sftp.FOLDER + "/testing.txt").st_size == 13
with sftp.open(sftp.FOLDER + "/testing.txt", "r") as f:
data = f.read(20)
assert data == b"hello kiddy.\n"
finally:
sftp.remove(sftp.FOLDER + "/testing.txt")
def test_symlink(self, sftp):
"""
create a symlink and then check that lstat doesn't follow it.
"""
if not hasattr(os, "symlink"):
# skip symlink tests on windows
return
try:
with sftp.open(sftp.FOLDER + "/original.txt", "w") as f:
f.write("original\n")
sftp.symlink("original.txt", sftp.FOLDER + "/link.txt")
assert sftp.readlink(sftp.FOLDER + "/link.txt") == "original.txt"
with sftp.open(sftp.FOLDER + "/link.txt", "r") as f:
assert f.readlines() == ["original\n"]
cwd = sftp.normalize(".")
if cwd[-1] == "/":
cwd = cwd[:-1]
abs_path = cwd + "/" + sftp.FOLDER + "/original.txt"
sftp.symlink(abs_path, sftp.FOLDER + "/link2.txt")
assert abs_path == sftp.readlink(sftp.FOLDER + "/link2.txt")
assert sftp.lstat(sftp.FOLDER + "/link.txt").st_size == 12
assert sftp.stat(sftp.FOLDER + "/link.txt").st_size == 9
# the sftp server may be hiding extra path members from us, so the
# length may be longer than we expect:
assert sftp.lstat(sftp.FOLDER + "/link2.txt").st_size >= len(
abs_path
)
assert sftp.stat(sftp.FOLDER + "/link2.txt").st_size == 9
assert sftp.stat(sftp.FOLDER + "/original.txt").st_size == 9
finally:
try:
sftp.remove(sftp.FOLDER + "/link.txt")
except:
pass
try:
sftp.remove(sftp.FOLDER + "/link2.txt")
except:
pass
try:
sftp.remove(sftp.FOLDER + "/original.txt")
except:
pass
def test_flush_seek(self, sftp):
"""
verify that buffered writes are automatically flushed on seek.
"""
try:
with sftp.open(sftp.FOLDER + "/happy.txt", "w", 1) as f:
f.write("full line.\n")
f.write("partial")
f.seek(9, f.SEEK_SET)
f.write("?\n")
with sftp.open(sftp.FOLDER + "/happy.txt", "r") as f:
assert f.readline() == u("full line?\n")
assert f.read(7) == b"partial"
finally:
try:
sftp.remove(sftp.FOLDER + "/happy.txt")
except:
pass
def test_realpath(self, sftp):
"""
test that realpath is returning something non-empty and not an
error.
"""
pwd = sftp.normalize(".")
assert len(pwd) > 0
f = sftp.normalize("./" + sftp.FOLDER)
assert len(f) > 0
assert os.path.join(pwd, sftp.FOLDER) == f
def test_mkdir(self, sftp):
"""
verify that mkdir/rmdir work.
"""
sftp.mkdir(sftp.FOLDER + "/subfolder")
with pytest.raises(IOError): # generic msg only
sftp.mkdir(sftp.FOLDER + "/subfolder")
sftp.rmdir(sftp.FOLDER + "/subfolder")
with pytest.raises(IOError, match="No such file"):
sftp.rmdir(sftp.FOLDER + "/subfolder")
def test_chdir(self, sftp):
"""
verify that chdir/getcwd work.
"""
root = sftp.normalize(".")
if root[-1] != "/":
root += "/"
try:
sftp.mkdir(sftp.FOLDER + "/alpha")
sftp.chdir(sftp.FOLDER + "/alpha")
sftp.mkdir("beta")
assert root + sftp.FOLDER + "/alpha" == sftp.getcwd()
assert ["beta"] == sftp.listdir(".")
sftp.chdir("beta")
with sftp.open("fish", "w") as f:
f.write("hello\n")
sftp.chdir("..")
assert ["fish"] == sftp.listdir("beta")
sftp.chdir("..")
assert ["fish"] == sftp.listdir("alpha/beta")
finally:
sftp.chdir(root)
try:
sftp.unlink(sftp.FOLDER + "/alpha/beta/fish")
except:
pass
try:
sftp.rmdir(sftp.FOLDER + "/alpha/beta")
except:
pass
try:
sftp.rmdir(sftp.FOLDER + "/alpha")
except:
pass
def test_get_put(self, sftp):
"""
verify that get/put work.
"""
warnings.filterwarnings("ignore", "tempnam.*")
fd, localname = mkstemp()
os.close(fd)
text = b"All I wanted was a plastic bunny rabbit.\n"
with open(localname, "wb") as f:
f.write(text)
saved_progress = []
def progress_callback(x, y):
saved_progress.append((x, y))
sftp.put(localname, sftp.FOLDER + "/bunny.txt", progress_callback)
with sftp.open(sftp.FOLDER + "/bunny.txt", "rb") as f:
assert text == f.read(128)
assert [(41, 41)] == saved_progress
os.unlink(localname)
fd, localname = mkstemp()
os.close(fd)
saved_progress = []
sftp.get(sftp.FOLDER + "/bunny.txt", localname, progress_callback)
with open(localname, "rb") as f:
assert text == f.read(128)
assert [(41, 41)] == saved_progress
os.unlink(localname)
sftp.unlink(sftp.FOLDER + "/bunny.txt")
def test_get_without_prefetch(self, sftp):
"""
Create a 4MB file. Verify that pull works without prefetching
using a lager file.
"""
sftp_filename = sftp.FOLDER + "/dummy_file"
num_chars = 1024 * 1024 * 4
fd, localname = mkstemp()
os.close(fd)
with open(localname, "wb") as f:
f.write(b"0" * num_chars)
sftp.put(localname, sftp_filename)
os.unlink(localname)
fd, localname = mkstemp()
os.close(fd)
sftp.get(sftp_filename, localname, prefetch=False)
assert os.stat(localname).st_size == num_chars
os.unlink(localname)
sftp.unlink(sftp_filename)
def test_check(self, sftp):
"""
verify that file.check() works against our own server.
(it's an sftp extension that we support, and may be the only ones who
support it.)
"""
with sftp.open(sftp.FOLDER + "/kitty.txt", "w") as f:
f.write("here kitty kitty" * 64)
try:
with sftp.open(sftp.FOLDER + "/kitty.txt", "r") as f:
sum = f.check("sha1")
assert (
"91059CFC6615941378D413CB5ADAF4C5EB293402"
== u(hexlify(sum)).upper()
)
sum = f.check("md5", 0, 512)
assert (
"93DE4788FCA28D471516963A1FE3856A"
== u(hexlify(sum)).upper()
)
sum = f.check("md5", 0, 0, 510)
expected = "EB3B45B8CD55A0707D99B177544A319F373183D241432BB2157AB9E46358C4AC90370B5CADE5D90336FC1716F90B36D6" # noqa
assert u(hexlify(sum)).upper() == expected
finally:
sftp.unlink(sftp.FOLDER + "/kitty.txt")
def test_x_flag(self, sftp):
"""
verify that the 'x' flag works when opening a file.
"""
sftp.open(sftp.FOLDER + "/unusual.txt", "wx").close()
try:
with pytest.raises(IOError):
sftp.open(sftp.FOLDER + "/unusual.txt", "wx")
finally:
sftp.unlink(sftp.FOLDER + "/unusual.txt")
def test_utf8(self, sftp):
"""
verify that unicode strings are encoded into utf8 correctly.
"""
with sftp.open(sftp.FOLDER + "/something", "w") as f:
f.write("okay")
try:
sftp.rename(
sftp.FOLDER + "/something", sftp.FOLDER + "/" + unicode_folder
)
sftp.open(b(sftp.FOLDER) + utf8_folder, "r")
finally:
sftp.unlink(b(sftp.FOLDER) + utf8_folder)
def test_utf8_chdir(self, sftp):
sftp.mkdir(sftp.FOLDER + "/" + unicode_folder)
try:
sftp.chdir(sftp.FOLDER + "/" + unicode_folder)
with sftp.open("something", "w") as f:
f.write("okay")
sftp.unlink("something")
finally:
sftp.chdir()
sftp.rmdir(sftp.FOLDER + "/" + unicode_folder)
def test_bad_readv(self, sftp):
"""
verify that readv at the end of the file doesn't essplode.
"""
sftp.open(sftp.FOLDER + "/zero", "w").close()
try:
with sftp.open(sftp.FOLDER + "/zero", "r") as f:
f.readv([(0, 12)])
with sftp.open(sftp.FOLDER + "/zero", "r") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
f.read(100)
finally:
sftp.unlink(sftp.FOLDER + "/zero")
def test_put_without_confirm(self, sftp):
"""
verify that get/put work without confirmation.
"""
warnings.filterwarnings("ignore", "tempnam.*")
fd, localname = mkstemp()
os.close(fd)
text = b"All I wanted was a plastic bunny rabbit.\n"
with open(localname, "wb") as f:
f.write(text)
saved_progress = []
def progress_callback(x, y):
saved_progress.append((x, y))
res = sftp.put(
localname, sftp.FOLDER + "/bunny.txt", progress_callback, False
)
assert SFTPAttributes().attr == res.attr
with sftp.open(sftp.FOLDER + "/bunny.txt", "r") as f:
assert text == f.read(128)
assert (41, 41) == saved_progress[-1]
os.unlink(localname)
sftp.unlink(sftp.FOLDER + "/bunny.txt")
def test_getcwd(self, sftp):
"""
verify that chdir/getcwd work.
"""
assert sftp.getcwd() is None
root = sftp.normalize(".")
if root[-1] != "/":
root += "/"
try:
sftp.mkdir(sftp.FOLDER + "/alpha")
sftp.chdir(sftp.FOLDER + "/alpha")
assert sftp.getcwd() == "/" + sftp.FOLDER + "/alpha"
finally:
sftp.chdir(root)
try:
sftp.rmdir(sftp.FOLDER + "/alpha")
except:
pass
def test_seek_append(self, sftp):
"""
verify that seek doesn't affect writes during append.
does not work except through paramiko. :( openssh fails.
"""
try:
with sftp.open(sftp.FOLDER + "/append.txt", "a") as f:
f.write("first line\nsecond line\n")
f.seek(11, f.SEEK_SET)
f.write("third line\n")
with sftp.open(sftp.FOLDER + "/append.txt", "r") as f:
assert f.stat().st_size == 34
assert f.readline() == "first line\n"
assert f.readline() == "second line\n"
assert f.readline() == "third line\n"
finally:
sftp.remove(sftp.FOLDER + "/append.txt")
def test_putfo_empty_file(self, sftp):
"""
Send an empty file and confirm it is sent.
"""
target = sftp.FOLDER + "/empty file.txt"
stream = StringIO()
try:
attrs = sftp.putfo(stream, target)
# the returned attributes should not be null
assert attrs is not None
finally:
sftp.remove(target)
# TODO: this test doesn't actually fail if the regression (removing '%'
# expansion to '%%' within sftp.py's def _log()) is removed - stacktraces
# appear but they're clearly emitted from subthreads that have no error
# handling. No point running it until that is fixed somehow.
@pytest.mark.skip("Doesn't prove anything right now")
def test_file_with_percent(self, sftp):
"""
verify that we can create a file with a '%' in the filename.
( it needs to be properly escaped by _log() )
"""
f = sftp.open(sftp.FOLDER + "/test%file", "w")
try:
assert f.stat().st_size == 0
finally:
f.close()
sftp.remove(sftp.FOLDER + "/test%file")
def test_non_utf8_data(self, sftp):
"""Test write() and read() of non utf8 data"""
try:
with sftp.open(f"{sftp.FOLDER}/nonutf8data", "w") as f:
f.write(NON_UTF8_DATA)
with sftp.open(f"{sftp.FOLDER}/nonutf8data", "r") as f:
data = f.read()
assert data == NON_UTF8_DATA
with sftp.open(f"{sftp.FOLDER}/nonutf8data", "wb") as f:
f.write(NON_UTF8_DATA)
with sftp.open(f"{sftp.FOLDER}/nonutf8data", "rb") as f:
data = f.read()
assert data == NON_UTF8_DATA
finally:
sftp.remove(f"{sftp.FOLDER}/nonutf8data")
@requireNonAsciiLocale("LC_TIME")
def test_sftp_attributes_locale_time(self, sftp):
"""Test SFTPAttributes under a locale with non-ascii time strings."""
some_stat = os.stat(sftp.FOLDER)
sftp_attributes = SFTPAttributes.from_stat(some_stat, u("a_directory"))
assert b"a_directory" in sftp_attributes.asbytes()
def test_sftp_attributes_empty_str(self, sftp):
sftp_attributes = SFTPAttributes()
assert (
str(sftp_attributes)
== "?--------- 1 0 0 0 (unknown date) ?"
)
@needs_builtin("buffer")
def test_write_buffer(self, sftp):
"""Test write() using a buffer instance."""
data = 3 * b"A potentially large block of data to chunk up.\n"
try:
with sftp.open(f"{sftp.FOLDER}/write_buffer", "wb") as f:
for offset in range(0, len(data), 8):
f.write(buffer(data, offset, 8)) # noqa
with sftp.open(f"{sftp.FOLDER}/write_buffer", "rb") as f:
assert f.read() == data
finally:
sftp.remove(f"{sftp.FOLDER}/write_buffer")
@needs_builtin("memoryview")
def test_write_memoryview(self, sftp):
"""Test write() using a memoryview instance."""
data = 3 * b"A potentially large block of data to chunk up.\n"
try:
with sftp.open(f"{sftp.FOLDER}/write_memoryview", "wb") as f:
view = memoryview(data)
for offset in range(0, len(data), 8):
f.write(view[offset : offset + 8])
with sftp.open(f"{sftp.FOLDER}/write_memoryview", "rb") as f:
assert f.read() == data
finally:
sftp.remove(f"{sftp.FOLDER}/write_memoryview")
| TestSFTP |
python | kamyu104__LeetCode-Solutions | Python/most-frequent-ids.py | {
"start": 790,
"end": 1491
} | class ____(object):
def mostFrequentIDs(self, nums, freq):
"""
:type nums: List[int]
:type freq: List[int]
:rtype: List[int]
"""
result = []
cnt = collections.Counter()
cnt2 = collections.Counter()
sl = SortedList()
for x, f in itertools.izip(nums, freq):
sl.discard((cnt[x], cnt2[cnt[x]]))
cnt2[cnt[x]] -= 1
if cnt2[cnt[x]]:
sl.add((cnt[x], cnt2[cnt[x]]))
cnt[x] += f
sl.discard((cnt[x], cnt2[cnt[x]]))
cnt2[cnt[x]] += 1
sl.add((cnt[x], cnt2[cnt[x]]))
result.append(sl[-1][0])
return result
| Solution2 |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 108081,
"end": 118757
} | class ____(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
config: Qwen2_5OmniTalkerConfig
base_model_prefix = "talker"
output_modalities = ("audio",)
def __init__(self, config: Qwen2_5OmniTalkerConfig):
super().__init__(config)
self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size)
self.model = Qwen2_5OmniTalkerModel(config)
self.codebook_size = config.vocab_size
self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False)
self.codec_bos_token = config.tts_codec_start_token_id
self.codec_eos_token = config.tts_codec_end_token_id
self.codec_pad_token = config.tts_codec_pad_token_id
self.codec_mask_token = config.tts_codec_mask_token_id
self.text_bos_token = config.tts_text_start_token_id
self.text_eos_token = config.tts_text_end_token_id
self.text_pad_token = config.tts_text_pad_token_id
self.spatial_merge_size = self.config.spatial_merge_size
self.rope_deltas = None
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
thinker_reply_part: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
rope_deltas: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
input_text_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
use_audio_in_video: Optional[bool] = None,
audio_feature_lengths: Optional[torch.LongTensor] = None,
video_second_per_grid: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]:
r"""
thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Hidden states from the thinker model's output that represent the text reply part to be processed.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input token IDs for text-only content, used for position calculation in multimodal contexts.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
use_audio_in_video (`bool`, *optional*):
Whether or not use audio track in video, should same as the parameter in `process_audio_info`.
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
Number of seconds per grid for each video, used for temporal feature mapping.
Example:
```python
>>> from io import BytesIO
>>> from urllib.request import urlopen
>>> import librosa
>>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration
>>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B")
>>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B")
>>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:"
>>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
>>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate)
>>> inputs = processor(text=prompt, audio=audio, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Generate the caption in English: Glass is breaking."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is not None and position_ids is None:
if (
cache_position is None
or (cache_position is not None and cache_position[0] == 0)
or self.rope_deltas is None
):
position_ids, rope_deltas = self.get_rope_index(
input_text_ids,
image_grid_thw,
video_grid_thw,
attention_mask,
use_audio_in_video,
audio_feature_lengths,
video_second_per_grid,
)
inputs_embeds[:, -1, :] += self.get_input_embeddings()(
torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device)
)
inputs_embeds[:, -2, :] += self.get_input_embeddings()(
torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device)
)
self.rope_deltas = rope_deltas
else:
batch_size, seq_length = input_ids.shape
delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0
position_ids = torch.arange(seq_length, device=input_ids.device)
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
position_ids = position_ids.add(delta)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
if inputs_embeds is None:
# 1. Inference tokens after second token
codec_embeds = self.get_input_embeddings()(input_ids)
inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :]
if thinker_reply_part.shape[1] > 1:
thinker_reply_part = thinker_reply_part[:, 1:, :]
talker_lm_input = self.thinker_to_talker_proj(inputs_embeds)
if attention_mask is not None:
attention_mask = attention_mask.to(inputs_embeds.device)
outputs = self.model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=talker_lm_input,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.codec_head(hidden_states)
logits = logits.float()
loss = None
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return Qwen2_5OmniTalkerCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=hidden_states,
attentions=outputs.attentions,
rope_deltas=self.rope_deltas,
thinker_reply_part=thinker_reply_part,
)
def _get_initial_cache_position(self, seq_length, device, model_kwargs):
# Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily
inputs_embeds = model_kwargs.pop("inputs_embeds")
model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs)
model_kwargs["inputs_embeds"] = inputs_embeds
return model_kwargs
# prepare inputs for talker lm generation
def prepare_inputs_for_generation(
self,
input_ids,
input_text_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
thinker_reply_part=None,
cache_position=None,
position_ids=None,
use_cache=True,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
input_audio_features=None,
audio_feature_attention_mask=None,
audio_feature_lengths=None,
use_audio_in_video=False,
video_second_per_grid=None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values,
attention_mask,
inputs_embeds,
cache_position,
use_cache=use_cache,
thinker_reply_part=thinker_reply_part,
input_text_ids=input_text_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
use_audio_in_video=use_audio_in_video,
audio_feature_lengths=audio_feature_lengths,
video_second_per_grid=video_second_per_grid,
**kwargs,
)
model_inputs["position_ids"] = None
return model_inputs
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: dict[str, Any],
is_encoder_decoder: bool = False,
num_new_tokens: int = 1,
) -> dict[str, Any]:
model_kwargs = super()._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder, num_new_tokens
)
if getattr(outputs, "thinker_reply_part", None) is not None:
model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part
return model_kwargs
| Qwen2_5OmniTalkerForConditionalGeneration |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 2335,
"end": 2564
} | class ____(BadHttpMessage):
def __init__(self, line: str = "", error: str | None = None) -> None:
super().__init__(error or f"Bad status line {line!r}")
self.args = (line,)
self.line = line
| BadStatusLine |
python | huggingface__transformers | src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py | {
"start": 34349,
"end": 35573
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
# feature dim might need to be down-projected
if config.output_hidden_size != config.hidden_size:
self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
else:
self.proj = self.proj_layer_norm = None
self.layers = nn.ModuleList(Wav2Vec2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers))
self.layerdrop = config.layerdrop
def forward(self, hidden_states):
# down project hidden_states if necessary
if self.proj is not None and self.proj_layer_norm is not None:
hidden_states = self.proj(hidden_states)
hidden_states = self.proj_layer_norm(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
for layer in self.layers:
layerdrop_prob = np.random.random()
if not self.training or (layerdrop_prob > self.layerdrop):
hidden_states = layer(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
| Wav2Vec2ConformerAdapter |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/richlog_scroll.py | {
"start": 123,
"end": 1036
} | class ____(App):
CSS = """
RichLog{
width: 1fr;
height: 10;
}
"""
def compose(self) -> ComposeResult:
with Horizontal():
# Don't scroll on write
yield RichLog(id="richlog1", auto_scroll=False)
# Scroll on write
yield RichLog(id="richlog2", auto_scroll=True)
# Scroll on write, but disabled on write()
yield RichLog(id="richlog3", auto_scroll=True)
def on_ready(self) -> None:
lines = [f"Line {n}" for n in range(20)]
for line in lines:
self.query_one("#richlog1", RichLog).write(line)
for line in lines:
self.query_one("#richlog2", RichLog).write(line)
for line in lines:
self.query_one("#richlog3", RichLog).write(line, scroll_end=False)
if __name__ == "__main__":
app = RichLogScrollApp()
app.run()
| RichLogScrollApp |
python | ansible__ansible | test/units/module_utils/common/test_dict_transformations.py | {
"start": 1286,
"end": 1478
} | class ____:
def test_snake_to_camel_reversed(self):
for (k, v) in EXPECTED_REVERSIBLE.items():
assert _snake_to_camel(v, capitalize_first=True) == k
| TestCaseSnakeToCamel |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-reduce-x-to-zero.py | {
"start": 29,
"end": 584
} | class ____(object):
def minOperations(self, nums, x):
"""
:type nums: List[int]
:type x: int
:rtype: int
"""
target = sum(nums)-x
result = -1
curr = left = 0
for right in xrange(len(nums)):
curr += nums[right]
while left < len(nums) and curr > target:
curr -= nums[left]
left += 1
if curr == target:
result = max(result, right-left+1)
return len(nums)-result if result != -1 else -1
| Solution |
python | Netflix__metaflow | metaflow/sidecar/sidecar_subprocess.py | {
"start": 811,
"end": 938
} | class ____(Exception):
"""raised when trying unable to send message to sidecar in allocated time"""
pass
| MsgTimeoutError |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 270632,
"end": 271905
} | class ____(ConditionalValueDefnumberExprRef):
"""
ConditionalPredicateValueDefnumberExprRef schema wrapper.
Parameters
----------
test : str, dict, :class:`Predicate`, :class:`FieldGTPredicate`, :class:`FieldLTPredicate`, :class:`FieldGTEPredicate`, :class:`FieldLTEPredicate`, :class:`LogicalOrPredicate`, :class:`ParameterPredicate`, :class:`FieldEqualPredicate`, :class:`FieldOneOfPredicate`, :class:`FieldRangePredicate`, :class:`FieldValidPredicate`, :class:`LogicalAndPredicate`, :class:`LogicalNotPredicate`, :class:`PredicateComposition`
Predicate for triggering the condition
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {"$ref": "#/definitions/ConditionalPredicate<ValueDef<(number|ExprRef)>>"}
def __init__(
self,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(test=test, value=value, **kwds)
| ConditionalPredicateValueDefnumberExprRef |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_group_search_views.py | {
"start": 2673,
"end": 8734
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
permission_classes = (MemberPermission,)
def get(self, request: Request, organization: Organization) -> Response:
"""
List the current organization member's custom views
`````````````````````````````````````````
Retrieve a list of custom views for the current organization member.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = OrganizationGroupSearchViewGetSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
starred_view_ids = GroupSearchViewStarred.objects.filter(
organization=organization, user_id=request.user.id
).values_list("group_search_view_id", flat=True)
createdBy = serializer.validated_data.get("createdBy", "me")
sorts = [SORT_MAP[sort] for sort in serializer.validated_data["sort"]]
query = serializer.validated_data.get("query")
base_queryset = (
GroupSearchView.objects.filter(organization=organization)
if not query
else GroupSearchView.objects.filter(
Q(query__icontains=query) | Q(name__icontains=query),
organization=organization,
)
)
last_visited_query = Subquery(
GroupSearchViewLastVisited.objects.filter(
organization=organization,
user_id=request.user.id,
group_search_view_id=OuterRef("id"),
).values("last_visited")[:1]
)
starred_count_query = Count("groupsearchviewstarred")
if createdBy == "me":
starred_query = (
base_queryset.filter(
user_id=request.user.id,
id__in=starred_view_ids,
)
.prefetch_related("projects")
.annotate(popularity=starred_count_query, last_visited=last_visited_query)
.order_by(*sorts)
)
non_starred_query = (
base_queryset.filter(
user_id=request.user.id,
)
.exclude(id__in=starred_view_ids)
.prefetch_related("projects")
.annotate(popularity=starred_count_query, last_visited=last_visited_query)
.order_by(*sorts)
)
elif createdBy == "others":
starred_query = (
base_queryset.filter(
visibility=GroupSearchViewVisibility.ORGANIZATION,
id__in=starred_view_ids,
)
.exclude(user_id=request.user.id)
.prefetch_related("projects")
.annotate(popularity=starred_count_query, last_visited=last_visited_query)
.order_by(*sorts)
)
non_starred_query = (
base_queryset.filter(
visibility=GroupSearchViewVisibility.ORGANIZATION,
)
.exclude(user_id=request.user.id)
.exclude(id__in=starred_view_ids)
.prefetch_related("projects")
.annotate(popularity=starred_count_query, last_visited=last_visited_query)
.order_by(*sorts)
)
return self.paginate(
request=request,
sources=[starred_query, non_starred_query],
paginator_cls=ChainPaginator,
on_results=lambda x: serialize(
x,
request.user,
serializer=GroupSearchViewSerializer(
organization=organization,
),
),
)
def post(self, request: Request, organization: Organization) -> Response:
"""
Create a new custom view for the current organization member.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not features.has("organizations:issue-views", organization, actor=request.user):
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GroupSearchViewPostValidator(
data=request.data, context={"organization": organization}
)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
validated_data = serializer.validated_data
projects = Project.objects.filter(
id__in=validated_data["projects"], organization=organization
)
for project in projects:
self.check_object_permissions(request, project)
# Create the new view
view = GroupSearchView.objects.create(
organization=organization,
user_id=request.user.id,
name=validated_data["name"],
query=validated_data["query"],
query_sort=validated_data["querySort"],
is_all_projects=validated_data["isAllProjects"],
environments=validated_data["environments"],
time_filters=validated_data["timeFilters"],
visibility=GroupSearchViewVisibility.ORGANIZATION,
)
view.projects.set(validated_data["projects"])
if validated_data.get("starred"):
GroupSearchViewStarred.objects.insert_starred_view(
organization=organization,
user_id=request.user.id,
view=view,
)
return Response(
serialize(
view,
request.user,
serializer=GroupSearchViewSerializer(
organization=organization,
),
),
status=status.HTTP_201_CREATED,
)
| OrganizationGroupSearchViewsEndpoint |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/ecs.py | {
"start": 1523,
"end": 1776
} | class ____(_StringCompareEnum):
"""Contains the possible State values of an ECS Cluster."""
ACTIVE = "ACTIVE"
PROVISIONING = "PROVISIONING"
DEPROVISIONING = "DEPROVISIONING"
FAILED = "FAILED"
INACTIVE = "INACTIVE"
| EcsClusterStates |
python | PrefectHQ__prefect | src/prefect/settings/models/api.py | {
"start": 229,
"end": 1796
} | class ____(PrefectBaseSettings):
"""
Settings for interacting with the Prefect API
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("api",))
url: Optional[str] = Field(
default=None,
description="The URL of the Prefect API. If not set, the client will attempt to infer it.",
)
auth_string: Optional[SecretStr] = Field(
default=None,
description="The auth string used for basic authentication with a self-hosted Prefect API. Should be kept secret.",
)
key: Optional[SecretStr] = Field(
default=None,
description="The API key used for authentication with the Prefect API. Should be kept secret.",
)
tls_insecure_skip_verify: bool = Field(
default=False,
description="If `True`, disables SSL checking to allow insecure requests. Setting to False is recommended only during development. For example, when using self-signed certificates.",
)
ssl_cert_file: Optional[str] = Field(
default=os.environ.get("SSL_CERT_FILE"),
description="This configuration settings option specifies the path to an SSL certificate file.",
)
enable_http2: bool = Field(
default=False,
description="If true, enable support for HTTP/2 for communicating with an API. If the API does not support HTTP/2, this will have no effect and connections will be made via HTTP/1.1.",
)
request_timeout: float = Field(
default=60.0,
description="The default timeout for requests to the API",
)
| APISettings |
python | pytorch__pytorch | torch/_inductor/codegen/triton.py | {
"start": 20578,
"end": 21410
} | class ____(BlockDescriptorOptions):
def format(self, name: str, roffset=True) -> str:
"""
Codegen a call to tl.make_tensor_descriptor()
Args:
name: variable name for pointer
roffset: unused, but kept for compatibility with BlockPtrOptions.format()
Returns:
"tl.make_tensor_descriptor(...)"
"""
f = V.kernel.index_to_str
args = [
(
f"{name} + ({f(self.constant_offset)})"
if self.constant_offset != 0
else name
),
f"shape={f(self.shape)}",
f"strides={f(self.strides)}",
f"block_shape={f(self.block_shape)}",
]
return f"tl.make_tensor_descriptor({', '.join(args)})"
@dataclasses.dataclass
| TensorDescriptorOptions |
python | networkx__networkx | networkx/utils/union_find.py | {
"start": 72,
"end": 3338
} | class ____:
"""Union-find data structure.
Each unionFind instance X maintains a family of disjoint sets of
hashable objects, supporting the following two methods:
- X[item] returns a name for the set containing the given item.
Each set is named by an arbitrarily-chosen one of its members; as
long as the set remains unchanged it will keep the same name. If
the item is not yet part of a set in X, a new singleton set is
created for it.
- X.union(item1, item2, ...) merges the sets containing each item
into a single larger set. If any item is not yet part of a set
in X, it is added to X as one of the members of the merged set.
Union-find data structure. Based on Josiah Carlson's code,
https://code.activestate.com/recipes/215912/
with significant additional changes by D. Eppstein.
http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py
"""
def __init__(self, elements=None):
"""Create a new empty union-find structure.
If *elements* is an iterable, this structure will be initialized
with the discrete partition on the given set of elements.
"""
if elements is None:
elements = ()
self.parents = {}
self.weights = {}
for x in elements:
self.weights[x] = 1
self.parents[x] = x
def __getitem__(self, object):
"""Find and return the name of the set containing the object."""
# check for previously unknown object
if object not in self.parents:
self.parents[object] = object
self.weights[object] = 1
return object
# find path of objects leading to the root
path = []
root = self.parents[object]
while root != object:
path.append(object)
object = root
root = self.parents[object]
# compress the path and return
for ancestor in path:
self.parents[ancestor] = root
return root
def __iter__(self):
"""Iterate through all items ever found or unioned by this structure."""
return iter(self.parents)
def to_sets(self):
"""Iterates over the sets stored in this structure.
For example::
>>> partition = UnionFind("xyz")
>>> sorted(map(sorted, partition.to_sets()))
[['x'], ['y'], ['z']]
>>> partition.union("x", "y")
>>> sorted(map(sorted, partition.to_sets()))
[['x', 'y'], ['z']]
"""
# Ensure fully pruned paths
for x in self.parents:
_ = self[x] # Evaluated for side-effect only
yield from groups(self.parents).values()
def union(self, *objects):
"""Find the sets containing the objects and merge them all."""
# Find the heaviest root according to its weight.
roots = iter(
sorted(
{self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True
)
)
try:
root = next(roots)
except StopIteration:
return
for r in roots:
self.weights[root] += self.weights[r]
self.parents[r] = root
| UnionFind |
python | huggingface__transformers | src/transformers/models/instructblipvideo/configuration_instructblipvideo.py | {
"start": 1430,
"end": 5427
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`InstructBlipVideoVisionModel`]. It is used to
instantiate a InstructBlipVideo vision encoder according to the specified arguments, defining the model architecture.
Instantiating a configuration defaults will yield a similar configuration to that of the InstructBlipVideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported. to 1e-5): The epsilon used by the layer
normalization layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import InstructBlipVideoVisionConfig, InstructBlipVideoVisionModel
>>> # Initializing a InstructBlipVideoVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoVisionConfig()
>>> # Initializing a InstructBlipVideoVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "instructblipvideo_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1408,
intermediate_size=6144,
num_hidden_layers=39,
num_attention_heads=16,
image_size=224,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=1e-10,
qkv_bias=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
| InstructBlipVideoVisionConfig |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_between_inclusive_threshold_range.py | {
"start": 5731,
"end": 13536
} | class ____(
ProfileNumericColumnsDiffExpectation
):
"""Expect a statistic's value for a given column of a DataProfiler difference report to be within the specified threshold, inclusive.
This expectation takes the difference report between the data it is called on and a DataProfiler profile of the same schema loaded from a provided path.
This function builds upon the custom ProfileNumericColumnsDiff Expectation of Capital One's DataProfiler Expectations.
Each numerical column will be checked against a user provided dictionary of columns paired with dictionaries of statistics containing lower and upper bounds.
It is expected that a statistics value for a given column is within the specified threshold, inclusive.
Args:
profile_path (str): A path to a saved DataProfiler profile object on the local filesystem.
limit_check_report_keys (dict): A dict, containing column names as keys and dicts as values that contain statistics as keys and dicts as values containing two keys:
"lower" denoting the lower bound for the threshold range, and "upper" denoting the upper bound for the threshold range.
mostly (float - optional): a value indicating the lower bound percentage of successful values that must be present to evaluate to success=True.
validator.expect_profile_numerical_columns_diff_between_threshold_range(
profile_path = "C:/path_to/my_profile.pkl",
limit_check_report_keys = {
"column_one": {
"min": {"lower": 2.0, "upper": 10.0},
},
"*": {
"*": {"lower": 0, "upper": 100},
},
}
)
Note: In limit_check_report_keys, "*" in place of a column denotes a general operator in which the value it stores will be applied to every column in the data that has no explicit key.
"*" in place of a statistic denotes a general operator in which the bounds it stores will be applied to every statistic for the given column that has no explicit key.
"""
example_profile_data = [
[2, 5, "10", "ten", 25],
[4, 10, "20", "twenty", 50],
[6, 15, "30", "thirty", 75],
[8, 20, "40", "forty", 100],
[10, 25, "50", "fifty", 125],
]
example_profile_columns = [
"by_2",
"by_5",
"str_by_10",
"words_by_10",
"by_25",
]
df = pd.DataFrame(example_profile_data, columns=example_profile_columns)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
example_profile = dp.Profiler(df, options=profiler_opts)
profile_path = "/example_profiles/expect_profile_diff_less_than_threshold_profile.pkl"
dir_path = os.path.dirname(os.path.abspath(__file__)) # noqa: PTH120, PTH100
profile_path = dir_path + profile_path
example_profile.save(filepath=profile_path)
examples = [
{
"data": {
"by_2": [4, 6, 8, 10, 12],
"by_5": [10, 15, 20, 25, 30],
"str_by_10": ["20", "30", "40", "50", "60"],
"words_by_10": ["twenty", "thirty", "forty", "fifty", "sixty"],
"by_25": [50, 75, 100, 125, 150],
},
"tests": [
{
"title": "profile_min_delta_witin_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {
"min": {"lower": 0, "upper": 50},
},
},
},
"out": {"success": True},
},
{
"title": "profile_all_stats_beyond_delta_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {"*": {"lower": 0, "upper": 0}},
"by_2": {
"min": {"lower": -1, "upper": 1},
},
},
},
"out": {"success": False},
},
{
"title": "checking_single_failure_in_one_column",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {"*": {"lower": -25, "upper": 50}},
"by_2": {"min": {"lower": 0, "upper": 0}},
},
},
"out": {"success": False},
},
{
"title": "single_failure_still_mostly_successful",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {"*": {"lower": -25, "upper": 50}},
"by_2": {"min": {"lower": 0, "upper": 0}},
},
"mostly": 0.75,
},
"out": {"success": True},
},
],
},
]
profile_metric = "data_profiler.profile_numeric_columns_diff_between_inclusive_threshold_range"
success_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
"mostly",
)
default_limit_check_report_keys = {
"*": {
"min": {"lower": 0, "upper": 0},
"max": {"lower": 0, "upper": 0},
"sum": {"lower": 0, "upper": 0},
"mean": {"lower": 0, "upper": 0},
"median": {"lower": 0, "upper": 0},
"median_absolute_deviation": {"lower": 0, "upper": 0},
"variance": {"lower": 0, "upper": 0},
"stddev": {"lower": 0, "upper": 0},
"unique_count": {"lower": 0, "upper": 0},
"unique_ratio": {"lower": 0, "upper": 0},
"gini_impurity": {"lower": 0, "upper": 0},
"unalikeability": {"lower": 0, "upper": 0},
"sample_size": {"lower": 0, "upper": 0},
"null_count": {"lower": 0, "upper": 0},
}
}
numerical_diff_statistics = list(default_limit_check_report_keys["*"].keys())
default_kwarg_values = {
"limit_check_report_keys": default_limit_check_report_keys,
"numerical_diff_statistics": numerical_diff_statistics,
"mostly": 1.0,
}
library_metadata = {
"requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
"maturity": "experimental", # "concept_only", "experimental", "beta", or "production"
"tags": [
"dataprofiler",
"dataassistance",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@stevensecreti", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
diagnostics_report = (
ExpectProfileNumericColumnsDiffBetweenInclusiveThresholdRange().run_diagnostics()
)
print(diagnostics_report.generate_checklist())
| ExpectProfileNumericColumnsDiffBetweenInclusiveThresholdRange |
python | scipy__scipy | scipy/special/tests/test_gammainc.py | {
"start": 2678,
"end": 4441
} | class ____:
@pytest.mark.parametrize('a, x', INVALID_POINTS)
def test_domain(self, a, x):
assert np.isnan(sc.gammaincc(a, x))
def test_a_eq_0_x_gt_0(self):
assert sc.gammaincc(0, 1) == 0
@pytest.mark.parametrize('a, x, desired', [
(np.inf, 1, 1),
(np.inf, 0, 1),
(np.inf, np.inf, np.nan),
(1, np.inf, 0)
])
def test_infinite_arguments(self, a, x, desired):
result = sc.gammaincc(a, x)
if np.isnan(desired):
assert np.isnan(result)
else:
assert result == desired
@pytest.mark.parametrize("x", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan])
def test_a_nan(self, x):
assert np.isnan(sc.gammaincc(np.nan, x))
@pytest.mark.parametrize("a", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan])
def test_x_nan(self, a):
assert np.isnan(sc.gammaincc(a, np.nan))
def test_infinite_limits(self):
# Test that large arguments converge to the hard-coded limits
# at infinity.
assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100)
assert_allclose(
sc.gammaincc(100, 1000),
sc.gammaincc(100, np.inf),
atol=1e-200, # Use `atol` since the function converges to 0.
rtol=0
)
def test_limit_check(self):
result = sc.gammaincc(1e-10,1)
limit = sc.gammaincc(0,1)
assert np.isclose(result, limit)
def test_x_zero(self):
a = np.arange(1, 10)
assert_array_equal(sc.gammaincc(a, 0), 1)
def test_roundtrip(self):
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammainccinv(a, sc.gammaincc(a, x))
assert_allclose(x, y, rtol=1e-14)
| TestGammaincc |
python | gevent__gevent | src/greentest/3.12/test_weakref.py | {
"start": 1323,
"end": 1391
} | class ____:
def __init__(self):
self.cycle = self
| RefCycle |
python | explosion__spaCy | spacy/lang/ja/__init__.py | {
"start": 7550,
"end": 12566
} | class ____(Language):
lang = "ja"
Defaults = JapaneseDefaults
@Japanese.factory(
"morphologizer",
assigns=["token.morph", "token.pos"],
default_config={
"model": DEFAULT_MORPH_MODEL,
"overwrite": True,
"extend": True,
"scorer": {"@scorers": "spacy.morphologizer_scorer.v1"},
},
default_score_weights={
"pos_acc": 0.5,
"morph_micro_f": 0.5,
"morph_per_feat": None,
},
)
def make_morphologizer(
nlp: Language,
model: Model,
name: str,
overwrite: bool,
extend: bool,
scorer: Optional[Callable],
):
return Morphologizer(
nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer
)
# Hold the attributes we need with convenient names
DetailedToken = namedtuple(
"DetailedToken", ["surface", "tag", "inf", "lemma", "norm", "reading", "sub_tokens"]
)
def try_sudachi_import(split_mode="A"):
"""SudachiPy is required for Japanese support, so check for it.
It it's not available blow up and explain how to fix it.
split_mode should be one of these values: "A", "B", "C", None->"A"."""
try:
from sudachipy import dictionary, tokenizer
split_mode = {
None: tokenizer.Tokenizer.SplitMode.A,
"A": tokenizer.Tokenizer.SplitMode.A,
"B": tokenizer.Tokenizer.SplitMode.B,
"C": tokenizer.Tokenizer.SplitMode.C,
}[split_mode]
tok = dictionary.Dictionary().create(mode=split_mode)
return tok
except ImportError:
raise ImportError(
"Japanese support requires SudachiPy and SudachiDict-core "
"(https://github.com/WorksApplications/SudachiPy). "
"Install with `pip install sudachipy sudachidict_core` or "
"install spaCy with `pip install spacy[ja]`."
) from None
def resolve_pos(orth, tag, next_tag):
"""If necessary, add a field to the POS tag for UD mapping.
Under Universal Dependencies, sometimes the same Unidic POS tag can
be mapped differently depending on the literal token or its context
in the sentence. This function returns resolved POSs for both token
and next_token by tuple.
"""
# Some tokens have their UD tag decided based on the POS of the following
# token.
# apply orth based mapping
if tag in TAG_ORTH_MAP:
orth_map = TAG_ORTH_MAP[tag]
if orth in orth_map:
return orth_map[orth], None # current_pos, next_pos
# apply tag bi-gram mapping
if next_tag:
tag_bigram = tag, next_tag
if tag_bigram in TAG_BIGRAM_MAP:
current_pos, next_pos = TAG_BIGRAM_MAP[tag_bigram]
if current_pos is None: # apply tag uni-gram mapping for current_pos
return (
TAG_MAP[tag][POS],
next_pos,
) # only next_pos is identified by tag bi-gram mapping
else:
return current_pos, next_pos
# apply tag uni-gram mapping
return TAG_MAP[tag][POS], None
def get_dtokens_and_spaces(dtokens, text, gap_tag="空白"):
# Compare the content of tokens and text, first
words = [x.surface for x in dtokens]
if "".join("".join(words).split()) != "".join(text.split()):
raise ValueError(Errors.E194.format(text=text, words=words))
text_dtokens = []
text_spaces = []
text_pos = 0
# handle empty and whitespace-only texts
if len(words) == 0:
return text_dtokens, text_spaces
elif len([word for word in words if not word.isspace()]) == 0:
assert text.isspace()
text_dtokens = [DetailedToken(text, gap_tag, "", text, text, None, None)]
text_spaces = [False]
return text_dtokens, text_spaces
# align words and dtokens by referring text, and insert gap tokens for the space char spans
for i, (word, dtoken) in enumerate(zip(words, dtokens)):
# skip all space tokens
if word.isspace():
continue
try:
word_start = text[text_pos:].index(word)
except ValueError:
raise ValueError(Errors.E194.format(text=text, words=words)) from None
# space token
if word_start > 0:
w = text[text_pos : text_pos + word_start]
text_dtokens.append(DetailedToken(w, gap_tag, "", w, w, None, None))
text_spaces.append(False)
text_pos += word_start
# content word
text_dtokens.append(dtoken)
text_spaces.append(False)
text_pos += len(word)
# poll a space char after the word
if i + 1 < len(dtokens) and dtokens[i + 1].surface == " ":
text_spaces[-1] = True
text_pos += 1
# trailing space token
if text_pos < len(text):
w = text[text_pos:]
text_dtokens.append(DetailedToken(w, gap_tag, "", w, w, None, None))
text_spaces.append(False)
return text_dtokens, text_spaces
__all__ = ["Japanese"]
| Japanese |
python | coleifer__peewee | peewee.py | {
"start": 111716,
"end": 126889
} | class ____(Database):
field_types = {
'BIGAUTO': FIELD.AUTO,
'BIGINT': FIELD.INT,
'BOOL': FIELD.INT,
'DOUBLE': FIELD.FLOAT,
'SMALLINT': FIELD.INT,
'UUID': FIELD.TEXT}
operations = {
'LIKE': 'GLOB',
'ILIKE': 'LIKE'}
index_schema_prefix = True
limit_max = -1
server_version = __sqlite_version__
truncate_table = False
def __init__(self, database, *args, **kwargs):
self._pragmas = kwargs.pop('pragmas', ())
super(SqliteDatabase, self).__init__(database, *args, **kwargs)
self._aggregates = {}
self._collations = {}
self._functions = {}
self._window_functions = {}
self._table_functions = []
self._extensions = set()
self._attached = {}
self.register_function(_sqlite_date_part, 'date_part', 2)
self.register_function(_sqlite_date_trunc, 'date_trunc', 2)
self.nulls_ordering = self.server_version >= (3, 30, 0)
def init(self, database, pragmas=None, timeout=5, returning_clause=None,
**kwargs):
if pragmas is not None:
self._pragmas = pragmas
if isinstance(self._pragmas, dict):
self._pragmas = list(self._pragmas.items())
if returning_clause is not None:
if __sqlite_version__ < (3, 35, 0):
warnings.warn('RETURNING clause requires Sqlite 3.35 or newer')
self.returning_clause = returning_clause
self._timeout = timeout
super(SqliteDatabase, self).init(database, **kwargs)
def _set_server_version(self, conn):
pass
def _connect(self):
if sqlite3 is None:
raise ImproperlyConfigured('SQLite driver not installed!')
conn = sqlite3.connect(self.database, timeout=self._timeout,
isolation_level=None, **self.connect_params)
try:
self._add_conn_hooks(conn)
except:
conn.close()
raise
return conn
def _add_conn_hooks(self, conn):
if self._attached:
self._attach_databases(conn)
if self._pragmas:
self._set_pragmas(conn)
self._load_aggregates(conn)
self._load_collations(conn)
self._load_functions(conn)
if self.server_version >= (3, 25, 0):
self._load_window_functions(conn)
if self._table_functions:
for table_function in self._table_functions:
table_function.register(conn)
if self._extensions:
self._load_extensions(conn)
def _set_pragmas(self, conn):
cursor = conn.cursor()
for pragma, value in self._pragmas:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
cursor.close()
def _attach_databases(self, conn):
cursor = conn.cursor()
for name, db in self._attached.items():
cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name))
cursor.close()
def pragma(self, key, value=SENTINEL, permanent=False, schema=None):
if schema is not None:
key = '"%s".%s' % (schema, key)
sql = 'PRAGMA %s' % key
if value is not SENTINEL:
sql += ' = %s' % (value or 0)
if permanent:
pragmas = dict(self._pragmas or ())
pragmas[key] = value
self._pragmas = list(pragmas.items())
elif permanent:
raise ValueError('Cannot specify a permanent pragma without value')
row = self.execute_sql(sql).fetchone()
if row:
return row[0]
cache_size = __pragma__('cache_size')
foreign_keys = __pragma__('foreign_keys')
journal_mode = __pragma__('journal_mode')
journal_size_limit = __pragma__('journal_size_limit')
mmap_size = __pragma__('mmap_size')
page_size = __pragma__('page_size')
read_uncommitted = __pragma__('read_uncommitted')
synchronous = __pragma__('synchronous')
wal_autocheckpoint = __pragma__('wal_autocheckpoint')
application_id = __pragma__('application_id')
user_version = __pragma__('user_version')
data_version = __pragma__('data_version')
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, seconds):
if self._timeout == seconds:
return
self._timeout = seconds
if not self.is_closed():
# PySQLite multiplies user timeout by 1000, but the unit of the
# timeout PRAGMA is actually milliseconds.
self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000))
def _load_aggregates(self, conn):
for name, (klass, num_params) in self._aggregates.items():
conn.create_aggregate(name, num_params, klass)
def _load_collations(self, conn):
for name, fn in self._collations.items():
conn.create_collation(name, fn)
def _load_functions(self, conn):
for name, (fn, n_params, deterministic) in self._functions.items():
kwargs = {'deterministic': deterministic} if deterministic else {}
conn.create_function(name, n_params, fn, **kwargs)
def _load_window_functions(self, conn):
for name, (klass, num_params) in self._window_functions.items():
conn.create_window_function(name, num_params, klass)
def register_aggregate(self, klass, name=None, num_params=-1):
self._aggregates[name or klass.__name__.lower()] = (klass, num_params)
if not self.is_closed():
self._load_aggregates(self.connection())
def aggregate(self, name=None, num_params=-1):
def decorator(klass):
self.register_aggregate(klass, name, num_params)
return klass
return decorator
def register_collation(self, fn, name=None):
name = name or fn.__name__
def _collation(*args):
expressions = args + (SQL('collate %s' % name),)
return NodeList(expressions)
fn.collation = _collation
self._collations[name] = fn
if not self.is_closed():
self._load_collations(self.connection())
def collation(self, name=None):
def decorator(fn):
self.register_collation(fn, name)
return fn
return decorator
def register_function(self, fn, name=None, num_params=-1,
deterministic=None):
self._functions[name or fn.__name__] = (fn, num_params, deterministic)
if not self.is_closed():
self._load_functions(self.connection())
def func(self, name=None, num_params=-1, deterministic=None):
def decorator(fn):
self.register_function(fn, name, num_params, deterministic)
return fn
return decorator
def register_window_function(self, klass, name=None, num_params=-1):
name = name or klass.__name__.lower()
self._window_functions[name] = (klass, num_params)
if not self.is_closed():
self._load_window_functions(self.connection())
def window_function(self, name=None, num_params=-1):
def decorator(klass):
self.register_window_function(klass, name, num_params)
return klass
return decorator
def register_table_function(self, klass, name=None):
if name is not None:
klass.name = name
self._table_functions.append(klass)
if not self.is_closed():
klass.register(self.connection())
def table_function(self, name=None):
def decorator(klass):
self.register_table_function(klass, name)
return klass
return decorator
def unregister_aggregate(self, name):
del(self._aggregates[name])
def unregister_collation(self, name):
del(self._collations[name])
def unregister_function(self, name):
del(self._functions[name])
def unregister_window_function(self, name):
del(self._window_functions[name])
def unregister_table_function(self, name):
for idx, klass in enumerate(self._table_functions):
if klass.name == name:
break
else:
return False
self._table_functions.pop(idx)
return True
def _load_extensions(self, conn):
conn.enable_load_extension(True)
for extension in self._extensions:
conn.load_extension(extension)
def load_extension(self, extension):
self._extensions.add(extension)
if not self.is_closed():
conn = self.connection()
conn.enable_load_extension(True)
conn.load_extension(extension)
def unload_extension(self, extension):
self._extensions.remove(extension)
def attach(self, filename, name):
if name in self._attached:
if self._attached[name] == filename:
return False
raise OperationalError('schema "%s" already attached.' % name)
self._attached[name] = filename
if not self.is_closed():
self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name))
return True
def detach(self, name):
if name not in self._attached:
return False
del self._attached[name]
if not self.is_closed():
self.execute_sql('DETACH DATABASE "%s"' % name)
return True
def last_insert_id(self, cursor, query_type=None):
if not self.returning_clause:
return cursor.lastrowid
elif query_type == Insert.SIMPLE:
try:
return cursor[0][0]
except (IndexError, KeyError, TypeError):
pass
return cursor
def rows_affected(self, cursor):
try:
return cursor.rowcount
except AttributeError:
return cursor.cursor.rowcount # This was a RETURNING query.
def begin(self, lock_type=None):
statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN'
self.execute_sql(statement)
def commit(self):
with __exception_wrapper__:
return self._state.conn.commit()
def rollback(self):
with __exception_wrapper__:
return self._state.conn.rollback()
def get_tables(self, schema=None):
schema = schema or 'main'
cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE '
'type=? ORDER BY name' % schema, ('table',))
return [row for row, in cursor.fetchall()]
def get_views(self, schema=None):
sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? '
'ORDER BY name') % (schema or 'main')
return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))]
def get_indexes(self, table, schema=None):
schema = schema or 'main'
query = ('SELECT name, sql FROM "%s".sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema
cursor = self.execute_sql(query, (table, 'index'))
index_to_sql = dict(cursor.fetchall())
# Determine which indexes have a unique constraint.
unique_indexes = set()
cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' %
(schema, table))
for row in cursor.fetchall():
name = row[1]
is_unique = int(row[2]) == 1
if is_unique:
unique_indexes.add(name)
# Retrieve the indexed columns.
index_columns = {}
for index_name in sorted(index_to_sql):
cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' %
(schema, index_name))
index_columns[index_name] = [row[2] for row in cursor.fetchall()]
return [
IndexMetadata(
name,
index_to_sql[name],
index_columns[name],
name in unique_indexes,
table)
for name in sorted(index_to_sql)]
def get_columns(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
(schema or 'main', table))
return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4])
for r in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
(schema or 'main', table))
return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' %
(schema or 'main', table))
return [ForeignKeyMetadata(row[3], row[2], row[4], table)
for row in cursor.fetchall()]
def get_binary_type(self):
return sqlite3.Binary
def conflict_statement(self, on_conflict, query):
action = on_conflict._action.lower() if on_conflict._action else ''
if action and action not in ('nothing', 'update'):
return SQL('INSERT OR %s' % on_conflict._action.upper())
def conflict_update(self, oc, query):
# Sqlite prior to 3.24.0 does not support Postgres-style upsert.
if self.server_version < (3, 24, 0) and \
any((oc._preserve, oc._update, oc._where, oc._conflict_target,
oc._conflict_constraint)):
raise ValueError('SQLite does not support specifying which values '
'to preserve or update.')
action = oc._action.lower() if oc._action else ''
if action and action not in ('nothing', 'update', ''):
return
if action == 'nothing':
return SQL('ON CONFLICT DO NOTHING')
elif not oc._update and not oc._preserve:
raise ValueError('If you are not performing any updates (or '
'preserving any INSERTed values), then the '
'conflict resolution action should be set to '
'"NOTHING".')
elif oc._conflict_constraint:
raise ValueError('SQLite does not support specifying named '
'constraints for conflict resolution.')
elif not oc._conflict_target:
raise ValueError('SQLite requires that a conflict target be '
'specified when doing an upsert.')
return self._build_on_conflict_update(oc, query)
def extract_date(self, date_part, date_field):
return fn.date_part(date_part, date_field, python_value=int)
def truncate_date(self, date_part, date_field):
return fn.date_trunc(date_part, date_field,
python_value=simple_date_time)
def to_timestamp(self, date_field):
return fn.strftime('%s', date_field).cast('integer')
def from_timestamp(self, date_field):
return fn.datetime(date_field, 'unixepoch')
| SqliteDatabase |
python | pytorch__pytorch | test/test_xpu.py | {
"start": 31848,
"end": 33214
} | class ____(TestCase):
def test_is_bf16_supported(self):
self.assertEqual(
torch.xpu.is_bf16_supported(including_emulation=True),
torch.xpu.is_available(),
)
def test_is_tf32_supported(self):
if not torch.xpu.is_available():
self.assertFalse(torch.xpu.is_tf32_supported())
def test_get_arch_list(self):
if not torch.xpu._is_compiled():
self.assertEqual(len(torch.xpu.get_arch_list()), 0)
def test_torch_config_for_xpu(self):
config = torch.__config__.show()
value = re.search(r"USE_XPU=([^,]+)", config)
self.assertIsNotNone(value)
if torch.xpu._is_compiled():
self.assertTrue(value.group(1) in ["ON", "1"])
value = re.search(r"USE_XCCL=([^,]+)", config)
if torch.distributed.is_xccl_available():
self.assertTrue(value.group(1) in ["ON", "1"])
else:
self.assertTrue(value.group(1) in ["OFF", "0"])
else:
self.assertTrue(value.group(1) in ["OFF", "0"])
self.assertFalse(torch.distributed.is_xccl_available())
value = re.search(r"USE_XCCL=([^,]+)", config)
self.assertIsNotNone(value)
self.assertTrue(value.group(1) in ["OFF", "0"])
if __name__ == "__main__":
run_tests()
| TestXPUAPISanity |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 146224,
"end": 147151
} | class ____(sgqlc.types.Input):
"""Information about a sponsorship to make for a user or organization
with a GitHub Sponsors profile, as part of sponsoring many users
or organizations at once.
"""
__schema__ = github_schema
__field_names__ = ("sponsorable_id", "sponsorable_login", "amount")
sponsorable_id = sgqlc.types.Field(ID, graphql_name="sponsorableId")
"""The ID of the user or organization who is receiving the
sponsorship. Required if sponsorableLogin is not given.
"""
sponsorable_login = sgqlc.types.Field(String, graphql_name="sponsorableLogin")
"""The username of the user or organization who is receiving the
sponsorship. Required if sponsorableId is not given.
"""
amount = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="amount")
"""The amount to pay to the sponsorable in US dollars. Valid values:
1-12000.
"""
| BulkSponsorship |
python | getsentry__sentry | src/sentry/incidents/logic.py | {
"start": 44946,
"end": 58996
} | class ____(Exception):
def __init__(self, project_slugs: Collection[str]) -> None:
self.project_slugs = project_slugs
def create_alert_rule_trigger(
alert_rule: AlertRule,
label: str,
alert_threshold: int | float,
) -> AlertRuleTrigger:
"""
Creates a new AlertRuleTrigger
:param alert_rule: The alert rule to create the trigger for
:param label: A description of the trigger
:param alert_threshold: Value that the subscription needs to reach to trigger the
alert rule
trigger. These projects must be associate with the alert rule already
:return: The created AlertRuleTrigger
"""
if AlertRuleTrigger.objects.filter(alert_rule=alert_rule, label=label).exists():
raise AlertRuleTriggerLabelAlreadyUsedError()
if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC and alert_threshold != 0:
raise ValidationError(INVALID_ALERT_THRESHOLD)
with transaction.atomic(router.db_for_write(AlertRuleTrigger)):
trigger = AlertRuleTrigger.objects.create(
alert_rule=alert_rule, label=label, alert_threshold=alert_threshold
)
return trigger
def update_alert_rule_trigger(
trigger: AlertRuleTrigger,
label: str | None = None,
alert_threshold: int | float | None = None,
) -> AlertRuleTrigger:
"""
:param trigger: The AlertRuleTrigger to update
:param label: A description of the trigger
:param alert_threshold: Value that the subscription needs to reach to trigger the
alert rule
:return: The updated AlertRuleTrigger
"""
if (
AlertRuleTrigger.objects.filter(alert_rule=trigger.alert_rule, label=label)
.exclude(id=trigger.id)
.exists()
):
raise AlertRuleTriggerLabelAlreadyUsedError()
if trigger.alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC and alert_threshold != 0:
raise ValidationError(INVALID_ALERT_THRESHOLD)
updated_fields: dict[str, Any] = {}
if label is not None:
updated_fields["label"] = label
if alert_threshold is not None:
updated_fields["alert_threshold"] = alert_threshold
with transaction.atomic(router.db_for_write(AlertRuleTrigger)):
if updated_fields:
trigger.update(**updated_fields)
return trigger
def delete_alert_rule_trigger(trigger: AlertRuleTrigger) -> None:
"""
Deletes an AlertRuleTrigger
"""
trigger.delete()
def get_triggers_for_alert_rule(alert_rule: AlertRule) -> QuerySet[AlertRuleTrigger]:
return AlertRuleTrigger.objects.filter(alert_rule=alert_rule)
def _trigger_incident_triggers(incident: Incident) -> None:
incident_triggers = IncidentTrigger.objects.filter(incident=incident)
triggers = get_triggers_for_alert_rule(incident.alert_rule)
actions = deduplicate_trigger_actions(triggers=list(triggers))
with transaction.atomic(router.db_for_write(AlertRuleTrigger)):
for trigger in incident_triggers:
trigger.status = TriggerStatus.RESOLVED.value
trigger.save()
for action in actions:
for project in incident.projects.all():
_schedule_trigger_action(
action_id=action.id,
incident_id=incident.id,
project_id=project.id,
method="resolve",
new_status=IncidentStatus.CLOSED.value,
)
def _schedule_trigger_action(
action_id: int, incident_id: int, project_id: int, method: str, new_status: int
) -> None:
from sentry.incidents.tasks import handle_trigger_action
transaction.on_commit(
lambda: handle_trigger_action.delay(
action_id=action_id,
incident_id=incident_id,
project_id=project_id,
method=method,
new_status=new_status,
),
using=router.db_for_write(AlertRuleTrigger),
)
def _sort_by_priority_list(
triggers: Collection[AlertRuleTrigger],
) -> list[AlertRuleTrigger]:
priority_dict = {
WARNING_TRIGGER_LABEL: 0,
CRITICAL_TRIGGER_LABEL: 1,
}
return sorted(
triggers,
key=lambda t: priority_dict.get(t.label, len(triggers) + t.id),
)
def _prioritize_actions(
triggers: Collection[AlertRuleTrigger],
) -> list[AlertRuleTriggerAction]:
"""
Function that given an input array of AlertRuleTriggers, prioritizes those triggers
based on their label, and then re-orders actions based on that ordering
Inputs:
* triggers: Array of instances of `AlertRuleTrigger`
Returns:
List of instances of `AlertRuleTriggerAction` that are ordered according to the ordering
of related prioritized instances of `AlertRuleTrigger`
"""
actions = list(
AlertRuleTriggerAction.objects.filter(alert_rule_trigger__in=triggers).select_related(
"alert_rule_trigger"
)
)
triggers = _sort_by_priority_list(triggers=triggers)
triggers_dict = {t.id: idx for idx, t in enumerate(triggers)}
sorted_actions = sorted(
actions,
key=lambda action: triggers_dict.get(
action.alert_rule_trigger.id, len(actions) + action.id
),
)
return sorted_actions
def deduplicate_trigger_actions(
triggers: Collection[AlertRuleTrigger],
) -> list[AlertRuleTriggerAction]:
"""
Given a list of alert rule triggers, we fetch actions, this returns a list of actions that is
unique on (type, target_type, target_identifier, integration_id, sentry_app_id). If there are
duplicate actions, we'll prefer the action from a warning trigger over a critical
trigger. If there are duplicate actions on a single trigger, we'll just choose
one arbitrarily.
:param triggers: A list of `AlertRuleTrigger` instances from the same `AlertRule`
:return: A list of deduplicated `AlertRuleTriggerAction` instances.
"""
actions = _prioritize_actions(triggers=triggers)
deduped: dict[tuple[int, int, str | None, int | None, int | None], AlertRuleTriggerAction] = {}
for action in actions:
key = (
action.type,
action.target_type,
action.target_identifier,
action.integration_id,
action.sentry_app_id,
)
deduped.setdefault(key, action)
return list(deduped.values())
def _get_subscriptions_from_alert_rule(
alert_rule: AlertRule, projects: Collection[Project]
) -> Iterable[QuerySubscription]:
"""
Fetches subscriptions associated with an alert rule filtered by a list of projects.
Raises `ProjectsNotAssociatedWithAlertRuleError` if Projects aren't associated with
the AlertRule
:param alert_rule: The AlertRule to fetch subscriptions for
:param projects: The Project we want subscriptions for
:return: A list of QuerySubscriptions
"""
excluded_subscriptions = _unpack_snuba_query(alert_rule).subscriptions.filter(
project__in=projects
)
if len(excluded_subscriptions) != len(projects):
invalid_slugs = {p.slug for p in projects} - {
s.project.slug for s in excluded_subscriptions
}
raise ProjectsNotAssociatedWithAlertRuleError(invalid_slugs)
return excluded_subscriptions
def create_alert_rule_trigger_action(
trigger: AlertRuleTrigger,
type: ActionService,
target_type: ActionTarget,
target_identifier: str | None = None,
integration_id: int | None = None,
sentry_app_id: int | None = None,
use_async_lookup: bool = False,
input_channel_id: str | None = None,
sentry_app_config: dict[str, Any] | None = None,
installations: list[RpcSentryAppInstallation] | None = None,
integrations: list[RpcIntegration] | None = None,
priority: str | None = None,
) -> AlertRuleTriggerAction:
"""
Creates an AlertRuleTriggerAction
:param trigger: The trigger to create the action on
:param type: Which sort of action to take
:param target_type: Which type of target to send to
:param target_identifier: (Optional) The identifier of the target
:param integration_id: (Optional) The Integration related to this action.
:param sentry_app_id: (Optional) The Sentry App related to this action.
:param use_async_lookup: (Optional) Longer lookup for the Slack channel async job
:param input_channel_id: (Optional) Slack channel ID. If provided skips lookup
:return: The created action
"""
target_display: str | None = None
if type.value in AlertRuleTriggerAction.EXEMPT_SERVICES:
raise InvalidTriggerActionError("Selected notification service is exempt from alert rules")
if type.value in AlertRuleTriggerAction.INTEGRATION_TYPES:
if target_type != AlertRuleTriggerAction.TargetType.SPECIFIC:
raise InvalidTriggerActionError("Must specify specific target type")
target = get_target_identifier_display_for_integration(
type,
target_identifier,
_unpack_organization(trigger.alert_rule),
integration_id,
use_async_lookup=use_async_lookup,
input_channel_id=input_channel_id,
integrations=integrations,
)
elif type == AlertRuleTriggerAction.Type.SENTRY_APP:
target = _get_alert_rule_trigger_action_sentry_app(
_unpack_organization(trigger.alert_rule), sentry_app_id, installations
)
else:
target = AlertTarget(target_identifier, target_display)
# store priority in the json sentry_app_config
if priority is not None and type in [
ActionService.PAGERDUTY,
ActionService.OPSGENIE,
]:
if sentry_app_config:
sentry_app_config.update({"priority": priority})
else:
sentry_app_config = {"priority": priority}
with transaction.atomic(router.db_for_write(AlertRuleTriggerAction)):
trigger_action = AlertRuleTriggerAction.objects.create(
alert_rule_trigger=trigger,
type=type.value,
target_type=target_type.value,
target_identifier=str(target.identifier) if target.identifier is not None else None,
target_display=target.display,
integration_id=integration_id,
sentry_app_id=sentry_app_id,
sentry_app_config=sentry_app_config,
)
return trigger_action
def update_alert_rule_trigger_action(
trigger_action: AlertRuleTriggerAction,
type: ActionService | None = None,
target_type: ActionTarget | None = None,
target_identifier: str | None = None,
integration_id: int | None = None,
sentry_app_id: int | None = None,
use_async_lookup: bool = False,
input_channel_id=None,
sentry_app_config=None,
installations: list[RpcSentryAppInstallation] | None = None,
integrations: list[RpcIntegration] | None = None,
priority: str | None = None,
) -> AlertRuleTriggerAction:
"""
Updates values on an AlertRuleTriggerAction
:param trigger_action: The trigger action to update
:param type: Which sort of action to take
:param target_type: Which type of target to send to
:param target_identifier: The identifier of the target
:param integration_id: (Optional) The ID of the Integration related to this action.
:param sentry_app_id: (Optional) The ID of the SentryApp related to this action.
:param use_async_lookup: (Optional) Longer lookup for the Slack channel async job
:param input_channel_id: (Optional) Slack channel ID. If provided skips lookup
:return:
"""
updated_fields: dict[str, Any] = {}
if type is not None:
updated_fields["type"] = type.value
if target_type is not None:
updated_fields["target_type"] = target_type.value
if integration_id is not None:
updated_fields["integration_id"] = integration_id
if sentry_app_id is not None:
updated_fields["sentry_app_id"] = sentry_app_id
if sentry_app_config is not None:
updated_fields["sentry_app_config"] = sentry_app_config
if target_identifier is not None:
type = updated_fields.get("type", trigger_action.type)
if type in AlertRuleTriggerAction.INTEGRATION_TYPES:
integration_id = updated_fields.get("integration_id", trigger_action.integration_id)
organization = _unpack_organization(trigger_action.alert_rule_trigger.alert_rule)
target = get_target_identifier_display_for_integration(
type,
target_identifier,
organization,
integration_id,
use_async_lookup=use_async_lookup,
input_channel_id=input_channel_id,
integrations=integrations,
)
updated_fields["target_display"] = target.display
elif type == AlertRuleTriggerAction.Type.SENTRY_APP.value:
sentry_app_id = updated_fields.get("sentry_app_id", trigger_action.sentry_app_id)
organization = _unpack_organization(trigger_action.alert_rule_trigger.alert_rule)
target = _get_alert_rule_trigger_action_sentry_app(
organization, sentry_app_id, installations
)
updated_fields["target_display"] = target.display
else:
target = AlertTarget(target_identifier, None)
updated_fields["target_identifier"] = target.identifier
# store priority in the json sentry_app_config
if priority is not None and type in [
ActionService.PAGERDUTY,
ActionService.OPSGENIE,
]:
if updated_fields.get("sentry_app_config"):
updated_fields["sentry_app_config"].update({"priority": priority})
else:
updated_fields["sentry_app_config"] = {"priority": priority}
with transaction.atomic(router.db_for_write(AlertRuleTriggerAction)):
trigger_action.update(**updated_fields)
return trigger_action
@dataclass(frozen=True, eq=True)
| ProjectsNotAssociatedWithAlertRuleError |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/equalization.py | {
"start": 266,
"end": 8659
} | class ____(BaseImagePreprocessingLayer):
"""Preprocessing layer for histogram equalization on image channels.
Histogram equalization is a technique to adjust image intensities to
enhance contrast by effectively spreading out the most frequent
intensity values. This layer applies equalization on a channel-wise
basis, which can improve the visibility of details in images.
This layer works with both grayscale and color images, performing
equalization independently on each color channel. At inference time,
the equalization is consistently applied.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
value_range: Optional list/tuple of 2 floats specifying the lower
and upper limits of the input data values. Defaults to `[0, 255]`.
If the input image has been scaled, use the appropriate range
(e.g., `[0.0, 1.0]`). The equalization will be scaled to this
range, and output values will be clipped accordingly.
bins: Integer specifying the number of histogram bins to use for
equalization. Defaults to 256, which is suitable for 8-bit images.
Larger values can provide more granular intensity redistribution.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
Example:
```python
# Create an equalization layer for standard 8-bit images
equalizer = keras.layers.Equalization()
# An image with uneven intensity distribution
image = [...] # your input image
# Apply histogram equalization
equalized_image = equalizer(image)
# For images with custom value range
custom_equalizer = keras.layers.Equalization(
value_range=[0.0, 1.0], # for normalized images
bins=128 # fewer bins for more subtle equalization
)
custom_equalized = custom_equalizer(normalized_image)
```
"""
def __init__(
self, value_range=(0, 255), bins=256, data_format=None, **kwargs
):
super().__init__(**kwargs)
self.bins = bins
self._set_value_range(value_range)
self.data_format = backend.standardize_data_format(data_format)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def _custom_histogram_fixed_width(self, values, value_range, nbins):
values = self.backend.cast(values, "float32")
value_min, value_max = value_range
value_min = self.backend.cast(value_min, "float32")
value_max = self.backend.cast(value_max, "float32")
scaled = (values - value_min) * (nbins - 1) / (value_max - value_min)
indices = self.backend.cast(scaled, "int32")
indices = self.backend.numpy.clip(indices, 0, nbins - 1)
flat_indices = self.backend.numpy.reshape(indices, [-1])
if backend.backend() == "jax":
# for JAX bincount is never jittable because of output shape
histogram = self.backend.numpy.zeros(nbins, dtype="int32")
for i in range(nbins):
matches = self.backend.cast(
self.backend.numpy.equal(flat_indices, i), "int32"
)
bin_count = self.backend.numpy.sum(matches)
one_hot = self.backend.cast(
self.backend.numpy.arange(nbins) == i, "int32"
)
histogram = histogram + (bin_count * one_hot)
return histogram
else:
# TensorFlow/PyTorch/NumPy implementation using bincount
return self.backend.numpy.bincount(
flat_indices,
minlength=nbins,
)
def _scale_values(self, values, source_range, target_range):
source_min, source_max = source_range
target_min, target_max = target_range
scale = (target_max - target_min) / (source_max - source_min)
offset = target_min - source_min * scale
return values * scale + offset
def _equalize_channel(self, channel, value_range):
if value_range != (0, 255):
channel = self._scale_values(channel, value_range, (0, 255))
hist = self._custom_histogram_fixed_width(
channel, value_range=(0, 255), nbins=self.bins
)
nonzero_bins = self.backend.numpy.count_nonzero(hist)
equalized = self.backend.numpy.where(
nonzero_bins <= 1, channel, self._apply_equalization(channel, hist)
)
if value_range != (0, 255):
equalized = self._scale_values(equalized, (0, 255), value_range)
return equalized
def _apply_equalization(self, channel, hist):
cdf = self.backend.numpy.cumsum(hist)
if self.backend.name == "jax":
mask = cdf > 0
first_nonzero_idx = self.backend.numpy.argmax(mask)
cdf_min = self.backend.numpy.take(cdf, first_nonzero_idx)
else:
cdf_min = self.backend.numpy.take(
cdf, self.backend.numpy.nonzero(cdf)[0][0]
)
denominator = cdf[-1] - cdf_min
denominator = self.backend.numpy.where(
denominator == 0,
self.backend.numpy.ones_like(1, dtype=denominator.dtype),
denominator,
)
lookup_table = ((cdf - cdf_min) * 255) / denominator
lookup_table = self.backend.numpy.clip(
self.backend.numpy.round(lookup_table), 0, 255
)
scaled_channel = (channel / 255.0) * (self.bins - 1)
indices = self.backend.cast(
self.backend.numpy.clip(scaled_channel, 0, self.bins - 1), "int32"
)
return self.backend.numpy.take(lookup_table, indices)
def transform_images(self, images, transformation, training=True):
if training:
images = self.backend.cast(images, self.compute_dtype)
if self.data_format == "channels_first":
channels = []
for i in range(self.backend.core.shape(images)[-3]):
channel = images[..., i, :, :]
equalized = self._equalize_channel(
channel, self.value_range
)
channels.append(equalized)
equalized_images = self.backend.numpy.stack(channels, axis=-3)
else:
channels = []
for i in range(self.backend.core.shape(images)[-1]):
channel = images[..., i]
equalized = self._equalize_channel(
channel, self.value_range
)
channels.append(equalized)
equalized_images = self.backend.numpy.stack(channels, axis=-1)
return self.backend.cast(equalized_images, self.compute_dtype)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
return bounding_boxes
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"bins": self.bins, "value_range": self.value_range})
return config
| Equalization |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 75778,
"end": 76699
} | class ____(Request):
"""
Get the tasks's latest scalar values
:param task: Task ID
:type task: str
"""
_service = "events"
_action = "get_task_latest_scalar_values"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, **kwargs: Any) -> None:
super(GetTaskLatestScalarValuesRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetTaskLatestScalarValuesRequest |
python | pytorch__pytorch | torch/distributed/_tools/ilp_utils.py | {
"start": 6807,
"end": 10094
} | class ____:
def __init__(self, n: int) -> None:
self.nodes: list[Node] = []
self.name2node: dict[str, Node] = {}
self.ad_matrix = np.zeros((n, n))
self.fw_post_order: list[str] = []
def add_node(self, node: Node) -> None:
self.nodes.append(node)
self.name2node[node["fqn"]] = node
def parse_module_info(module_info: ModuleInfo) -> Graph:
"""
Parse module info and create a graph (tree) of modules. The graph will be
used by MILP solver to find optimal SAC and/or FSDP configurations.
"""
mod_stats = module_info["mod_stats"]
fw_pre_order = module_info["mod_order"]["fw_pre_order"]
# assertion and number of nodes
assert len(mod_stats) == len(fw_pre_order)
n_nodes = len(mod_stats)
# create graph
g = Graph(n_nodes)
g.fw_post_order = module_info["mod_order"]["fw_post_order"]
# sort the modules by pre-order and add them to the graph
module_info["mod_stats"] = sorted(
mod_stats, key=lambda x: fw_pre_order.index(x["fqn"])
)
for i, one_mod_stats in enumerate(mod_stats):
node: Node = cast(Node, one_mod_stats)
node["index"] = i
node["pos_fw_post_order"] = g.fw_post_order.index(node["fqn"])
g.add_node(node)
# set up ancestor-descendant matrix
for i in range(n_nodes):
for j in range(i, n_nodes):
if is_self_or_submodule(g.nodes[j]["fqn"], g.nodes[i]["fqn"]):
g.ad_matrix[i][j] = 1
else:
break
return g
def is_self_or_submodule(name_descendant: str, name_ancestor: str) -> bool:
"""
check if name_descendant is a submodule of name_ancestor, or if they are the same
"""
return name_descendant == name_ancestor or name_ancestor + "." in name_descendant
def is_submodule(name_descendant: str, name_ancestor: str) -> bool:
"""
if name_descendant is a submodule of name_ancestor, but not the same
"""
return name_ancestor + "." in name_descendant
def display_bytes(b: int, unit: str = "MiB") -> str:
"""
return a string that represent the number of bytes in a desired unit
"""
if unit == "KiB":
return f"{b / 2**10:.2f} KiB"
if unit == "MiB":
return f"{b / 2**20:.2f} MiB"
if unit == "GiB":
return f"{b / 2**30:.2f} GiB"
return f"{b:.2f} bytes"
def get_peak_memory_runtime_baseline(graph: Graph) -> tuple[int, float]:
"""
Get the baseline peak memory and runtime.
Baseline here means there is no FSDP or AC.
Memory includes the parameters, gradients, activations, and activation gradients.
Memory does not include e.g., optimizer states, embedding tables, etc.
Returns:
int: peak memory in bytes
float: compute time in ms
"""
P_1 = graph.nodes[0]["param_per_module"]
num_nodes = len(graph.nodes)
peak_mem = 0
for i in range(num_nodes):
TG_i = graph.nodes[i]["grad_total"]
AG_i = graph.nodes[i]["act_grad_per_module"]
TA_i = graph.nodes[i]["act_total"]
peak_mem = max(peak_mem, P_1 + TG_i + AG_i + TA_i)
compute_time = (
graph.nodes[0]["fw_runtime_per_module"]
+ graph.nodes[0]["bw_runtime_per_module"]
)
return (peak_mem, compute_time)
| Graph |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 224116,
"end": 224599
} | class ____(sgqlc.types.Input):
"""Ordering options for discussion poll option connections."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(DiscussionPollOptionOrderField), graphql_name="field")
"""The field to order poll options by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| DiscussionPollOptionOrder |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/executors/kubernetes_executor_types.py | {
"start": 1090,
"end": 1488
} | class ____(TypedDict, total=False):
"""Detailed information about pod/container failure."""
pod_status: str | None
pod_reason: str | None
pod_message: str | None
container_state: str | None
container_reason: str | None
container_message: str | None
exit_code: int | None
container_type: Literal["init", "main"] | None
container_name: str | None
| FailureDetails |
python | spack__spack | lib/spack/spack/test/conftest.py | {
"start": 65897,
"end": 77326
} | class ____:
has_code = False
name = "mock-bundle"
@pytest.fixture
def mock_directive_bundle():
"""Return a mock bundle package for directive tests."""
return MockBundle()
@pytest.fixture
def clear_directive_functions():
"""Clear all overidden directive functions for subsequent tests."""
yield
# Make sure any directive functions overidden by tests are cleared before
# proceeding with subsequent tests that may depend on the original
# functions.
spack.directives_meta.DirectiveMeta._directives_to_be_executed = []
@pytest.fixture
def mock_executable(tmp_path: Path):
"""Factory to create a mock executable in a temporary directory that
output a custom string when run.
"""
shebang = "#!/bin/sh\n" if sys.platform != "win32" else "@ECHO OFF\n"
def _factory(name, output, subdir=("bin",)):
executable_dir = tmp_path.joinpath(*subdir)
executable_dir.mkdir(parents=True, exist_ok=True)
executable_path = executable_dir / name
if sys.platform == "win32":
executable_path = executable_dir / (name + ".bat")
executable_path.write_text(f"{shebang}{output}\n")
executable_path.chmod(0o755)
return executable_path
return _factory
@pytest.fixture()
def mock_test_stage(mutable_config, tmp_path: Path):
# NOTE: This fixture MUST be applied after any fixture that uses
# the config fixture under the hood
# No need to unset because we use mutable_config
tmp_stage = str(tmp_path / "test_stage")
mutable_config.set("config:test_stage", tmp_stage)
yield tmp_stage
@pytest.fixture(autouse=True)
def inode_cache():
spack.llnl.util.lock.FILE_TRACKER.purge()
yield
# TODO: it is a bug when the file tracker is non-empty after a test,
# since it means a lock was not released, or the inode was not purged
# when acquiring the lock failed. So, we could assert that here, but
# currently there are too many issues to fix, so look for the more
# serious issue of having a closed file descriptor in the cache.
assert not any(f.fh.closed for f in spack.llnl.util.lock.FILE_TRACKER._descriptors.values())
spack.llnl.util.lock.FILE_TRACKER.purge()
@pytest.fixture(autouse=True)
def brand_new_binary_cache():
yield
spack.binary_distribution.BINARY_INDEX = spack.llnl.util.lang.Singleton(
spack.binary_distribution.BinaryCacheIndex
)
@pytest.fixture()
def noncyclical_dir_structure(tmp_path: Path):
"""
Create some non-trivial directory structure with
symlinks to dirs and dangling symlinks, but no cycles::
.
|-- a/
| |-- d/
| |-- file_1
| |-- to_file_1 -> file_1
| `-- to_c -> ../c
|-- b -> a
|-- c/
| |-- dangling_link -> nowhere
| `-- file_2
`-- file_3
"""
d = tmp_path / "nontrivial-dir"
d.mkdir()
j = os.path.join
with working_dir(str(d)):
os.mkdir(j("a"))
os.mkdir(j("a", "d"))
with open(j("a", "file_1"), "wb"):
pass
os.symlink(j("file_1"), j("a", "to_file_1"))
os.symlink(j("..", "c"), j("a", "to_c"))
os.symlink(j("a"), j("b"))
os.mkdir(j("c"))
os.symlink(j("nowhere"), j("c", "dangling_link"))
with open(j("c", "file_2"), "wb"):
pass
with open(j("file_3"), "wb"):
pass
yield d
@pytest.fixture(scope="function")
def mock_config_data():
config_data_dir = os.path.join(spack.paths.test_path, "data", "config")
return config_data_dir, os.listdir(config_data_dir)
@pytest.fixture(scope="function")
def mock_curl_configs(mock_config_data, monkeypatch):
"""
Mock curl-based retrieval of configuration files from the web by grabbing
them from the test data configuration directory.
Fetches a single (configuration) file if the name matches one in the test
data directory.
"""
config_data_dir, config_files = mock_config_data
class MockCurl:
def __init__(self):
self.returncode = None
def __call__(self, *args, **kwargs):
url = [a for a in args if a.startswith("http")][0]
basename = os.path.basename(url)
if os.path.splitext(url)[1]:
if basename in config_files:
filename = os.path.join(config_data_dir, basename)
with open(filename, "r", encoding="utf-8") as f:
lines = f.readlines()
write_file(os.path.basename(filename), "".join(lines))
self.returncode = 0
else:
# This is a "404" and is technically only returned if -f
# flag is provided to curl.
tty.msg("curl: (22) The requested URL returned error: 404")
self.returncode = 22
monkeypatch.setattr(spack.util.web, "require_curl", MockCurl)
@pytest.fixture(scope="function")
def mock_fetch_url_text(mock_config_data, monkeypatch):
"""Mock spack.util.web.fetch_url_text."""
stage_dir, config_files = mock_config_data
def _fetch_text_file(url, dest_dir):
raw_url = raw_github_gitlab_url(url)
mkdirp(dest_dir)
basename = os.path.basename(raw_url)
src = join_path(stage_dir, basename)
dest = join_path(dest_dir, basename)
copy(src, dest)
return dest
monkeypatch.setattr(spack.util.web, "fetch_url_text", _fetch_text_file)
@pytest.fixture(scope="function")
def mock_tty_stdout(monkeypatch):
monkeypatch.setattr(sys.stdout, "isatty", lambda: True)
@pytest.fixture
def prefix_like():
return "package-0.0.0.a1-hashhashhashhashhashhashhashhash"
@pytest.fixture()
def prefix_tmpdir(tmp_path: Path, prefix_like: str):
prefix_dir = tmp_path / prefix_like
prefix_dir.mkdir()
return prefix_dir
@pytest.fixture()
def binary_with_rpaths(prefix_tmpdir: Path):
"""Factory fixture that compiles an ELF binary setting its RPATH. Relative
paths are encoded with `$ORIGIN` prepended.
"""
def _factory(rpaths, message="Hello world!", dynamic_linker="/lib64/ld-linux.so.2"):
source = prefix_tmpdir / "main.c"
source.write_text(
"""
#include <stdio.h>
int main(){{
printf("{0}");
}}
""".format(
message
)
)
gcc = spack.util.executable.which("gcc", required=True)
executable = source.parent / "main.x"
# Encode relative RPATHs using `$ORIGIN` as the root prefix
rpaths = [x if os.path.isabs(x) else os.path.join("$ORIGIN", x) for x in rpaths]
opts = [
"-Wl,--disable-new-dtags",
f"-Wl,-rpath={':'.join(rpaths)}",
f"-Wl,--dynamic-linker,{dynamic_linker}",
str(source),
"-o",
str(executable),
]
gcc(*opts)
return executable
return _factory
@pytest.fixture(scope="session")
def concretized_specs_cache():
"""Cache for mock concrete specs"""
return {}
@pytest.fixture
def default_mock_concretization(config, mock_packages, concretized_specs_cache):
"""Return the default mock concretization of a spec literal, obtained using the mock
repository and the mock configuration.
This fixture is unsafe to call in a test when either the default configuration or mock
repository are not used or have been modified.
"""
def _func(spec_str, tests=False):
key = spec_str, tests
if key not in concretized_specs_cache:
concretized_specs_cache[key] = spack.concretize.concretize_one(
spack.spec.Spec(spec_str), tests=tests
)
return concretized_specs_cache[key].copy()
return _func
@pytest.fixture
def shell_as(shell):
if sys.platform != "win32":
yield
return
if shell not in ("pwsh", "bat"):
raise RuntimeError("Shell must be one of supported Windows shells (pwsh|bat)")
try:
# fetch and store old shell type
_shell = os.environ.get("SPACK_SHELL", None)
os.environ["SPACK_SHELL"] = shell
yield
finally:
# restore old shell if one was set
if _shell:
os.environ["SPACK_SHELL"] = _shell
@pytest.fixture()
def nullify_globals(request, monkeypatch):
ensure_configuration_fixture_run_before(request)
monkeypatch.setattr(spack.config, "CONFIG", None)
monkeypatch.setattr(spack.caches, "MISC_CACHE", None)
monkeypatch.setattr(spack.caches, "FETCH_CACHE", None)
monkeypatch.setattr(spack.repo, "PATH", None)
monkeypatch.setattr(spack.store, "STORE", None)
def pytest_runtest_setup(item):
# Skip test marked "not_on_windows" if they're run on Windows
not_on_windows_marker = item.get_closest_marker(name="not_on_windows")
if not_on_windows_marker and sys.platform == "win32":
pytest.skip(*not_on_windows_marker.args)
# Skip items marked "only windows" if they're run anywhere but Windows
only_windows_marker = item.get_closest_marker(name="only_windows")
if only_windows_marker and sys.platform != "win32":
pytest.skip(*only_windows_marker.args)
def _sequential_executor(*args, **kwargs):
return spack.util.parallel.SequentialExecutor()
@pytest.fixture(autouse=True)
def disable_parallel_buildcache_push(monkeypatch):
"""Disable process pools in tests."""
monkeypatch.setattr(spack.util.parallel, "make_concurrent_executor", _sequential_executor)
def _root_path(x, y, *, path):
return path
@pytest.fixture
def mock_modules_root(tmp_path: Path, monkeypatch):
"""Sets the modules root to a temporary directory, to avoid polluting configuration scopes."""
fn = functools.partial(_root_path, path=str(tmp_path))
monkeypatch.setattr(spack.modules.common, "root_path", fn)
@pytest.fixture()
def compiler_factory():
"""Factory for a compiler dict, taking a spec and an OS as arguments."""
def _factory(*, spec):
return {
"spec": f"{spec}",
"prefix": "/path",
"extra_attributes": {"compilers": {"c": "/path/bin/cc", "cxx": "/path/bin/cxx"}},
}
return _factory
@pytest.fixture()
def host_architecture_str():
"""Returns the broad architecture family (x86_64, aarch64, etc.)"""
return str(spack.vendor.archspec.cpu.host().family)
def _true(x):
return True
def _libc_from_python(self):
return spack.spec.Spec("glibc@=2.28", external_path="/some/path")
@pytest.fixture()
def do_not_check_runtimes_on_reuse(monkeypatch):
monkeypatch.setattr(spack.solver.reuse, "_has_runtime_dependencies", _true)
@pytest.fixture(autouse=True, scope="session")
def _c_compiler_always_exists():
fn = spack.solver.asp.c_compiler_runs
spack.solver.asp.c_compiler_runs = _true
mthd = spack.compilers.libraries.CompilerPropertyDetector.default_libc
spack.compilers.libraries.CompilerPropertyDetector.default_libc = _libc_from_python
yield
spack.solver.asp.c_compiler_runs = fn
spack.compilers.libraries.CompilerPropertyDetector.default_libc = mthd
@pytest.fixture(scope="session")
def mock_test_cache(tmp_path_factory: pytest.TempPathFactory):
cache_dir = tmp_path_factory.mktemp("cache")
return spack.util.file_cache.FileCache(cache_dir)
| MockBundle |
python | mlflow__mlflow | tests/langchain/test_langchain_autolog.py | {
"start": 11177,
"end": 11551
} | class ____(BaseCallbackHandler):
def __init__(self):
self.logs = []
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
self.logs.append("chain_start")
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
self.logs.append("chain_end")
| CustomCallbackHandler |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_constructors.py | {
"start": 3789,
"end": 7367
} | class ____:
def test_timestamp_constructor_invalid_fold_raise(self):
# Test for GH#25057
# Valid fold values are only [None, 0, 1]
msg = "Valid values for the fold argument are None, 0, or 1."
with pytest.raises(ValueError, match=msg):
Timestamp(123, fold=2)
def test_timestamp_constructor_pytz_fold_raise(self):
# Test for GH#25057
# pytz doesn't support fold. Check that we raise
# if fold is passed with pytz
pytz = pytest.importorskip("pytz")
msg = "pytz timezones do not support fold. Please use dateutil timezones."
tz = pytz.timezone("Europe/London")
with pytest.raises(ValueError, match=msg):
Timestamp(datetime(2019, 10, 27, 0, 30, 0, 0), tz=tz, fold=0)
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize(
"ts_input",
[
1572136200000000000,
1572136200000000000.0,
np.datetime64(1572136200000000000, "ns"),
"2019-10-27 01:30:00+01:00",
datetime(2019, 10, 27, 0, 30, 0, 0, tzinfo=timezone.utc),
],
)
def test_timestamp_constructor_fold_conflict(self, ts_input, fold):
# Test for GH#25057
# Check that we raise on fold conflict
msg = (
"Cannot pass fold with possibly unambiguous input: int, float, "
"numpy.datetime64, str, or timezone-aware datetime-like. "
"Pass naive datetime-like or build Timestamp from components."
)
with pytest.raises(ValueError, match=msg):
Timestamp(ts_input=ts_input, fold=fold)
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", None])
@pytest.mark.parametrize("fold", [0, 1])
def test_timestamp_constructor_retain_fold(self, tz, fold):
# Test for GH#25057
# Check that we retain fold
ts = Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold)
result = ts.fold
expected = fold
assert result == expected
@pytest.mark.parametrize(
"tz",
[
"dateutil/Europe/London",
zoneinfo.ZoneInfo("Europe/London"),
],
)
@pytest.mark.parametrize(
"ts_input,fold_out",
[
(1572136200000000000, 0),
(1572139800000000000, 1),
("2019-10-27 01:30:00+01:00", 0),
("2019-10-27 01:30:00+00:00", 1),
(datetime(2019, 10, 27, 1, 30, 0, 0, fold=0), 0),
(datetime(2019, 10, 27, 1, 30, 0, 0, fold=1), 1),
],
)
def test_timestamp_constructor_infer_fold_from_value(self, tz, ts_input, fold_out):
# Test for GH#25057
# Check that we infer fold correctly based on timestamps since utc
# or strings
ts = Timestamp(ts_input, tz=tz)
result = ts.fold
expected = fold_out
assert result == expected
@pytest.mark.parametrize("tz", ["dateutil/Europe/London"])
@pytest.mark.parametrize(
"fold,value_out",
[
(0, 1572136200000000),
(1, 1572139800000000),
],
)
def test_timestamp_constructor_adjust_value_for_fold(self, tz, fold, value_out):
# Test for GH#25057
# Check that we adjust value for fold correctly
# based on timestamps since utc
ts_input = datetime(2019, 10, 27, 1, 30)
ts = Timestamp(ts_input, tz=tz, fold=fold)
result = ts._value
expected = value_out
assert result == expected
| TestTimestampConstructorFoldKeyword |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_name02.py | {
"start": 315,
"end": 1710
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_name02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({"type": "line", "name": "New 1"})
chart2 = workbook.add_chart({"type": "line", "name": "New 2"})
chart1.axis_ids = [44271104, 45703168]
chart2.axis_ids = [80928128, 80934400]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart1.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart1.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart1.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart2.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart2.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart2.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart1)
worksheet.insert_chart("E24", chart2)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Textualize__textual | docs/examples/styles/background_transparency.py | {
"start": 80,
"end": 736
} | class ____(App):
"""Simple app to exemplify different transparency settings."""
CSS_PATH = "background_transparency.tcss"
def compose(self) -> ComposeResult:
yield Static("10%", id="t10")
yield Static("20%", id="t20")
yield Static("30%", id="t30")
yield Static("40%", id="t40")
yield Static("50%", id="t50")
yield Static("60%", id="t60")
yield Static("70%", id="t70")
yield Static("80%", id="t80")
yield Static("90%", id="t90")
yield Static("100%", id="t100")
if __name__ == "__main__":
app = BackgroundTransparencyApp()
app.run()
| BackgroundTransparencyApp |
python | sympy__sympy | sympy/polys/numberfields/modules.py | {
"start": 60743,
"end": 61344
} | class ____(ModuleEndomorphism):
r"""
An inner endomorphism on a module, i.e. the endomorphism corresponding to
multiplication by a fixed element.
"""
def __init__(self, domain, multiplier):
r"""
Parameters
==========
domain : :py:class:`~.Module`
The domain and codomain of the endomorphism.
multiplier : :py:class:`~.ModuleElement`
The element $a$ defining the mapping as $x \mapsto a x$.
"""
super().__init__(domain, lambda x: multiplier * x)
self.multiplier = multiplier
| InnerEndomorphism |
python | openai__openai-python | src/openai/resources/responses/input_tokens.py | {
"start": 13747,
"end": 14006
} | class ____:
def __init__(self, input_tokens: AsyncInputTokens) -> None:
self._input_tokens = input_tokens
self.count = _legacy_response.async_to_raw_response_wrapper(
input_tokens.count,
)
| AsyncInputTokensWithRawResponse |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_isbn13.py | {
"start": 1566,
"end": 3847
} | class ____(ColumnMapExpectation):
"""Expect column values to conform to the valid ISBN13 format."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_isbn13": [
"978-3-16-148410-0",
"978-1-86197-876-9",
"9783161484100",
"9783161 484100",
],
"malformed_isbn13": [
"",
"0-521-22151-X",
"978-3-16-148410-Z",
"This is not a valid ISBN13",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_isbn13"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_isbn13"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_isbn13"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["isbnlib"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIsbn13().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidIsbn13 |
python | kamyu104__LeetCode-Solutions | Python/count-of-smaller-numbers-after-self.py | {
"start": 2517,
"end": 4489
} | class ____(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
res = [0] * len(nums)
bst = self.BST()
# Insert into BST and get left count.
for i in reversed(xrange(len(nums))):
bst.insertNode(nums[i])
res[i] = bst.query(nums[i])
return res
class BST(object):
class BSTreeNode(object):
def __init__(self, val):
self.val = val
self.count = 0
self.left = self.right = None
def __init__(self):
self.root = None
# Insert node into BST.
def insertNode(self, val):
node = self.BSTreeNode(val)
if not self.root:
self.root = node
return
curr = self.root
while curr:
# Insert left if smaller.
if node.val < curr.val:
curr.count += 1 # Increase the number of left children.
if curr.left:
curr = curr.left
else:
curr.left = node
break
else: # Insert right if larger or equal.
if curr.right:
curr = curr.right
else:
curr.right = node
break
# Query the smaller count of the value.
def query(self, val):
count = 0
curr = self.root
while curr:
# Insert left.
if val < curr.val:
curr = curr.left
elif val > curr.val:
count += 1 + curr.count # Count the number of the smaller nodes.
curr = curr.right
else: # Equal.
return count + curr.count
return 0
| Solution3 |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 1626,
"end": 1736
} | class ____(PayloadEncodingError):
"""Not enough data to satisfy content length header."""
| ContentLengthError |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 5879,
"end": 8710
} | class ____(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
check_for_adapt=False,
**kwargs,
):
for c in self.columns:
if adapter:
if check_for_adapt:
c = adapter.adapt_check_present(c)
if c is None:
return
else:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
if fetch is None:
# None happens here only for dml bulk_persistence cases
# when context.DMLReturningColFilter is used
return
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or (
mapper.version_id_col is not None
and mapper._columntoproperty.get(mapper.version_id_col, None)
is self.parent_property
)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
default_scalar_value=self.parent_property._default_scalar_value,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
| _ColumnLoader |
python | lazyprogrammer__machine_learning_examples | ab_testing/bayesian_bandit.py | {
"start": 543,
"end": 1937
} | class ____:
def __init__(self, p):
self.p = p
self.a = 1
self.b = 1
self.N = 0 # for information only
def pull(self):
return np.random.random() < self.p
def sample(self):
return np.random.beta(self.a, self.b)
def update(self, x):
self.a += x
self.b += 1 - x
self.N += 1
def plot(bandits, trial):
x = np.linspace(0, 1, 200)
for b in bandits:
y = beta.pdf(x, b.a, b.b)
plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}")
plt.title(f"Bandit distributions after {trial} trials")
plt.legend()
plt.show()
def experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
sample_points = [5,10,20,50,100,200,500,1000,1500,1999]
rewards = np.zeros(NUM_TRIALS)
for i in range(NUM_TRIALS):
# Thompson sampling
j = np.argmax([b.sample() for b in bandits])
# plot the posteriors
if i in sample_points:
plot(bandits, i)
# pull the arm for the bandit with the largest sample
x = bandits[j].pull()
# update rewards
rewards[i] = x
# update the distribution for the bandit whose arm we just pulled
bandits[j].update(x)
# print total reward
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num times selected each bandit:", [b.N for b in bandits])
if __name__ == "__main__":
experiment()
| Bandit |
python | django__django | django/utils/text.py | {
"start": 5291,
"end": 5510
} | class ____(TruncateHTMLParser):
def process(self, data):
data = re.split(r"(?<=\S)\s+(?=\S)", data)
output = escape(" ".join(data[: self.remaining]))
return data, output
| TruncateWordsHTMLParser |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 18553,
"end": 20708
} | class ____(MultiStepInputSource, IHaveNew):
"""This step input is fans-in multiple sources in to a single input. The input will receive a list."""
sources: Sequence[StepInputSource]
# deprecated, preserved for back-compat
node_handle: NodeHandle
input_name: str
def __new__(
cls,
sources: Sequence[StepInputSource],
# deprecated, preserved for back-compat
node_handle: Optional[NodeHandle] = None,
input_name: Optional[str] = None,
):
check.sequence_param(sources, "sources", StepInputSource)
for source in sources:
check.invariant(
not isinstance(source, MultiStepInputSource),
"Can not have multiple levels of MultiStepInputSource StepInputSource",
)
return super().__new__(
cls,
sources=sources,
# add placeholder values for back-compat
node_handle=node_handle or NodeHandle("", None),
input_name=input_name or "",
)
def load_input_object(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
) -> Iterator[object]:
from dagster._core.events import DagsterEvent
# some upstream steps may have skipped and we allow fan-in to continue in their absence
source_handles_to_skip = list(
filter(
lambda x: not step_context.can_load(x),
self.step_output_handle_dependencies,
)
)
values = []
for inner_source in self.sources:
if (
isinstance(inner_source, FromStepOutput)
and inner_source.step_output_handle in source_handles_to_skip
):
continue
for event_or_input_value in inner_source.load_input_object(step_context, input_def):
if isinstance(event_or_input_value, DagsterEvent):
yield event_or_input_value
else:
values.append(event_or_input_value)
yield values
@whitelist_for_serdes
@record_custom
| FromMultipleSources |
python | modin-project__modin | modin/tests/pandas/extensions/test_dataframe_extensions.py | {
"start": 5812,
"end": 6900
} | class ____:
"""
Make sure to test that we override special "dunder" methods like __len__
correctly. python calls these methods with DataFrame.__len__(obj)
rather than getattr(obj, "__len__")().
source: https://docs.python.org/3/reference/datamodel.html#special-lookup
"""
def test_len(self, Backend1):
@register_dataframe_accessor(name="__len__", backend=Backend1)
def always_get_1(self):
return 1
df = pd.DataFrame([1, 2, 3])
assert len(df) == 3
backend_df = df.set_backend(Backend1)
assert len(backend_df) == 1
assert backend_df.__len__() == 1
def test_repr(self, Backend1):
@register_dataframe_accessor(name="__repr__", backend=Backend1)
def simple_repr(self) -> str:
return "dataframe_string"
df = pd.DataFrame([1, 2, 3])
assert repr(df) == repr(df.modin.to_pandas())
backend_df = df.set_backend(Backend1)
assert repr(backend_df) == "dataframe_string"
assert backend_df.__repr__() == "dataframe_string"
| TestDunders |
python | sqlalchemy__sqlalchemy | test/sql/test_external_traversal.py | {
"start": 54805,
"end": 88759
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
global t1, t2
t1 = table("table1", column("col1"), column("col2"), column("col3"))
t2 = table("table2", column("col1"), column("col2"), column("col3"))
def test_correlation_on_clone(self):
t1alias = t1.alias("t1alias")
t2alias = t2.alias("t2alias")
vis = sql_util.ClauseAdapter(t1alias)
s = (
select(literal_column("*"))
.select_from(t1alias, t2alias)
.scalar_subquery()
)
froms = list(s._iterate_from_elements())
assert t2alias in froms
assert t1alias in froms
self.assert_compile(
select(literal_column("*")).where(t2alias.c.col1 == s),
"SELECT * FROM table2 AS t2alias WHERE "
"t2alias.col1 = (SELECT * FROM table1 AS "
"t1alias)",
)
s = vis.traverse(s)
froms = list(s._iterate_from_elements())
assert t2alias in froms # present because it was not cloned
assert t1alias in froms # present because the adapter placed
# it there and was also not cloned
# correlate list on "s" needs to take into account the full
# _cloned_set for each element in _froms when correlating
self.assert_compile(
select(literal_column("*")).where(t2alias.c.col1 == s),
"SELECT * FROM table2 AS t2alias WHERE "
"t2alias.col1 = (SELECT * FROM table1 AS "
"t1alias)",
)
s = (
select(literal_column("*"))
.select_from(t1alias, t2alias)
.correlate(t2alias)
.scalar_subquery()
)
self.assert_compile(
select(literal_column("*")).where(t2alias.c.col1 == s),
"SELECT * FROM table2 AS t2alias WHERE "
"t2alias.col1 = (SELECT * FROM table1 AS "
"t1alias)",
)
s = vis.traverse(s)
self.assert_compile(
select(literal_column("*")).where(t2alias.c.col1 == s),
"SELECT * FROM table2 AS t2alias WHERE "
"t2alias.col1 = (SELECT * FROM table1 AS "
"t1alias)",
)
s = CloningVisitor().traverse(s)
self.assert_compile(
select(literal_column("*")).where(t2alias.c.col1 == s),
"SELECT * FROM table2 AS t2alias WHERE "
"t2alias.col1 = (SELECT * FROM table1 AS "
"t1alias)",
)
s = (
select(literal_column("*"))
.where(t1.c.col1 == t2.c.col1)
.scalar_subquery()
)
self.assert_compile(
select(t1.c.col1, s),
"SELECT table1.col1, (SELECT * FROM table2 "
"WHERE table1.col1 = table2.col1) AS "
"anon_1 FROM table1",
)
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(
select(t1alias.c.col1, s),
"SELECT t1alias.col1, (SELECT * FROM "
"table2 WHERE t1alias.col1 = table2.col1) "
"AS anon_1 FROM table1 AS t1alias",
)
s = CloningVisitor().traverse(s)
self.assert_compile(
select(t1alias.c.col1, s),
"SELECT t1alias.col1, (SELECT * FROM "
"table2 WHERE t1alias.col1 = table2.col1) "
"AS anon_1 FROM table1 AS t1alias",
)
s = (
select(literal_column("*"))
.where(t1.c.col1 == t2.c.col1)
.correlate(t1)
.scalar_subquery()
)
self.assert_compile(
select(t1.c.col1, s),
"SELECT table1.col1, (SELECT * FROM table2 "
"WHERE table1.col1 = table2.col1) AS "
"anon_1 FROM table1",
)
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(
select(t1alias.c.col1, s),
"SELECT t1alias.col1, (SELECT * FROM "
"table2 WHERE t1alias.col1 = table2.col1) "
"AS anon_1 FROM table1 AS t1alias",
)
s = CloningVisitor().traverse(s)
self.assert_compile(
select(t1alias.c.col1, s),
"SELECT t1alias.col1, (SELECT * FROM "
"table2 WHERE t1alias.col1 = table2.col1) "
"AS anon_1 FROM table1 AS t1alias",
)
def test_adapt_select_w_unlabeled_fn(self):
expr = func.count(t1.c.col1)
stmt = select(t1, expr)
self.assert_compile(
stmt,
"SELECT table1.col1, table1.col2, table1.col3, "
"count(table1.col1) AS count_1 FROM table1",
)
stmt2 = select(stmt.subquery())
self.assert_compile(
stmt2,
"SELECT anon_1.col1, anon_1.col2, anon_1.col3, anon_1.count_1 "
"FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3, count(table1.col1) AS count_1 "
"FROM table1) AS anon_1",
)
is_(
stmt2.selected_columns[3],
stmt2.selected_columns.corresponding_column(expr),
)
is_(
sql_util.ClauseAdapter(stmt2).replace(expr),
stmt2.selected_columns[3],
)
column_adapter = sql_util.ColumnAdapter(stmt2)
is_(column_adapter.columns[expr], stmt2.selected_columns[3])
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_correlate_except_on_clone(self, use_adapt_from):
# test [ticket:4537]'s issue
t1alias = t1.alias("t1alias")
j = t1.join(t1alias, t1.c.col1 == t1alias.c.col2)
if use_adapt_from:
vis = sql_util.ClauseAdapter(j, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(j)
# "control" subquery - uses correlate which has worked w/ adaption
# for a long time
control_s = (
select(t2.c.col1)
.where(t2.c.col1 == t1.c.col1)
.correlate(t2)
.scalar_subquery()
)
# test subquery - given only t1 and t2 in the enclosing selectable,
# will do the same thing as the "control" query since the correlation
# works out the same
s = (
select(t2.c.col1)
.where(t2.c.col1 == t1.c.col1)
.correlate_except(t1)
.scalar_subquery()
)
# use both subqueries in statements
control_stmt = select(control_s, t1.c.col1, t2.c.col1).select_from(
t1.join(t2, t1.c.col1 == t2.c.col1)
)
stmt = select(s, t1.c.col1, t2.c.col1).select_from(
t1.join(t2, t1.c.col1 == t2.c.col1)
)
# they are the same
self.assert_compile(
control_stmt,
"SELECT "
"(SELECT table2.col1 FROM table1 "
"WHERE table2.col1 = table1.col1) AS anon_1, "
"table1.col1, table2.col1 AS col1_1 "
"FROM table1 "
"JOIN table2 ON table1.col1 = table2.col1",
)
self.assert_compile(
stmt,
"SELECT "
"(SELECT table2.col1 FROM table1 "
"WHERE table2.col1 = table1.col1) AS anon_1, "
"table1.col1, table2.col1 AS col1_1 "
"FROM table1 "
"JOIN table2 ON table1.col1 = table2.col1",
)
# now test against the adaption of "t1" into "t1 JOIN t1alias".
# note in the control case, we aren't actually testing that
# Select is processing the "correlate" list during the adaption
# since we aren't adapting the "correlate"
self.assert_compile(
vis.traverse(control_stmt),
"SELECT "
"(SELECT table2.col1 FROM "
"table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
"WHERE table2.col1 = table1.col1) AS anon_1, "
"table1.col1, table2.col1 AS col1_1 "
"FROM table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
"JOIN table2 ON table1.col1 = table2.col1",
)
# but here, correlate_except() does have the thing we're adapting
# so whatever is in there has to be expanded out to include
# the adaptation target, in this case "t1 JOIN t1alias".
self.assert_compile(
vis.traverse(stmt),
"SELECT "
"(SELECT table2.col1 FROM "
"table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
"WHERE table2.col1 = table1.col1) AS anon_1, "
"table1.col1, table2.col1 AS col1_1 "
"FROM table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
"JOIN table2 ON table1.col1 = table2.col1",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_correlate_except_with_mixed_tables(self, use_adapt_from):
# test [ticket:6060]'s issue
stmt = select(
t1.c.col1,
select(func.count(t2.c.col1))
.where(t2.c.col1 == t1.c.col1)
.correlate_except(t2)
.scalar_subquery(),
)
self.assert_compile(
stmt,
"SELECT table1.col1, "
"(SELECT count(table2.col1) AS count_1 FROM table2 "
"WHERE table2.col1 = table1.col1) AS anon_1 "
"FROM table1",
)
subq = (
select(t1)
.join(t2, t1.c.col1 == t2.c.col1)
.where(t2.c.col2 == "x")
.subquery()
)
if use_adapt_from:
vis = sql_util.ClauseAdapter(subq, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(subq)
if use_adapt_from:
self.assert_compile(
vis.traverse(stmt),
"SELECT anon_1.col1, "
"(SELECT count(table2.col1) AS count_1 FROM table2 WHERE "
"table2.col1 = anon_1.col1) AS anon_2 "
"FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 JOIN table2 ON table1.col1 = "
"table2.col1 WHERE table2.col2 = :col2_1) AS anon_1",
)
else:
# here's the buggy version. table2 gets yanked out of the
# correlated subquery also. AliasedClass now uses
# adapt_from_selectables in all cases
self.assert_compile(
vis.traverse(stmt),
"SELECT anon_1.col1, "
"(SELECT count(table2.col1) AS count_1 FROM table2, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 JOIN table2 ON "
"table1.col1 = table2.col1 WHERE table2.col2 = :col2_1) AS "
"anon_1 WHERE table2.col1 = anon_1.col1) AS anon_2 "
"FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 JOIN table2 "
"ON table1.col1 = table2.col1 "
"WHERE table2.col2 = :col2_1) AS anon_1",
)
@testing.fails_on_everything_except()
def test_joins_dont_adapt(self):
# adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
# make much sense. ClauseAdapter doesn't make any changes if
# it's against a straight join.
users = table("users", column("id"))
addresses = table("addresses", column("id"), column("user_id"))
ualias = users.alias()
s = (
select(func.count(addresses.c.id))
.where(users.c.id == addresses.c.user_id)
.correlate(users)
)
s = sql_util.ClauseAdapter(ualias).traverse(s)
j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)
self.assert_compile(
sql_util.ClauseAdapter(j1).traverse(s),
"SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users_1.id = "
"addresses.user_id",
)
def test_prev_entities_adapt(self):
"""test #6503"""
m = MetaData()
users = Table("users", m, Column("id", Integer, primary_key=True))
addresses = Table(
"addresses",
m,
Column("id", Integer, primary_key=True),
Column("user_id", ForeignKey("users.id")),
)
ualias = users.alias()
s = select(users).join(addresses).with_only_columns(addresses.c.id)
s = sql_util.ClauseAdapter(ualias).traverse(s)
self.assert_compile(
s,
"SELECT addresses.id FROM users AS users_1 "
"JOIN addresses ON users_1.id = addresses.user_id",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_1(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label("foo"))
assert list(_from_objects(ff)) == [t1alias]
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_2(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(select(literal_column("*")).select_from(t1)),
"SELECT * FROM table1 AS t1alias",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_3(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
select(literal_column("*")).where(t1.c.col1 == t2.c.col2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = table2.col2",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_4(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
select(literal_column("*"))
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = table2.col2",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_5(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select(t1alias, t2).where(
t1alias.c.col1
== vis.traverse(
select(literal_column("*"))
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t1)
.scalar_subquery()
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1 AS col1_1, table2.col2 AS col2_1, "
"table2.col3 AS col3_1 "
"FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
"(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)",
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_6(self, use_adapt_from):
t1alias = t1.alias("t1alias")
if use_adapt_from:
vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
else:
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select(t1alias, t2).where(
t1alias.c.col1
== vis.traverse(
select(literal_column("*"))
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t2)
.scalar_subquery()
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1 AS col1_1, table2.col2 AS col2_1, "
"table2.col3 AS col3_1 "
"FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = "
"(SELECT * FROM table1 AS t1alias "
"WHERE t1alias.col1 = table2.col2)",
)
def test_table_to_alias_7(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(case((t1.c.col1 == 5, t1.c.col2), else_=t1.c.col1)),
"CASE WHEN (t1alias.col1 = :col1_1) THEN "
"t1alias.col2 ELSE t1alias.col1 END",
)
def test_table_to_alias_8(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
case((5, t1.c.col2), value=t1.c.col1, else_=t1.c.col1)
),
"CASE t1alias.col1 WHEN :param_1 THEN "
"t1alias.col2 ELSE t1alias.col1 END",
)
def test_table_to_alias_9(self):
s = select(literal_column("*")).select_from(t1).alias("foo")
self.assert_compile(
s.select(), "SELECT foo.* FROM (SELECT * FROM table1) AS foo"
)
def test_table_to_alias_10(self):
s = select(literal_column("*")).select_from(t1).alias("foo")
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(s.select()),
"SELECT foo.* FROM (SELECT * FROM table1 AS t1alias) AS foo",
)
def test_table_to_alias_11(self):
s = select(literal_column("*")).select_from(t1).alias("foo")
self.assert_compile(
s.select(), "SELECT foo.* FROM (SELECT * FROM table1) AS foo"
)
def test_table_to_alias_12(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label("foo"))
self.assert_compile(
select(ff),
"SELECT count(t1alias.col1) AS foo FROM table1 AS t1alias",
)
assert list(_from_objects(ff)) == [t1alias]
# def test_table_to_alias_2(self):
# TODO: self.assert_compile(vis.traverse(select(func.count(t1.c
# .col1).l abel('foo')), clone=True), "SELECT
# count(t1alias.col1) AS foo FROM table1 AS t1alias")
def test_table_to_alias_13(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select(literal_column("*")).where(t1.c.col1 == t2.c.col2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"AS t2alias WHERE t1alias.col1 = "
"t2alias.col2",
)
def test_table_to_alias_14(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select("*").where(t1.c.col1 == t2.c.col2).select_from(t1, t2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"AS t2alias WHERE t1alias.col1 = "
"t2alias.col2",
)
def test_table_to_alias_15(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
select(t1alias, t2alias).where(
t1alias.c.col1
== vis.traverse(
select("*")
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t1)
.scalar_subquery()
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"t2alias.col1 AS col1_1, t2alias.col2 AS col2_1, "
"t2alias.col3 AS col3_1 "
"FROM table1 AS t1alias, table2 AS t2alias "
"WHERE t1alias.col1 = "
"(SELECT * FROM table2 AS t2alias "
"WHERE t1alias.col1 = t2alias.col2)",
)
def test_table_to_alias_16(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
t2alias.select().where(
t2alias.c.col2
== vis.traverse(
select("*")
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t2)
.scalar_subquery()
)
),
"SELECT t2alias.col1, t2alias.col2, t2alias.col3 "
"FROM table2 AS t2alias WHERE t2alias.col2 = "
"(SELECT * FROM table1 AS t1alias WHERE "
"t1alias.col1 = t2alias.col2)",
)
def test_include_exclude(self):
m = MetaData()
a = Table(
"a",
m,
Column("id", Integer, primary_key=True),
Column(
"xxx_id",
Integer,
ForeignKey("a.id", name="adf", use_alter=True),
),
)
e = a.c.id == a.c.xxx_id
assert str(e) == "a.id = a.xxx_id"
b = a.alias()
e = sql_util.ClauseAdapter(
b,
include_fn=lambda x: x in {a.c.id},
equivalents={a.c.id: {a.c.id}},
).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
def test_recursive_equivalents(self):
m = MetaData()
a = Table("a", m, Column("x", Integer), Column("y", Integer))
b = Table("b", m, Column("x", Integer), Column("y", Integer))
c = Table("c", m, Column("x", Integer), Column("y", Integer))
# force a recursion overflow, by linking a.c.x<->c.c.x, and
# asking for a nonexistent col. corresponding_column should prevent
# endless depth.
adapt = sql_util.ClauseAdapter(
b, equivalents={a.c.x: {c.c.x}, c.c.x: {a.c.x}}
)
assert adapt._corresponding_column(a.c.x, False) is None
def test_multilevel_equivalents(self):
m = MetaData()
a = Table("a", m, Column("x", Integer), Column("y", Integer))
b = Table("b", m, Column("x", Integer), Column("y", Integer))
c = Table("c", m, Column("x", Integer), Column("y", Integer))
alias = select(a).select_from(a.join(b, a.c.x == b.c.x)).alias()
# two levels of indirection from c.x->b.x->a.x, requires recursive
# corresponding_column call
adapt = sql_util.ClauseAdapter(
alias, equivalents={b.c.x: {a.c.x}, c.c.x: {b.c.x}}
)
assert adapt._corresponding_column(a.c.x, False) is alias.c.x
assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def test_join_to_alias(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
c = Table(
"c",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
d = Table(
"d",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
j1 = a.outerjoin(b)
j2 = (
select(j1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
j3 = c.join(j2, j2.c.b_id == c.c.bid)
j4 = j3.outerjoin(d)
self.assert_compile(
j4,
"c JOIN (SELECT a.id AS a_id, b.id AS "
"b_id, b.aid AS b_aid FROM a LEFT OUTER "
"JOIN b ON a.id = b.aid) AS anon_1 ON anon_1.b_id = c.bid "
"LEFT OUTER JOIN d ON anon_1.a_id = d.aid",
)
j5 = (
j3.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery("foo")
)
j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]
# this statement takes c join(a join b), wraps it inside an
# aliased "select * from c join(a join b) AS foo". the outermost
# right side "left outer join d" stays the same, except "d"
# joins against foo.a_id instead of plain "a_id"
self.assert_compile(
j6,
"(SELECT c.id AS c_id, c.bid AS c_bid, "
"anon_1.a_id AS anon_1_a_id, anon_1.b_id AS anon_1_b_id, "
"anon_1.b_aid AS "
"anon_1_b_aid FROM c JOIN (SELECT a.id AS a_id, "
"b.id AS b_id, b.aid AS b_aid FROM a LEFT "
"OUTER JOIN b ON a.id = b.aid) AS anon_1 ON anon_1.b_id = "
"c.bid) AS foo LEFT OUTER JOIN d ON "
"foo.anon_1_a_id = d.aid",
)
def test_derived_from(self):
assert select(t1).is_derived_from(t1)
assert not select(t2).is_derived_from(t1)
assert not t1.is_derived_from(select(t1))
assert t1.alias().is_derived_from(t1)
s1 = select(t1, t2).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
assert s2.is_derived_from(s1)
s2 = s2._clone()
assert s2.is_derived_from(s1)
def test_aliasedselect_to_aliasedselect_straight(self):
# original issue from ticket #904
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
self.assert_compile(
sql_util.ClauseAdapter(s2).traverse(s1),
"SELECT foo.col1, foo.col2, foo.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 "
"AS col2, table1.col3 AS col3 FROM table1) "
"AS foo LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
)
def test_aliasedselect_to_aliasedselect_join(self):
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
self.assert_compile(
sql_util.ClauseAdapter(s2).traverse(j).select(),
"SELECT anon_1.col1, anon_1.col2, "
"anon_1.col3, table2.col1 AS col1_1, table2.col2 AS col2_1, "
"table2.col3 AS col3_1 FROM (SELECT foo.col1 AS "
"col1, foo.col2 AS col2, foo.col3 AS col3 "
"FROM (SELECT table1.col1 AS col1, "
"table1.col2 AS col2, table1.col3 AS col3 "
"FROM table1) AS foo LIMIT :param_1 OFFSET "
":param_2) AS anon_1 LEFT OUTER JOIN "
"table2 ON anon_1.col1 = table2.col1",
{"param_1": 5, "param_2": 10},
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_aliasedselect_to_aliasedselect_join_nested_table(
self, use_adapt_from
):
"""test the logic in clauseadapter regarding not traversing aliases.
adapt_from_selectables case added to test #6762, which is a regression
from #6060
"""
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
talias = t1.alias("bar")
# here is the problem. s2 is derived from s1 which is derived
# from t1
assert s2.is_derived_from(t1)
# however, s2 is not derived from talias, which *is* derived from t1
assert not s2.is_derived_from(talias)
# therefore, talias gets its table replaced, except for a rule
# we added to ClauseAdapter to stop traversal if the selectable is
# not derived from an alias of a table. This rule was previously
# in Alias._copy_internals().
j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)
if use_adapt_from:
vis = sql_util.ClauseAdapter(s2, adapt_from_selectables=[s1])
else:
vis = sql_util.ClauseAdapter(s2)
self.assert_compile(
vis.traverse(j).select(),
"SELECT anon_1.col1, anon_1.col2, "
"anon_1.col3, bar.col1 AS col1_1, bar.col2 AS col2_1, "
"bar.col3 AS col3_1 "
"FROM (SELECT foo.col1 AS col1, foo.col2 "
"AS col2, foo.col3 AS col3 FROM (SELECT "
"table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1) AS foo "
"LIMIT :param_1 OFFSET :param_2) AS anon_1 "
"LEFT OUTER JOIN table1 AS bar ON "
"anon_1.col1 = bar.col1",
{"param_1": 5, "param_2": 10},
)
def test_functions(self):
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).traverse(func.count(t1.c.col1)),
"count(table1_1.col1)",
)
s = select(func.count(t1.c.col1))
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).traverse(s),
"SELECT count(table1_1.col1) AS count_1 "
"FROM table1 AS table1_1",
)
def test_table_valued_column(self):
"""test #6775"""
stmt = select(func.some_json_func(t1.table_valued()))
self.assert_compile(
stmt,
"SELECT some_json_func(table1) AS some_json_func_1 FROM table1",
)
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).traverse(stmt),
"SELECT some_json_func(table1_1) AS some_json_func_1 "
"FROM table1 AS table1_1",
)
def test_recursive(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
c = Table(
"c",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
d = Table(
"d",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
u = union(
a.join(b).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
a.join(d).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
).alias()
self.assert_compile(
sql_util.ClauseAdapter(u).traverse(
select(c.c.bid).where(c.c.bid == u.c.b_aid)
),
"SELECT c.bid "
"FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
"AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1 "
"WHERE c.bid = anon_1.b_aid",
)
def test_label_anonymize_one(self):
t1a = t1.alias()
adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label("expr")
expr_adapted = adapter.traverse(expr)
stmt = select(expr, expr_adapted).order_by(expr, expr_adapted)
self.assert_compile(
stmt,
"SELECT "
"(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
"AS expr, "
"(SELECT table1_1.col2 FROM table1 AS table1_1 "
"WHERE table1_1.col3 = :col3_2) AS anon_1 "
"ORDER BY expr, anon_1",
)
def test_label_anonymize_two(self):
t1a = t1.alias()
adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label(None)
expr_adapted = adapter.traverse(expr)
stmt = select(expr, expr_adapted).order_by(expr, expr_adapted)
self.assert_compile(
stmt,
"SELECT "
"(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
"AS anon_1, "
"(SELECT table1_1.col2 FROM table1 AS table1_1 "
"WHERE table1_1.col3 = :col3_2) AS anon_2 "
"ORDER BY anon_1, anon_2",
)
def test_label_anonymize_three(self):
t1a = t1.alias()
adapter = sql_util.ColumnAdapter(
t1a, anonymize_labels=True, allow_label_resolve=False
)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label(None)
l1 = expr
is_(l1._order_by_label_element, l1)
eq_(l1._allow_label_resolve, True)
expr_adapted = adapter.traverse(expr)
l2 = expr_adapted
is_(l2._order_by_label_element, l2)
eq_(l2._allow_label_resolve, False)
l3 = adapter.traverse(expr)
is_(l3._order_by_label_element, l3)
eq_(l3._allow_label_resolve, False)
| ClauseAdapterTest |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/selector.py | {
"start": 6498,
"end": 6887
} | class ____(ComputedBase):
"""Dead selector composite condition class."""
@staticmethod
def visit_eq(value: list[QueryType]) -> Condition:
return is_dead_click(ClickSelectorComposite.visit_eq(value))
@staticmethod
def visit_neq(value: list[QueryType]) -> Condition:
return is_dead_click(ClickSelectorComposite.visit_neq(value))
| DeadClickSelectorComposite |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 78,
"end": 153
} | class ____(TypedDict):
anomaly_type: str
anomaly_score: float
| Anomaly |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 9231,
"end": 9471
} | class ____(_Permission[ClusterAction]):
def _to_weaviate(self) -> List[WeaviatePermission]:
return [
{
"action": action,
}
for action in self.actions
]
| _ClusterPermission |
python | getsentry__sentry | src/sentry/api/serializers/models/projectownership.py | {
"start": 377,
"end": 632
} | class ____(ProjectOwnershipResponseOptional):
raw: str
fallthrough: bool
dateCreated: datetime
lastUpdated: datetime
isActive: bool
autoAssignment: str
codeownersAutoSync: bool
@register(ProjectOwnership)
| ProjectOwnershipResponse |
python | ApeWorX__ape | tests/functional/conversion/test_encode_structs.py | {
"start": 762,
"end": 3170
} | class ____(Struct):
a: int
b: bytes
c: bool
d: AddressType
e: str # Gets ignored because not in ABI.
EXPECTED = HexBytes(
"0000000000000000000000000000000000000000000000000000000000000001"
"0200000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000001"
"000000000000000000000000d9b7fdb3fc0a0aa3a507dcf0976bc23d49a9c7a3"
)
ADDRESS = cast(AddressType, "0xD9b7fdb3FC0A0Aa3A507dCf0976bc23D49a9C7A3")
DATA_BY_TYPE_KEY = {
"tuple": (1, HexBytes("0x02"), True, ADDRESS),
"dict": {"a": 1, "b": HexBytes("0x02"), "c": True, "d": ADDRESS},
"object": Struct(a=1, b=HexBytes("0x02"), c=True, d=ADDRESS),
}
@pytest.mark.parametrize("data_type", list(DATA_BY_TYPE_KEY.keys()))
def test_encode_structs(data_type, ethereum):
data = DATA_BY_TYPE_KEY[data_type]
encode_calldata = ethereum.encode_calldata
assert encode_calldata(ABI, data) == EXPECTED
def test_encode_structs_as_tuple_with_unconverted(sender, ethereum):
normal_data: tuple = DATA_BY_TYPE_KEY["tuple"] # type: ignore[assignment]
data = list(normal_data)
data[-1] = sender
actual = ethereum.encode_calldata(ABI, normal_data)
assert actual == EXPECTED
def test_encode_structs_as_dict_with_unconverted(sender, ethereum):
normal_data: dict = DATA_BY_TYPE_KEY["dict"] # type: ignore[assignment]
data = dict(normal_data)
data["d"] = sender
actual = ethereum.encode_calldata(ABI, normal_data)
assert actual == EXPECTED
def test_encode_structs_as_object_with_unconverted(sender, ethereum):
normal_data: Struct = DATA_BY_TYPE_KEY["object"] # type: ignore[assignment]
data = normal_data.model_copy()
data.d = sender
actual = ethereum.encode_calldata(ABI, normal_data)
assert actual == EXPECTED
def test_encode_struct_using_dict_with_more_fields(sender, ethereum):
normal_data: dict = DATA_BY_TYPE_KEY["dict"] # type: ignore[assignment]
data = dict(normal_data)
data["extra"] = "foobar" # Should be ignored since not in ABI.
actual = ethereum.encode_calldata(ABI, normal_data)
assert actual == EXPECTED
def test_encode_struct_using_object_with_more_fields(sender, ethereum):
obj = SimilarStruct(a=1, b=HexBytes("0x02"), c=True, d=ADDRESS, e="foobar")
actual = ethereum.encode_calldata(ABI, obj)
assert actual == EXPECTED
| SimilarStruct |
python | huggingface__transformers | tests/models/funnel/test_modeling_funnel.py | {
"start": 15687,
"end": 17906
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(FunnelBaseModel, FunnelForMultipleChoice, FunnelForSequenceClassification) if is_torch_available() else ()
)
def setUp(self):
self.model_tester = FunnelModelTester(self, base=True)
self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_base_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
# overwrite from test_modeling_common
def test_training(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class.__name__ == "FunnelBaseModel":
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]:
if hasattr(module, param) and getattr(module, param) is not None:
weight = getattr(module, param)
weight.data.fill_(3)
@require_torch
@require_sentencepiece
@require_tokenizers
| FunnelBaseModelTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_glue.py | {
"start": 6894,
"end": 8249
} | class ____:
EXPECTED_WAITER_NAME = "data_quality_rule_recommendation_run_complete"
RUN_ID = "1234567890abc"
def test_serialization(self):
"""Assert that arguments and classpath are correctly serialized."""
trigger = GlueDataQualityRuleRecommendationRunCompleteTrigger(recommendation_run_id=self.RUN_ID)
classpath, kwargs = trigger.serialize()
assert classpath == BASE_TRIGGER_CLASSPATH + "GlueDataQualityRuleRecommendationRunCompleteTrigger"
assert kwargs.get("recommendation_run_id") == self.RUN_ID
@pytest.mark.asyncio
@mock.patch.object(GlueDataQualityHook, "get_waiter")
@mock.patch.object(GlueDataQualityHook, "get_async_conn")
async def test_run_success(self, mock_async_conn, mock_get_waiter):
mock_async_conn.__aenter__.return_value = mock.MagicMock()
mock_get_waiter().wait = AsyncMock()
trigger = GlueDataQualityRuleRecommendationRunCompleteTrigger(recommendation_run_id=self.RUN_ID)
generator = trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({"status": "success", "recommendation_run_id": self.RUN_ID})
assert_expected_waiter_type(mock_get_waiter, self.EXPECTED_WAITER_NAME)
mock_get_waiter().wait.assert_called_once()
| TestGlueDataQualityRuleRecommendationRunCompleteTrigger |
python | kubernetes-client__python | kubernetes/client/models/v1_port_status.py | {
"start": 383,
"end": 6026
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'error': 'str',
'port': 'int',
'protocol': 'str'
}
attribute_map = {
'error': 'error',
'port': 'port',
'protocol': 'protocol'
}
def __init__(self, error=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
"""V1PortStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._error = None
self._port = None
self._protocol = None
self.discriminator = None
if error is not None:
self.error = error
self.port = port
self.protocol = protocol
@property
def error(self):
"""Gets the error of this V1PortStatus. # noqa: E501
Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
:return: The error of this V1PortStatus. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this V1PortStatus.
Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
:param error: The error of this V1PortStatus. # noqa: E501
:type: str
"""
self._error = error
@property
def port(self):
"""Gets the port of this V1PortStatus. # noqa: E501
Port is the port number of the service port of which status is recorded here # noqa: E501
:return: The port of this V1PortStatus. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1PortStatus.
Port is the port number of the service port of which status is recorded here # noqa: E501
:param port: The port of this V1PortStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def protocol(self):
"""Gets the protocol of this V1PortStatus. # noqa: E501
Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
:return: The protocol of this V1PortStatus. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this V1PortStatus.
Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
:param protocol: The protocol of this V1PortStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and protocol is None: # noqa: E501
raise ValueError("Invalid value for `protocol`, must not be `None`") # noqa: E501
self._protocol = protocol
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PortStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PortStatus):
return True
return self.to_dict() != other.to_dict()
| V1PortStatus |
python | PrefectHQ__prefect | tests/server/orchestration/test_global_policy.py | {
"start": 16491,
"end": 18169
} | class ____:
@pytest.mark.parametrize(
"initial_state_type", [states.StateType.PAUSED, states.StateType.PENDING]
)
async def test_rule_unsets_resuming_indicator_on_running(
self,
session,
initial_state_type,
initialize_orchestration,
):
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
resuming=True,
)
assert ctx.run.empirical_policy.resuming
async with RemoveResumingIndicator(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert not ctx.run.empirical_policy.resuming
@pytest.mark.parametrize(
"initial_state_type", [states.StateType.PAUSED, states.StateType.PENDING]
)
async def test_running_resuming_flow_does_not_increment_run_count(
self,
session,
initial_state_type,
initialize_orchestration,
):
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
resuming=True,
flow_run_count=42,
)
assert ctx.run.empirical_policy.resuming
assert ctx.run.run_count == 42
async with IncrementFlowRunCount(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.run.run_count == 42
| TestPausingRules |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 10842,
"end": 10980
} | class ____(_NumberBoundError):
code = 'number.not_lt'
msg_template = 'ensure this value is less than {limit_value}'
| NumberNotLtError |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/rpc_communicator.py | {
"start": 1098,
"end": 6729
} | class ____(Communicator):
def __init__(self, worker_id=0, base_port=5005, timeout_wait=30):
"""
Python side of the grpc communication. Python is the server and Unity the client
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
:int worker_id: Offset from base_port. Used for training multiple environments simultaneously.
:int timeout_wait: Timeout (in seconds) to wait for a response before exiting.
"""
super().__init__(worker_id, base_port)
self.port = base_port + worker_id
self.worker_id = worker_id
self.timeout_wait = timeout_wait
self.server = None
self.unity_to_external = None
self.is_open = False
self.create_server()
def create_server(self):
"""
Creates the GRPC server.
"""
self.check_port(self.port)
try:
# Establish communication grpc
self.server = grpc.server(
thread_pool=ThreadPoolExecutor(max_workers=10),
options=(("grpc.so_reuseport", 1),),
)
self.unity_to_external = UnityToExternalServicerImplementation()
add_UnityToExternalProtoServicer_to_server(
self.unity_to_external, self.server
)
# Using unspecified address, which means that grpc is communicating on all IPs
# This is so that the docker container can connect.
self.server.add_insecure_port("[::]:" + str(self.port))
self.server.start()
self.is_open = True
except Exception:
raise UnityWorkerInUseException(self.worker_id)
def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if platform == "linux" or platform == "linux2":
# On linux, the port remains unusable for TIME_WAIT=60 seconds after closing
# SO_REUSEADDR frees the port right after closing the environment
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(("localhost", port))
except OSError:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close()
def poll_for_timeout(self, poll_callback: Optional[PollCallback] = None) -> None:
"""
Polls the GRPC parent connection for data, to be used before calling recv. This prevents
us from hanging indefinitely in the case where the environment process has died or was not
launched.
Additionally, a callback can be passed to periodically check the state of the environment.
This is used to detect the case when the environment dies without cleaning up the connection,
so that we can stop sooner and raise a more appropriate error.
"""
deadline = time.monotonic() + self.timeout_wait
callback_timeout_wait = self.timeout_wait // 10
while time.monotonic() < deadline:
if self.unity_to_external.parent_conn.poll(callback_timeout_wait):
# Got an acknowledgment from the connection
return
if poll_callback:
# Fire the callback - if it detects something wrong, it should raise an exception.
poll_callback()
# Got this far without reading any data from the connection, so it must be dead.
raise UnityTimeOutException(
"The Unity environment took too long to respond. Make sure that :\n"
"\t The environment does not need user interaction to launch\n"
'\t The Agents\' Behavior Parameters > Behavior Type is set to "Default"\n'
"\t The environment and the Python interface have compatible versions.\n"
"\t If you're running on a headless server without graphics support, turn off display "
"by either passing --no-graphics option or build your Unity executable as server build."
)
def initialize(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> UnityOutputProto:
self.poll_for_timeout(poll_callback)
aca_param = self.unity_to_external.parent_conn.recv().unity_output
message = UnityMessageProto()
message.header.status = 200
message.unity_input.CopyFrom(inputs)
self.unity_to_external.parent_conn.send(message)
self.unity_to_external.parent_conn.recv()
return aca_param
def exchange(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> Optional[UnityOutputProto]:
message = UnityMessageProto()
message.header.status = 200
message.unity_input.CopyFrom(inputs)
self.unity_to_external.parent_conn.send(message)
self.poll_for_timeout(poll_callback)
output = self.unity_to_external.parent_conn.recv()
if output.header.status != 200:
return None
return output.unity_output
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the grpc connection.
"""
if self.is_open:
message_input = UnityMessageProto()
message_input.header.status = 400
self.unity_to_external.parent_conn.send(message_input)
self.unity_to_external.parent_conn.close()
self.server.stop(False)
self.is_open = False
| RpcCommunicator |
python | ray-project__ray | python/ray/train/_internal/framework_checkpoint.py | {
"start": 258,
"end": 1491
} | class ____(Checkpoint):
"""A checkpoint to preserve the functionality of legacy
framework-specific checkpoints.
Example:
>>> import tempfile
>>> checkpoint = FrameworkCheckpoint(tempfile.mkdtemp())
>>> checkpoint.get_preprocessor() is None
True
>>> preprocessor = Preprocessor()
>>> preprocessor._attr = 1234
>>> checkpoint.set_preprocessor(preprocessor)
>>> checkpoint.get_preprocessor()._attr
1234
"""
def get_preprocessor(self) -> Optional[Preprocessor]:
"""Return the preprocessor stored in the checkpoint.
Returns:
The preprocessor stored in the checkpoint, or ``None`` if no
preprocessor was stored.
"""
metadata = self.get_metadata()
preprocessor_bytes = metadata.get(PREPROCESSOR_KEY)
if preprocessor_bytes is None:
return None
return ray_pickle.loads(hex_to_binary(preprocessor_bytes))
def set_preprocessor(self, preprocessor: Preprocessor):
"""Store a preprocessor with the checkpoint."""
self.update_metadata(
{PREPROCESSOR_KEY: binary_to_hex(ray_pickle.dumps(preprocessor))}
)
| FrameworkCheckpoint |
python | rushter__MLAlgorithms | mla/ensemble/gbm.py | {
"start": 1421,
"end": 1632
} | class ____(Loss):
"""Least squares loss"""
def grad(self, actual, predicted):
return actual - predicted
def hess(self, actual, predicted):
return np.ones_like(actual)
| LeastSquaresLoss |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/pgvector_processor.py | {
"start": 1169,
"end": 1846
} | class ____(SqlConfig):
"""Configuration for the Postgres cache.
Also inherits config from the JsonlWriter, which is responsible for writing files to disk.
"""
host: str
port: int
database: str
username: str
password: SecretString | str
@overrides
def get_sql_alchemy_url(self) -> SecretString:
"""Return the SQLAlchemy URL to use."""
return SecretString(
f"postgresql+psycopg2://{self.username}:{self.password}@{self.host}:{self.port}/{self.database}"
)
@overrides
def get_database_name(self) -> str:
"""Return the name of the database."""
return self.database
| PostgresConfig |
python | skorch-dev__skorch | skorch/exceptions.py | {
"start": 741,
"end": 860
} | class ____(SkorchException):
"""Error raised when the predictions of an LLM have low probability"""
| LowProbabilityError |
python | spyder-ide__spyder | installers-conda/build_conda_pkgs.py | {
"start": 8550,
"end": 11898
} | class ____(BuildCondaPkg):
name = "spyder"
norm = False
source = os.environ.get('SPYDER_SOURCE', HERE.parent)
feedstock = "https://github.com/conda-forge/spyder-feedstock"
feedstock_branch = get_spy_feedstock_branch()
def _patch_source(self):
self.logger.info("Patching Spyder source...")
file = self._bld_src / "spyder/__init__.py"
file_text = file.read_text()
ver_str = tuple(self.version.split('.'))
file_text = re.sub(
r'^(version_info = ).*',
rf'\g<1>{ver_str}',
file_text,
flags=re.MULTILINE
)
file.write_text(file_text)
# Only write patch if necessary
if self.repo.git.diff():
self.logger.info(f"Creating {self._patchfile.name}...")
self.repo.git.diff(output=self._patchfile.as_posix())
self.repo.git.stash()
def patch_recipe(self):
# Get current Spyder requirements
spyder_base_reqs = ['python']
spyder_base_reqs += yaml.load(
REQ_MAIN.read_text())['dependencies']
if os.name == 'nt':
win_requirements = yaml.load(
REQ_WINDOWS.read_text())['dependencies']
spyder_base_reqs += win_requirements
spyder_base_reqs.append('ptyprocess >=0.5')
elif sys.platform == 'darwin':
mac_requirements = yaml.load(
REQ_MAC.read_text())['dependencies']
if 'python.app' in mac_requirements:
mac_requirements.remove('python.app')
spyder_base_reqs += mac_requirements
spyder_base_reqs.append('__osx')
else:
linux_requirements = yaml.load(
REQ_LINUX.read_text())['dependencies']
spyder_base_reqs += linux_requirements
spyder_base_reqs.append('__linux')
spyder_reqs = [f"spyder-base =={self.version}"]
for req in spyder_base_reqs.copy():
if req.startswith(
('pyqt ', 'pyqtwebengine ', 'qtconsole ', 'fcitx-qt5 ')
):
spyder_reqs.append(req)
spyder_base_reqs.remove(req)
if req.startswith('qtconsole '):
spyder_base_reqs.append(
req.replace('qtconsole', 'qtconsole-base')
)
if sys.platform == "darwin":
spyder_base_reqs.append("__osx")
if sys.platform.startswith("linux"):
spyder_base_reqs.append("__linux")
spyder_reqs.append("__linux")
self.recipe_clobber.update({
"requirements": {"run": spyder_base_reqs},
# Since outputs is a list, the entire list must be reproduced with
# the current run requirements
"outputs": [
{
"name": "spyder-base"
},
{
"name": "spyder",
"build": {"noarch": "python"},
"requirements": {"run": spyder_reqs},
"test": {
"requires": ["pip"],
"commands": ["spyder -h", "python -m pip check"],
"imports": ["spyder"]
}
}
]
})
super().patch_recipe()
| SpyderCondaPkg |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self8.py | {
"start": 551,
"end": 643
} | class ____(str):
pass
v1 = str.__new__(MyStr)
reveal_type(v1, expected_text="MyStr")
| MyStr |
python | plotly__plotly.py | plotly/graph_objs/histogram2d/_textfont.py | {
"start": 233,
"end": 9876
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2d"
_path_str = "histogram2d.textfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | psf__black | tests/data/cases/comments2.py | {
"start": 7332,
"end": 7640
} | class ____:
def _init_host(self, parsed) -> None:
if parsed.hostname is None or not parsed.hostname.strip(): # type: ignore
pass
#######################
### SECTION COMMENT ###
#######################
instruction() # comment with bad spacing
# END COMMENTS
# MORE END COMMENTS
| Test |
python | doocs__leetcode | solution/0000-0099/0009.Palindrome Number/Solution.py | {
"start": 0,
"end": 247
} | class ____:
def isPalindrome(self, x: int) -> bool:
if x < 0 or (x and x % 10 == 0):
return False
y = 0
while y < x:
y = y * 10 + x % 10
x //= 10
return x in (y, y // 10)
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_testdecorators.py | {
"start": 2513,
"end": 12371
} | class ____:
@given(integers())
def test_abs_non_negative(self, x):
assert abs(x) >= 0
assert isinstance(self, TestCases)
@given(x=integers())
def test_abs_non_negative_varargs(self, x, *args):
assert abs(x) >= 0
assert isinstance(self, TestCases)
@given(x=integers())
def test_abs_non_negative_varargs_kwargs(self, *args, **kw):
assert abs(kw["x"]) >= 0
assert isinstance(self, TestCases)
@given(x=integers())
def test_abs_non_negative_varargs_kwargs_only(*args, **kw):
assert abs(kw["x"]) >= 0
assert isinstance(args[0], TestCases)
@fails
@given(integers())
def test_int_is_always_negative(self, x):
assert x < 0
@fails
@given(floats(), floats())
def test_float_addition_cancels(self, x, y):
assert x + (y - x) == y
@fails
@given(x=integers(min_value=0, max_value=3), name=text())
def test_can_be_given_keyword_args(x, name):
assume(x > 0)
assert len(name) < x
@fails
@given(one_of(floats(), booleans()), one_of(floats(), booleans()))
def test_one_of_produces_different_values(x, y):
assert type(x) == type(y)
@given(just(42))
def test_is_the_answer(x):
assert x == 42
@given(integers(1, 10))
def test_integers_are_in_range(x):
assert 1 <= x <= 10
@given(integers(min_value=100))
def test_integers_from_are_from(x):
assert x >= 100
def test_does_not_catch_interrupt_during_falsify():
called = False
@given(integers())
def flaky_base_exception(x):
nonlocal called
if not called:
called = True
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
flaky_base_exception()
@given(lists(integers(), unique=True), integers())
def test_removing_an_element_from_a_unique_list(xs, y):
assume(len(set(xs)) == len(xs))
try:
xs.remove(y)
except ValueError:
pass
assert y not in xs
@fails
@given(lists(integers(), min_size=2), data())
def test_removing_an_element_from_a_non_unique_list(xs, data):
y = data.draw(sampled_from(xs))
xs.remove(y)
assert y not in xs
@given(sets(sampled_from(list(range(10)))))
def test_can_test_sets_sampled_from(xs):
assert all(isinstance(x, int) for x in xs)
assert all(0 <= x < 10 for x in xs)
mix = one_of(sampled_from([1, 2, 3]), text())
@fails
@given(mix, mix)
def test_can_mix_sampling_with_generating(x, y):
assert type(x) == type(y)
@fails
@given(frozensets(integers()))
def test_can_find_large_sum_frozenset(xs):
assert sum(xs) < 100
def test_prints_on_failure_by_default():
@given(integers(), integers())
@settings(max_examples=1000)
def test_ints_are_sorted(balthazar, evans):
assume(evans >= 0)
assert balthazar <= evans
assert_falsifying_output(test_ints_are_sorted, balthazar=1, evans=0)
def test_does_not_print_on_success():
@settings(verbosity=Verbosity.normal)
@given(integers())
def test_is_an_int(x):
return
with capture_out() as out:
test_is_an_int()
out = out.getvalue()
lines = [l.strip() for l in out.split("\n")]
assert all(not l for l in lines), lines
@given(sampled_from([1]))
def test_can_sample_from_single_element(x):
assert x == 1
@fails
@given(lists(integers()))
def test_list_is_sorted(xs):
assert sorted(xs) == xs
@fails
@given(floats(1.0, 2.0))
def test_is_an_endpoint(x):
assert x in {1.0, 2.0}
def test_breaks_bounds():
@fails
@given(x=integers())
@settings(derandomize=True, max_examples=10_000)
def test_is_bounded(t, x):
assert x < t
for t in [1, 10, 100, 1000]:
test_is_bounded(t)
@given(x=booleans())
def test_can_test_kwargs_only_methods(**kwargs):
assert isinstance(kwargs["x"], bool)
@fails_with(UnicodeEncodeError)
@given(text())
@settings(max_examples=100)
def test_is_ascii(x):
x.encode("ascii")
@fails
@given(text())
def test_is_not_ascii(x):
try:
x.encode("ascii")
raise AssertionError
except UnicodeEncodeError:
pass
@fails
@given(text(min_size=2))
@settings(max_examples=100, derandomize=True)
def test_can_find_string_with_duplicates(s):
assert len(set(s)) == len(s)
@fails
@given(text(min_size=1))
@settings(derandomize=True)
def test_has_ascii(x):
if not x:
return
ascii_characters = (
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ \t\n"
)
assert any(c in ascii_characters for c in x)
@xfail_on_crosshair(Why.symbolic_outside_context, strict=False)
def test_can_derandomize():
values = []
@fails
@given(integers())
@settings(derandomize=True, database=None)
def test_blah(x):
values.append(x)
assert x > 0
test_blah()
assert values
v1 = values
values = []
test_blah()
assert v1 == values
def test_can_run_without_database():
@given(integers())
@settings(database=None)
def test_blah(x):
raise AssertionError
with pytest.raises(AssertionError):
test_blah()
@skipif_emscripten
def test_can_run_with_database_in_thread():
results = []
@given(integers())
def test_blah(x):
raise ValueError
def run_test():
try:
test_blah()
except ValueError:
results.append("success")
# Run once in the main thread and once in another thread. Execution is
# strictly serial, so no need for locking.
run_test()
assert results == ["success"]
thread = threading.Thread(target=run_test)
thread.start()
thread.join()
assert results == ["success", "success"]
@given(integers())
def test_can_call_an_argument_f(f):
# See issue https://github.com/HypothesisWorks/hypothesis-python/issues/38
# for details
pass
Litter = namedtuple("Litter", ("kitten1", "kitten2"))
@given(builds(Litter, integers(), integers()))
def test_named_tuples_are_of_right_type(litter):
assert isinstance(litter, Litter)
@fails_with(AttributeError)
@given(integers().map(lambda x: x.nope))
@settings(suppress_health_check=list(HealthCheck))
def test_fails_in_reify(x):
pass
@given(text("a"))
def test_a_text(x):
assert set(x).issubset(set("a"))
@given(text(""))
def test_empty_text(x):
assert not x
@given(text("abcdefg"))
def test_mixed_text(x):
assert set(x).issubset(set("abcdefg"))
@xfail_on_crosshair(Why.other, strict=False) # runs ~five failing examples
def test_when_set_to_no_simplifies_runs_failing_example_twice():
failing = []
@given(integers())
@settings(
phases=no_shrink,
max_examples=100,
verbosity=Verbosity.normal,
report_multiple_bugs=False,
)
def foo(x):
if x > 11:
note("Lo")
failing.append(x)
raise AssertionError
with pytest.raises(AssertionError) as err:
foo()
assert len(failing) == 2
assert len(set(failing)) == 1
assert "Falsifying example" in "\n".join(err.value.__notes__)
assert "Lo" in err.value.__notes__
@given(integers().filter(lambda x: x % 4 == 0))
def test_filtered_values_satisfy_condition(i):
assert i % 4 == 0
def nameless_const(x):
def f(u, v):
return u
return functools.partial(f, x)
@given(sets(booleans()).map(nameless_const(2)))
def test_can_map_nameless(x):
assert x == 2
@given(integers(0, 10).flatmap(nameless_const(just(3))))
def test_can_flatmap_nameless(x):
assert x == 3
def test_can_be_used_with_none_module():
def test_is_cool(i):
pass
test_is_cool.__module__ = None
test_is_cool = given(integers())(test_is_cool)
test_is_cool()
def test_does_not_print_notes_if_all_succeed():
@given(integers())
@settings(verbosity=Verbosity.normal)
def test(i):
note("Hi there")
with capture_out() as out, reporting.with_reporter(reporting.default):
test()
assert not out.getvalue()
def test_prints_notes_once_on_failure():
@given(lists(integers()))
@settings(database=None, verbosity=Verbosity.normal)
def test(xs):
note("Hi there")
if sum(xs) <= 100:
raise ValueError
with pytest.raises(ValueError) as err:
test()
assert err.value.__notes__.count("Hi there") == 1
@given(lists(integers(), max_size=0))
def test_empty_lists(xs):
assert xs == []
@xfail_on_crosshair(Why.other, strict=False)
def test_given_usable_inline_on_lambdas():
xs = []
given(booleans())(lambda x: xs.append(x))()
assert len(xs) == 2
assert set(xs) == {False, True}
def test_notes_high_filter_rates_in_unsatisfiable_error():
@given(st.integers())
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def f(v):
assume(False)
with pytest.raises(
Unsatisfiable,
match=(
r"Unable to satisfy assumptions of f\. 1000 of 1000 examples "
r"failed a \.filter\(\) or assume\(\)"
),
):
f()
# crosshair generates one valid input before verifying the test function,
# so the Unsatisfiable check never occurs.
# (not strict due to slowness causing crosshair to bail out on the first input,
# maybe?)
@xfail_on_crosshair(Why.other, strict=False)
def test_notes_high_overrun_rates_in_unsatisfiable_error():
@given(st.binary(min_size=100))
@settings(
suppress_health_check=[
HealthCheck.data_too_large,
HealthCheck.too_slow,
HealthCheck.large_base_example,
]
)
def f(v):
pass
match = (
r"1000 of 1000 examples were too large to finish generating; try "
r"reducing the typical size of your inputs\?"
)
with (
pytest.raises(Unsatisfiable, match=match),
buffer_size_limit(10),
):
f()
| TestCases |
python | aio-libs__aiohttp | aiohttp/web_runner.py | {
"start": 3351,
"end": 4250
} | class ____(BaseSite):
__slots__ = ("_path",)
def __init__(
self,
runner: "BaseRunner[Any]",
path: PathLike,
*,
ssl_context: SSLContext | None = None,
backlog: int = 128,
) -> None:
super().__init__(
runner,
ssl_context=ssl_context,
backlog=backlog,
)
self._path = path
@property
def name(self) -> str:
scheme = "https" if self._ssl_context else "http"
return f"{scheme}://unix:{self._path}:"
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_unix_server(
server,
self._path,
ssl=self._ssl_context,
backlog=self._backlog,
)
| UnixSite |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/util.py | {
"start": 1257,
"end": 1349
} | class ____(Protocol):
_trans_context_manager: Optional[TransactionalContext]
| _TConsSubject |
python | tiangolo__fastapi | docs_src/sql_databases/tutorial001_py39.py | {
"start": 156,
"end": 1798
} | class ____(SQLModel, table=True):
id: Union[int, None] = Field(default=None, primary_key=True)
name: str = Field(index=True)
age: Union[int, None] = Field(default=None, index=True)
secret_name: str
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/")
def create_hero(hero: Hero, session: Session = Depends(get_session)) -> Hero:
session.add(hero)
session.commit()
session.refresh(hero)
return hero
@app.get("/heroes/")
def read_heroes(
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
) -> list[Hero]:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}")
def read_hero(hero_id: int, session: Session = Depends(get_session)) -> Hero:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.delete("/heroes/{hero_id}")
def delete_hero(hero_id: int, session: Session = Depends(get_session)):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
| Hero |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 188051,
"end": 194912
} | class ____:
math_dtypes = [np.int_, np.float64, np.complex128]
def test_constructor1(self):
# unsorted triplet format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.])
coo = self.coo_container((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4, 3), coo.toarray())
# using Python lists and a specified dtype
coo = self.coo_container(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64)
dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64)
assert_array_equal(dense, coo.toarray())
def test_constructor2(self):
# unsorted triplet format with duplicates (which are summed)
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = self.coo_container((data,(row,col)),(3,3))
mat = array([[4, -1, 0], [0, 0, 9], [-3, 7, 0]])
assert_array_equal(mat, coo.toarray())
def test_constructor3(self):
# empty matrix
coo = self.coo_container((4,3))
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.toarray(), zeros((4, 3)))
def test_constructor4(self):
# from dense matrix
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = self.coo_container(mat)
assert_array_equal(coo.toarray(), mat)
# upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = self.coo_container(mat)
expected = mat if self.is_array_test else mat.reshape(1, -1)
assert_array_equal(coo.toarray(), expected)
# error if second arg interpreted as shape (gh-9919)
with pytest.raises(TypeError, match=r'object cannot be interpreted'):
self.coo_container([0, 11, 22, 33], ([0, 1, 2, 3], [0, 0, 0, 0]))
# error if explicit shape arg doesn't match the dense matrix
with pytest.raises(ValueError, match=r'inconsistent shapes'):
self.coo_container([0, 11, 22, 33], shape=(4, 4))
def test_constructor_data_ij_dtypeNone(self):
data = [1]
coo = self.coo_container((data, ([0], [0])), dtype=None)
assert coo.dtype == np.array(data).dtype
@pytest.mark.xfail(run=False, reason='COO does not have a __getitem__')
def test_iterator(self):
pass
def test_todia_all_zeros(self):
zeros = [[0, 0]]
dia = self.coo_container(zeros).todia()
assert_array_equal(dia.toarray(), zeros)
def test_sum_duplicates(self):
coo = self.coo_container((4,3))
coo.sum_duplicates()
coo = self.coo_container(([1,2], ([1,0], [1,0])))
coo.sum_duplicates()
assert_array_equal(coo.toarray(), [[2,0],[0,1]])
coo = self.coo_container(([1,2], ([1,1], [1,1])))
coo.sum_duplicates()
assert_array_equal(coo.toarray(), [[0,0],[0,3]])
assert_array_equal(coo.row, [1])
assert_array_equal(coo.col, [1])
assert_array_equal(coo.data, [3])
def test_todok_duplicates(self):
coo = self.coo_container(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
dok = coo.todok()
assert_array_equal(dok.toarray(), coo.toarray())
def test_tocompressed_duplicates(self):
coo = self.coo_container(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
csr = coo.tocsr()
assert_equal(csr.nnz + 2, coo.nnz)
csc = coo.tocsc()
assert_equal(csc.nnz + 2, coo.nnz)
def test_has_canonical_format(self):
"Ensure has_canonical_format memoizes state for sum_duplicates"
A = self.coo_container((2, 3))
assert_equal(A.has_canonical_format, True)
A_array = np.array([[0, 2, 0]])
A_coords_form = (np.array([2]), (np.array([0]), np.array([1])))
A_coords_dups = (np.array([1, 1]), (np.array([0, 0]), np.array([1, 1])))
A = self.coo_container(A_array)
assert A.has_canonical_format is True
A = self.coo_container(A_coords_form)
assert A.has_canonical_format is False
A.sum_duplicates()
assert A.has_canonical_format is True
A = self.coo_container(A, copy=True)
assert A.has_canonical_format is True
A = self.coo_container(A, copy=False)
assert A.has_canonical_format is False
A.sum_duplicates()
assert A.has_canonical_format is True
A = self.coo_container(A_coords_dups)
assert A.has_canonical_format is False
assert_equal(A.nnz, 2) # duplicates
A.sum_duplicates()
assert A.has_canonical_format is True
assert_equal(A.nnz, 1)
# manually set
A.has_canonical_format = False
assert_equal(A.has_canonical_format, False)
assert_equal(A.nnz, 1) # incorrectly False
A.sum_duplicates() # check flag updated
assert_equal(A.has_canonical_format, True)
A = self.coo_container(A_coords_dups)
A.has_canonical_format = True
assert_equal(A.has_canonical_format, True)
assert_equal(A.nnz, 2) # incorrectly True
A.sum_duplicates() # check dups not removed due to flag
assert_equal(A.nnz, 2) # still has duplicates!!!!
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
row = array([0, 0, 0, 1, 1, 1, 1, 1])
col = array([1, 2, 3, 4, 5, 6, 7, 8])
asp = self.coo_container((data, (row, col)), shape=(2,10))
bsp = asp.copy()
asp.eliminate_zeros()
assert_((asp.data != 0).all())
assert_array_equal(asp.toarray(), bsp.toarray())
def test_reshape_copy(self):
arr = [[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]
new_shape = (2, 6)
x = self.coo_container(arr)
y = x.reshape(new_shape)
assert_(y.data is x.data)
y = x.reshape(new_shape, copy=False)
assert_(y.data is x.data)
y = x.reshape(new_shape, copy=True)
assert_(not np.may_share_memory(y.data, x.data))
def test_large_dimensions_reshape(self):
# Test that reshape is immune to integer overflow when number of elements
# exceeds 2^31-1
mat1 = self.coo_container(([1], ([3000000], [1000])), (3000001, 1001))
mat2 = self.coo_container(([1], ([1000], [3000000])), (1001, 3000001))
# assert_array_equal is slow for big matrices because it expects dense
# Using __ne__ and nnz instead
assert_((mat1.reshape((1001, 3000001), order='C') != mat2).nnz == 0)
assert_((mat2.reshape((3000001, 1001), order='F') != mat1).nnz == 0)
| BaseTestCOO |
python | readthedocs__readthedocs.org | readthedocs/audit/models.py | {
"start": 2057,
"end": 7743
} | class ____(TimeStampedModel):
"""
Track user actions for audit purposes.
A log can be attached to a user and/or project and organization.
If the user, project or organization are deleted the log will be preserved,
and the deleted user/project/organization can be accessed via the ``log_*`` attributes.
"""
# pylint: disable=too-many-instance-attributes
PAGEVIEW = "pageview"
PAGEVIEW_TEXT = _("Page view")
DOWNLOAD = "download"
DOWNLOAD_TEXT = _("Download")
AUTHN = "authentication"
AUTHN_TEXT = _("Authentication")
AUTHN_FAILURE = "authentication-failure"
AUTHN_FAILURE_TEXT = _("Authentication failure")
LOGOUT = "log-out"
LOGOUT_TEXT = _("Log out")
INVITATION_SENT = "invitation-sent"
INVITATION_SENT_TEXT = _("Invitation sent")
INVITATION_REVOKED = "invitation-revoked"
INVITATION_REVOKED_TEXT = _("Invitation revoked")
INVITATION_ACCEPTED = "invitation-accepted"
INVITATION_ACCEPTED_TEXT = _("Invitation accepted")
INVITATION_DECLINED = "invitation-declined"
INVITATION_DECLINED_TEXT = _("Invitation declined")
CHOICES = (
(PAGEVIEW, PAGEVIEW_TEXT),
(DOWNLOAD, DOWNLOAD_TEXT),
(AUTHN, AUTHN_TEXT),
(AUTHN_FAILURE, AUTHN_FAILURE_TEXT),
(LOGOUT, LOGOUT_TEXT),
(INVITATION_SENT, INVITATION_SENT_TEXT),
(INVITATION_REVOKED, INVITATION_REVOKED_TEXT),
(INVITATION_ACCEPTED, INVITATION_ACCEPTED_TEXT),
(INVITATION_DECLINED, INVITATION_DECLINED_TEXT),
)
user = models.ForeignKey(
User,
verbose_name=_("User"),
null=True,
on_delete=models.SET_NULL,
db_index=True,
)
# Extra information in case the user is deleted.
log_user_id = models.IntegerField(
_("User ID"),
blank=True,
null=True,
db_index=True,
)
log_user_username = models.CharField(
_("Username"),
max_length=150,
blank=True,
null=True,
db_index=True,
)
project = models.ForeignKey(
"projects.Project",
verbose_name=_("Project"),
null=True,
db_index=True,
on_delete=models.SET_NULL,
)
# Extra information in case the project is deleted.
log_project_id = models.IntegerField(
_("Project ID"),
blank=True,
null=True,
db_index=True,
)
log_project_slug = models.CharField(
_("Project slug"),
max_length=63,
blank=True,
null=True,
db_index=True,
)
organization = models.ForeignKey(
"organizations.Organization",
verbose_name=_("Organization"),
null=True,
db_index=True,
on_delete=models.SET_NULL,
)
log_organization_id = models.IntegerField(
_("Organization ID"),
blank=True,
null=True,
db_index=True,
)
log_organization_slug = models.CharField(
_("Organization slug"),
max_length=255,
blank=True,
null=True,
db_index=True,
)
action = models.CharField(
_("Action"),
max_length=150,
choices=CHOICES,
)
auth_backend = models.CharField(
_("Auth backend"),
max_length=250,
blank=True,
null=True,
)
ip = models.CharField(
_("IP address"),
blank=True,
null=True,
max_length=250,
)
browser = models.CharField(
_("Browser user-agent"),
max_length=250,
blank=True,
null=True,
)
# Resource can be a path,
# set it slightly greater than ``HTMLFile.path``.
resource = models.CharField(
_("Resource"),
max_length=5500,
blank=True,
null=True,
)
data = models.JSONField(
null=True,
blank=True,
help_text=_(
"Extra data about the log entry. Its structure depends on the type of log entry."
),
)
objects = AuditLogManager()
class Meta:
ordering = ["-created"]
def save(self, **kwargs):
if self.user:
self.log_user_id = self.user.id
self.log_user_username = self.user.username
if self.project:
self.log_project_id = self.project.id
self.log_project_slug = self.project.slug
organization = self.project.organizations.first()
if organization:
self.organization = organization
if self.organization:
self.log_organization_id = self.organization.id
self.log_organization_slug = self.organization.slug
self._truncate_browser()
super().save(**kwargs)
def _truncate_browser(self):
browser_max_length = self._meta.get_field("browser").max_length
if self.browser and len(self.browser) > browser_max_length:
suffix = " - Truncated"
truncated_at = browser_max_length - len(suffix)
self.browser = self.browser[:truncated_at] + suffix
def auth_backend_display(self):
"""
Get a string representation for backends that aren't part of the normal login.
.. note::
The backends listed here are implemented on .com only.
"""
backend = self.auth_backend or ""
backend_displays = {
"TemporaryAccessTokenBackend": _("shared link"),
"TemporaryAccessPasswordBackend": _("shared password"),
}
for name, display in backend_displays.items():
if name in backend:
return display
return ""
def __str__(self):
return self.action
| AuditLog |
python | matplotlib__matplotlib | lib/matplotlib/axes/_base.py | {
"start": 7420,
"end": 21579
} | class ____:
"""
Process variable length arguments to `~.Axes.plot`, to support ::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, output='Line2D'):
_api.check_in_list(['Line2D', 'Polygon', 'coordinates'], output=output)
self.output = output
self.set_prop_cycle(None)
def set_prop_cycle(self, cycler):
self._idx = 0
self._cycler_items = [*mpl._val_or_rc(cycler, 'axes.prop_cycle')]
def __call__(self, axes, *args, data=None, return_kwargs=False, **kwargs):
axes._process_unit_info(kwargs=kwargs)
for pos_only in "xy":
if pos_only in kwargs:
raise _api.kwarg_error(inspect.stack()[1].function, pos_only)
if not args:
return
if data is None: # Process dict views
args = [cbook.sanitize_sequence(a) for a in args]
else: # Process the 'data' kwarg.
replaced = [mpl._replacer(data, arg) for arg in args]
if len(args) == 1:
label_namer_idx = 0
elif len(args) == 2: # Can be x, y or y, c.
# Figure out what the second argument is.
# 1) If the second argument cannot be a format shorthand, the
# second argument is the label_namer.
# 2) Otherwise (it could have been a format shorthand),
# a) if we did perform a substitution, emit a warning, and
# use it as label_namer.
# b) otherwise, it is indeed a format shorthand; use the
# first argument as label_namer.
try:
_process_plot_format(args[1])
except ValueError: # case 1)
label_namer_idx = 1
else:
if replaced[1] is not args[1]: # case 2a)
_api.warn_external(
f"Second argument {args[1]!r} is ambiguous: could "
f"be a format string but is in 'data'; using as "
f"data. If it was intended as data, set the "
f"format string to an empty string to suppress "
f"this warning. If it was intended as a format "
f"string, explicitly pass the x-values as well. "
f"Alternatively, rename the entry in 'data'.",
RuntimeWarning)
label_namer_idx = 1
else: # case 2b)
label_namer_idx = 0
elif len(args) == 3:
label_namer_idx = 1
else:
raise ValueError(
"Using arbitrary long args with data is not supported due "
"to ambiguity of arguments; use multiple plotting calls "
"instead")
if kwargs.get("label") is None:
kwargs["label"] = mpl._label_from_arg(
replaced[label_namer_idx], args[label_namer_idx])
args = replaced
ambiguous_fmt_datakey = data is not None and len(args) == 2
if len(args) >= 4 and not cbook.is_scalar_or_string(
kwargs.get("label")):
raise ValueError("plot() with multiple groups of data (i.e., "
"pairs of x and y) does not support multiple "
"labels")
# Repeatedly grab (x, y) or (x, y, format) from the front of args and
# massage them into arguments to plot() or fill().
while args:
this, args = args[:2], args[2:]
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
yield from self._plot_args(
axes, this, kwargs, ambiguous_fmt_datakey=ambiguous_fmt_datakey,
return_kwargs=return_kwargs
)
def get_next_color(self):
"""Return the next color in the cycle."""
entry = self._cycler_items[self._idx]
if "color" in entry:
self._idx = (self._idx + 1) % len(self._cycler_items) # Advance cycler.
return entry["color"]
else:
return "k"
def _getdefaults(self, kw, ignore=frozenset()):
"""
If some keys in the property cycle (excluding those in the set
*ignore*) are absent or set to None in the dict *kw*, return a copy
of the next entry in the property cycle, excluding keys in *ignore*.
Otherwise, don't advance the property cycle, and return an empty dict.
"""
defaults = self._cycler_items[self._idx]
if any(kw.get(k, None) is None for k in {*defaults} - ignore):
self._idx = (self._idx + 1) % len(self._cycler_items) # Advance cycler.
# Return a new dict to avoid exposing _cycler_items entries to mutation.
return {k: v for k, v in defaults.items() if k not in ignore}
else:
return {}
def _setdefaults(self, defaults, kw):
"""
Add to the dict *kw* the entries in the dict *default* that are absent
or set to None in *kw*.
"""
for k in defaults:
if kw.get(k, None) is None:
kw[k] = defaults[k]
def _make_line(self, axes, x, y, kw, kwargs):
kw = {**kw, **kwargs} # Don't modify the original kw.
self._setdefaults(self._getdefaults(kw), kw)
seg = mlines.Line2D(x, y, **kw)
return seg, kw
def _make_coordinates(self, axes, x, y, kw, kwargs):
kw = {**kw, **kwargs} # Don't modify the original kw.
self._setdefaults(self._getdefaults(kw), kw)
return (x, y), kw
def _make_polygon(self, axes, x, y, kw, kwargs):
# Polygon doesn't directly support unitized inputs.
x = axes.convert_xunits(x)
y = axes.convert_yunits(y)
kw = kw.copy() # Don't modify the original kw.
kwargs = kwargs.copy()
# Ignore 'marker'-related properties as they aren't Polygon
# properties, but they are Line2D properties, and so they are
# likely to appear in the default cycler construction.
# This is done here to the defaults dictionary as opposed to the
# other two dictionaries because we do want to capture when a
# *user* explicitly specifies a marker which should be an error.
# We also want to prevent advancing the cycler if there are no
# defaults needed after ignoring the given properties.
ignores = ({'marker', 'markersize', 'markeredgecolor',
'markerfacecolor', 'markeredgewidth'}
# Also ignore anything provided by *kwargs*.
| {k for k, v in kwargs.items() if v is not None})
# Only using the first dictionary to use as basis
# for getting defaults for back-compat reasons.
# Doing it with both seems to mess things up in
# various places (probably due to logic bugs elsewhere).
default_dict = self._getdefaults(kw, ignores)
self._setdefaults(default_dict, kw)
# Looks like we don't want "color" to be interpreted to
# mean both facecolor and edgecolor for some reason.
# So the "kw" dictionary is thrown out, and only its
# 'color' value is kept and translated as a 'facecolor'.
# This design should probably be revisited as it increases
# complexity.
facecolor = kw.get('color', None)
# Throw out 'color' as it is now handled as a facecolor
default_dict.pop('color', None)
# To get other properties set from the cycler
# modify the kwargs dictionary.
self._setdefaults(default_dict, kwargs)
seg = mpatches.Polygon(np.column_stack((x, y)),
facecolor=facecolor,
fill=kwargs.get('fill', True),
closed=kw['closed'])
seg.set(**kwargs)
return seg, kwargs
def _plot_args(self, axes, tup, kwargs, *,
return_kwargs=False, ambiguous_fmt_datakey=False):
"""
Process the arguments of ``plot([x], y, [fmt], **kwargs)`` calls.
This processes a single set of ([x], y, [fmt]) parameters; i.e. for
``plot(x, y, x2, y2)`` it will be called twice. Once for (x, y) and
once for (x2, y2).
x and y may be 2D and thus can still represent multiple datasets.
For multiple datasets, if the keyword argument *label* is a list, this
will unpack the list and assign the individual labels to the datasets.
Parameters
----------
tup : tuple
A tuple of the positional parameters. This can be one of
- (y,)
- (x, y)
- (y, fmt)
- (x, y, fmt)
kwargs : dict
The keyword arguments passed to ``plot()``.
return_kwargs : bool
Whether to also return the effective keyword arguments after label
unpacking as well.
ambiguous_fmt_datakey : bool
Whether the format string in *tup* could also have been a
misspelled data key.
Returns
-------
result
If *return_kwargs* is false, a list of Artists representing the
dataset(s).
If *return_kwargs* is true, a list of (Artist, effective_kwargs)
representing the dataset(s). See *return_kwargs*.
The Artist is either `.Line2D` (if called from ``plot()``) or
`.Polygon` otherwise.
"""
if len(tup) > 1 and isinstance(tup[-1], str):
# xy is tup with fmt stripped (could still be (y,) only)
*xy, fmt = tup
linestyle, marker, color = _process_plot_format(
fmt, ambiguous_fmt_datakey=ambiguous_fmt_datakey)
elif len(tup) == 3:
raise ValueError('third arg must be a format string')
else:
xy = tup
linestyle, marker, color = None, None, None
# Don't allow any None value; these would be up-converted to one
# element array of None which causes problems downstream.
if any(v is None for v in tup):
raise ValueError("x, y, and format string must not be None")
kw = {}
for prop_name, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
# check for conflicts between fmt and kwargs
if (fmt.lower() != 'none'
and prop_name in kwargs
and val != 'None'):
# Technically ``plot(x, y, 'o', ls='--')`` is a conflict
# because 'o' implicitly unsets the linestyle
# (linestyle='None').
# We'll gracefully not warn in this case because an
# explicit set via kwargs can be seen as intention to
# override an implicit unset.
# Note: We don't val.lower() != 'none' because val is not
# necessarily a string (can be a tuple for colors). This
# is safe, because *val* comes from _process_plot_format()
# which only returns 'None'.
_api.warn_external(
f"{prop_name} is redundantly defined by the "
f"'{prop_name}' keyword argument and the fmt string "
f'"{fmt}" (-> {prop_name}={val!r}). The keyword '
f"argument will take precedence.")
kw[prop_name] = val
if len(xy) == 2:
x = _check_1d(xy[0])
y = _check_1d(xy[1])
else:
x, y = index_of(xy[-1])
if axes.xaxis is not None:
axes.xaxis.update_units(x)
if axes.yaxis is not None:
axes.yaxis.update_units(y)
if x.shape[0] != y.shape[0]:
raise ValueError(f"x and y must have same first dimension, but "
f"have shapes {x.shape} and {y.shape}")
if x.ndim > 2 or y.ndim > 2:
raise ValueError(f"x and y can be no greater than 2D, but have "
f"shapes {x.shape} and {y.shape}")
if x.ndim == 1:
x = x[:, np.newaxis]
if y.ndim == 1:
y = y[:, np.newaxis]
if self.output == 'Line2D':
make_artist = self._make_line
elif self.output == 'Polygon':
kw['closed'] = kwargs.get('closed', True)
make_artist = self._make_polygon
elif self.output == 'coordinates':
make_artist = self._make_coordinates
else:
_api.check_in_list(['Line2D', 'Polygon', 'coordinates'], output=self.output)
ncx, ncy = x.shape[1], y.shape[1]
if ncx > 1 and ncy > 1 and ncx != ncy:
raise ValueError(f"x has {ncx} columns but y has {ncy} columns")
if ncx == 0 or ncy == 0:
return []
label = kwargs.get('label')
n_datasets = max(ncx, ncy)
if cbook.is_scalar_or_string(label):
labels = [label] * n_datasets
elif len(label) == n_datasets:
labels = label
else:
raise ValueError(
f"label must be scalar or have the same length as the input "
f"data, but found {len(label)} for {n_datasets} datasets.")
result = (make_artist(axes, x[:, j % ncx], y[:, j % ncy], kw,
{**kwargs, 'label': label})
for j, label in enumerate(labels))
if return_kwargs:
return list(result)
else:
return [l[0] for l in result]
@_api.define_aliases({"facecolor": ["fc"]})
| _process_plot_var_args |
python | walkccc__LeetCode | solutions/3463. Check If Digits Are Equal in String After Operations II/3463.py | {
"start": 0,
"end": 976
} | class ____:
# Same as 3461. Check If Digits Are Equal in String After Operations I
def hasSameDigits(self, s: str) -> bool:
n = len(s)
num1 = 0
num2 = 0
for i in range(n - 1):
coefficient = self._nCMOD10(n - 2, i)
num1 += (coefficient * (int(s[i]) - 0)) % 10
num1 %= 10
num2 += (coefficient * (int(s[i + 1]) - 0)) % 10
num2 %= 10
return num1 == num2
def _nCMOD10(self, n: int, k: int) -> int:
"""Returns (n, k) % 10."""
mod2 = self._lucasTheorem(n, k, 2)
mod5 = self._lucasTheorem(n, k, 5)
lookup = [
[0, 6, 2, 8, 4], # mod2 == 0
[5, 1, 7, 3, 9] # mod2 == 1
]
return lookup[mod2][mod5]
def _lucasTheorem(self, n: int, k: int, prime: int) -> int:
"""Returns (n, k) % prime."""
res = 1
while n > 0 or k > 0:
nMod = n % prime
MOD = k % prime
res *= math.comb(nMod, MOD)
res %= prime
n //= prime
k //= prime
return res
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-root-of-n-ary-tree.py | {
"start": 527,
"end": 917
} | class ____(object):
def findRoot(self, tree):
"""
:type tree: List['Node']
:rtype: 'Node'
"""
root = 0
for node in tree:
root ^= node.val
for child in node.children:
root ^= child.val
for node in tree:
if node.val == root:
return node
return None
| Solution2 |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_natural_language.py | {
"start": 2120,
"end": 2587
} | class ____:
@patch("airflow.providers.google.cloud.operators.natural_language.CloudNaturalLanguageHook")
def test_minimal_green_path(self, hook_mock):
hook_mock.return_value.analyze_entity_sentiment.return_value = ANALYZE_ENTITY_SENTIMENT_RESPONSE
op = CloudNaturalLanguageAnalyzeEntitySentimentOperator(task_id="task-id", document=DOCUMENT)
resp = op.execute({})
assert resp == {}
| TestCloudLanguageAnalyzeEntitySentimentOperator |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/events/span.py | {
"start": 57,
"end": 298
} | class ____(BaseEvent):
"""
SpanDropEvent.
Args:
err_str (str): Error string.
"""
err_str: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SpanDropEvent"
| SpanDropEvent |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0064_add_feature_future_default_true.py | {
"start": 149,
"end": 830
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0063_extend_domain_from_timestamp_model"),
]
operations = [
migrations.AddField(
model_name="feature",
name="future_default_true",
field=models.BooleanField(
default=False, verbose_name="Default all future projects to True"
),
),
migrations.AlterField(
model_name="feature",
name="default_true",
field=models.BooleanField(
default=False, verbose_name="Default all past projects to True"
),
),
]
| Migration |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 10576,
"end": 11896
} | class ____(TestEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.DiGraph())
cls.eview = nx.reportviews.OutEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = (
"OutEdgeDataView([(0, 1, {}), (1, 2, {}), "
+ "(2, 3, {}), (3, 4, {}), "
+ "(4, 5, {}), (5, 6, {}), "
+ "(6, 7, {}), (7, 8, {})])"
)
assert repr(ev) == rep
def test_len(self):
evr = self.eview(self.G)
ev = evr(data="foo")
assert len(ev) == 8
assert len(evr(1)) == 1
assert len(evr([1, 2, 3])) == 3
assert len(self.G.edges(1)) == 1
assert len(self.G.edges()) == 8
assert len(self.G.edges) == 8
H = self.G.copy()
H.add_edge(1, 1)
assert len(H.edges(1)) == 2
assert len(H.edges()) == 9
assert len(H.edges) == 9
def test_contains_with_nbunch(self):
evr = self.eview(self.G)
ev = evr(nbunch=[0, 2])
assert (0, 1) in ev
assert (1, 2) not in ev
assert (2, 3) in ev
assert (3, 4) not in ev
assert (4, 5) not in ev
assert (5, 6) not in ev
assert (7, 8) not in ev
assert (8, 9) not in ev
| TestOutEdgeDataView |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 35618,
"end": 37372
} | class ____(BaseModel):
type: Literal["LegacySessionTokenAuthenticator"]
header: str = Field(
...,
description="The name of the session token header that will be injected in the request",
examples=["X-Session"],
title="Session Request Header",
)
login_url: str = Field(
...,
description="Path of the login URL (do not include the base URL)",
examples=["session"],
title="Login Path",
)
session_token: Optional[str] = Field(
None,
description="Session token to use if using a pre-defined token. Not needed if authenticating with username + password pair",
example=["{{ config['session_token'] }}"],
title="Session Token",
)
session_token_response_key: str = Field(
...,
description="Name of the key of the session token to be extracted from the response",
examples=["id"],
title="Response Token Response Key",
)
username: Optional[str] = Field(
None,
description="Username used to authenticate and obtain a session token",
examples=[" {{ config['username'] }}"],
title="Username",
)
password: Optional[str] = Field(
"",
description="Password used to authenticate and obtain a session token",
examples=["{{ config['password'] }}", ""],
title="Password",
)
validate_session_url: str = Field(
...,
description="Path of the URL to use to validate that the session token is valid (do not include the base URL)",
examples=["user/current"],
title="Validate Session Path",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| LegacySessionTokenAuthenticator |
python | django__django | tests/admin_checks/models.py | {
"start": 1350,
"end": 1422
} | class ____(models.Model):
name = models.CharField(max_length=15)
| State |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py | {
"start": 53331,
"end": 56629
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Phi4MultimodalConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.num_key_value_heads = config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
qkv = self.qkv_proj(hidden_states)
query_pos = self.config.num_attention_heads * self.head_dim
query_states = qkv[..., :query_pos]
key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None),
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Phi4MultimodalAttention |
python | jazzband__django-simple-history | simple_history/tests/view.py | {
"start": 2679,
"end": 2772
} | class ____(DeleteView):
model = Poll
success_url = reverse_lazy("poll-list")
| PollDelete |
python | walkccc__LeetCode | solutions/108. Convert Sorted Array to Binary Search Tree/108.py | {
"start": 0,
"end": 336
} | class ____:
def sortedArrayToBST(self, nums: list[int]) -> TreeNode | None:
def build(l: int, r: int) -> TreeNode | None:
if l > r:
return None
m = (l + r) // 2
return TreeNode(nums[m],
build(l, m - 1),
build(m + 1, r))
return build(0, len(nums) - 1)
| Solution |
python | plotly__plotly.py | plotly/graph_objs/treemap/_legendgrouptitle.py | {
"start": 233,
"end": 2939
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "treemap"
_path_str = "treemap.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.treemap.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.treemap.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.