language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/issues/status_change_message.py | {
"start": 216,
"end": 512
} | class ____(TypedDict):
fingerprint: Sequence[str]
project_id: int
new_status: int
new_substatus: int | None
id: str
detector_id: int | None
activity_data: dict[str, Any] | None
update_date: NotRequired[datetime | None]
@dataclass(frozen=True)
| StatusChangeMessageData |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore_warm_start.py | {
"start": 5541,
"end": 6352
} | class ____(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self, analysis=None):
space = {"width": (0, 20), "height": (-100, 100)}
def cost(space):
tune.report(
dict(loss=(space["height"] - 14) ** 2 - abs(space["width"] - 3))
)
search_alg = BayesOptSearch(space, metric="loss", mode="min", analysis=analysis)
return search_alg, cost
def testBootStrapAnalysis(self):
analysis = self.run_full()
search_alg3, cost = self.set_basic_conf(analysis)
if not isinstance(search_alg3, ConcurrencyLimiter):
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
tune.run(
cost, num_samples=10, search_alg=search_alg3, verbose=0, reuse_actors=True
)
| BayesoptWarmStartTest |
python | redis__redis-py | tests/test_cluster.py | {
"start": 114100,
"end": 116541
} | class ____:
"""
Tests for the ClusterPubSub class
"""
def test_init_pubsub_with_host_and_port(self, r):
"""
Test creation of pubsub instance with passed host and port
"""
node = r.get_default_node()
p = r.pubsub(host=node.host, port=node.port)
assert p.get_pubsub_node() == node
def test_init_pubsub_with_node(self, r):
"""
Test creation of pubsub instance with passed node
"""
node = r.get_default_node()
p = r.pubsub(node=node)
assert p.get_pubsub_node() == node
def test_init_pubusub_without_specifying_node(self, r):
"""
Test creation of pubsub instance without specifying a node. The node
should be determined based on the keyslot of the first command
execution.
"""
channel_name = "foo"
node = r.get_node_from_key(channel_name)
p = r.pubsub()
assert p.get_pubsub_node() is None
p.subscribe(channel_name)
assert p.get_pubsub_node() == node
def test_init_pubsub_with_a_non_existent_node(self, r):
"""
Test creation of pubsub instance with node that doesn't exists in the
cluster. RedisClusterException should be raised.
"""
node = ClusterNode("1.1.1.1", 1111)
with pytest.raises(RedisClusterException):
r.pubsub(node)
def test_init_pubsub_with_a_non_existent_host_port(self, r):
"""
Test creation of pubsub instance with host and port that don't belong
to a node in the cluster.
RedisClusterException should be raised.
"""
with pytest.raises(RedisClusterException):
r.pubsub(host="1.1.1.1", port=1111)
def test_init_pubsub_host_or_port(self, r):
"""
Test creation of pubsub instance with host but without port, and vice
versa. DataError should be raised.
"""
with pytest.raises(DataError):
r.pubsub(host="localhost")
with pytest.raises(DataError):
r.pubsub(port=16379)
def test_get_redis_connection(self, r):
"""
Test that get_redis_connection() returns the redis connection of the
set pubsub node
"""
node = r.get_default_node()
p = r.pubsub(node=node)
assert p.get_redis_connection() == node.redis_connection
@pytest.mark.onlycluster
| TestClusterPubSubObject |
python | ray-project__ray | python/ray/dashboard/modules/job/common.py | {
"start": 22202,
"end": 22324
} | class ____:
# DEPRECATED: Use submission_id instead.
job_id: str
submission_id: str
@dataclass
| JobSubmitResponse |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 63838,
"end": 64333
} | class ____:
xlBetween = 1 # from enum XlFormatConditionOperator
xlEqual = 3 # from enum XlFormatConditionOperator
xlGreater = 5 # from enum XlFormatConditionOperator
xlGreaterEqual = 7 # from enum XlFormatConditionOperator
xlLess = 6 # from enum XlFormatConditionOperator
xlLessEqual = 8 # from enum XlFormatConditionOperator
xlNotBetween = 2 # from enum XlFormatConditionOperator
xlNotEqual = 4 # from enum XlFormatConditionOperator
| FormatConditionOperator |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 4815,
"end": 5523
} | class ____(_FusedModule):
r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert (
type_before_parametrizations(conv) == Conv2d
and type_before_parametrizations(bn) == BatchNorm2d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(conv)}"
f"{type_before_parametrizations(bn)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(conv, bn, relu)
| ConvBnReLU2d |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_logging_sink.py | {
"start": 15107,
"end": 18415
} | class ____:
def test_template_fields(self):
operator = CloudLoggingListSinksOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
)
assert "project_id" in operator.template_fields
def test_missing_required_params(self):
with pytest.raises(AirflowException) as excinfo:
CloudLoggingListSinksOperator(
task_id=TASK_ID,
project_id=None,
).execute(context={})
assert "Required parameters are missing" in str(excinfo.value)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
def test_list_sinks_success(self, hook_mock):
hook_instance = hook_mock.return_value
hook_instance.list_sinks.return_value = [sink, sink]
operator = CloudLoggingListSinksOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
page_size=50,
)
result = operator.execute(context=mock.MagicMock())
hook_mock.assert_called_once()
_, kwargs = hook_mock.call_args
assert kwargs == {
"gcp_conn_id": "google_cloud_default",
"impersonation_chain": None,
}
hook_instance.list_sinks.assert_called_once_with(
project_id=PROJECT_ID,
page_size=50,
)
assert result == [LogSink.to_dict(sink) for sink in [sink, sink]]
def test_negative_page_size_raises_exception(self):
with pytest.raises(
AirflowException, match="The page_size for the list sinks request must be greater than zero"
):
CloudLoggingListSinksOperator(task_id="fail-task", project_id=PROJECT_ID, page_size=-1).execute(
context={}
)
def test_missing_rendered_field_raises(self):
with DAG(
dag_id="test_render_native",
start_date=datetime(1997, 9, 25),
render_template_as_native_obj=True,
) as dag:
operator = CloudLoggingListSinksOperator(
task_id=TASK_ID, project_id="{{ var.value.project_id }}", dag=dag
)
context = {
"var": {"value": {"project_id": None}},
}
operator.render_template_fields(context)
with pytest.raises(
AirflowException,
match=re.escape(
"Required parameters are missing: ['project_id']. These must be passed as keyword parameters."
),
):
operator.execute(context)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
def test_template_rendering(self, hook_mock):
with DAG(
dag_id="test_render_native",
start_date=datetime(2024, 1, 1),
render_template_as_native_obj=True,
) as dag:
operator = CloudLoggingListSinksOperator(
task_id=TASK_ID, project_id="{{ var.value.project_id }}", dag=dag
)
context = {
"var": {"value": {"project_id": PROJECT_ID}},
}
hook_instance = hook_mock.return_value
hook_instance.list_sinks.return_value = [sink]
operator.render_template_fields(context)
operator.execute(context)
assert isinstance(operator.project_id, str)
assert operator.project_id == PROJECT_ID
| TestCloudLoggingListSinksOperator |
python | PrefectHQ__prefect | src/prefect/_versioning.py | {
"start": 465,
"end": 690
} | class ____(VersionInfo):
type: Literal["prefect:simple"] = "prefect:simple"
version: str = Field(default="")
branch: Optional[str] = Field(default=None)
url: Optional[str] = Field(default=None)
| SimpleVersionInfo |
python | PyCQA__pylint | tests/functional/r/regression/regression_property_no_member_870.py | {
"start": 123,
"end": 372
} | class ____:
def __init__(self, val=None):
self._val = val
@property
def val(self):
return self._val
@val.setter
def val(self, value):
self._val = value
if __name__ == '__main__':
print(X([]).val.append)
| X |
python | Textualize__textual | tests/css/test_nested_css.py | {
"start": 2008,
"end": 3198
} | class ____(App[None]):
CSS = """
Screen {
background: green;
Label {
background: red;
}
}
"""
def compose(self) -> ComposeResult:
yield Label("one")
async def test_rule_declaration_after_nested() -> None:
"""Regression test for https://github.com/Textualize/textual/issues/3999."""
app = DeclarationAfterNestedApp()
async with app.run_test():
assert app.screen.styles.background == Color.parse("green")
assert app.query_one(Label).styles.background == Color.parse("red")
@pytest.mark.parametrize(
("css", "exception"),
[
("Selector {", UnexpectedEnd),
("Selector{ Foo {", UnexpectedEnd),
("Selector{ Foo {}", UnexpectedEnd),
("> {}", TokenError),
("&", TokenError),
("&&", TokenError),
("&.foo", TokenError),
("& .foo", TokenError),
("{", TokenError),
("*{", UnexpectedEnd),
],
)
def test_parse_errors(css: str, exception: type[Exception]) -> None:
"""Check some CSS which should fail."""
with pytest.raises(exception):
list(parse("", css, ("foo", "")))
| DeclarationAfterNestedApp |
python | doocs__leetcode | solution/2200-2299/2263.Make Array Non-decreasing or Non-increasing/Solution.py | {
"start": 0,
"end": 486
} | class ____:
def convertArray(self, nums: List[int]) -> int:
def solve(nums):
n = len(nums)
f = [[0] * 1001 for _ in range(n + 1)]
for i, x in enumerate(nums, 1):
mi = inf
for j in range(1001):
if mi > f[i - 1][j]:
mi = f[i - 1][j]
f[i][j] = mi + abs(x - j)
return min(f[n])
return min(solve(nums), solve(nums[::-1]))
| Solution |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 27599,
"end": 28195
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a block schema."""
fields: dict[str, Any] = Field(
default_factory=dict, description="The block schema's field schema"
)
block_type_id: UUID = Field(default=..., description="A block type ID")
capabilities: List[str] = Field(
default_factory=list,
description="A list of Block capabilities",
)
version: str = Field(
default=schemas.core.DEFAULT_BLOCK_SCHEMA_VERSION,
description="Human readable identifier for the block schema",
)
| BlockSchemaCreate |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg2.py | {
"start": 20951,
"end": 21041
} | class ____(_Psycopg2Range):
_psycopg2_range_cls = "DateTimeRange"
| _Psycopg2DateTimeRange |
python | wandb__wandb | wandb/vendor/pygments/styles/vim.py | {
"start": 407,
"end": 1976
} | class ____(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| VimStyle |
python | astropy__astropy | astropy/cosmology/_src/tests/test_core.py | {
"start": 2370,
"end": 2993
} | class ____:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert cosmo_cls.meta is None
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = next(iter(cosmo.meta.keys())) # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
| MetaTestMixin |
python | plotly__plotly.py | plotly/graph_objs/layout/newshape/legendgrouptitle/_font.py | {
"start": 235,
"end": 9970
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.newshape.legendgrouptitle"
_path_str = "layout.newshape.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.newshap
e.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.newshape.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.newshape.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | euske__pdfminer | pdfminer/pdfinterp.py | {
"start": 2583,
"end": 3497
} | class ____:
def __init__(self):
self.linewidth = 0
self.linecap = None
self.linejoin = None
self.miterlimit = None
self.dash = None
self.intent = None
self.flatness = None
return
def copy(self):
obj = PDFGraphicState()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
return obj
def __repr__(self):
return ('<PDFGraphicState: linewidth=%r, linecap=%r, linejoin=%r, '
' miterlimit=%r, dash=%r, intent=%r, flatness=%r>' %
(self.linewidth, self.linecap, self.linejoin,
self.miterlimit, self.dash, self.intent, self.flatness))
## Resource Manager
##
| PDFGraphicState |
python | astropy__astropy | astropy/units/equivalencies.py | {
"start": 1210,
"end": 30761
} | class ____(list):
"""
A container for a units equivalency.
Attributes
----------
name: `str`
The name of the equivalency.
kwargs: `dict`
Any positional or keyword arguments used to make the equivalency.
"""
def __init__(self, equiv_list, name="", kwargs=None):
super().__init__(equiv_list)
self.name = [name]
self.kwargs = [kwargs] if kwargs is not None else [{}]
def __add__(self, other):
if isinstance(other, Equivalency):
# The super() returns a list, which is really a bit weird,
# but that means we have to pass it back through the initializer.
new = self.__class__(super().__add__(other))
# Avoid the change to list of the name and kwargs arguments.
new.name = self.name + other.name
new.kwargs = self.kwargs + other.kwargs
return new
else:
return super().__add__(other) # Let list take care.
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.name == other.name
and self.kwargs == other.kwargs
)
@functools.cache
def dimensionless_angles():
"""Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the angle is raised,
and independent of whether it is part of a more complicated unit.
"""
return Equivalency([(si.radian, None)], "dimensionless_angles")
@functools.cache
def logarithmic():
"""Allow logarithmic units to be converted to dimensionless fractions."""
return Equivalency(
[(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],
"logarithmic",
)
@functools.cache
def parallax():
"""
Returns a list of equivalence pairs that handle the conversion
between parallax angle and distance.
"""
def parallax_converter(x):
x = np.asanyarray(x)
d = 1 / x
if np.iterable(d):
d[d < 0] = np.nan
return d
else:
if d < 0:
return np.array(np.nan)
else:
return d
return Equivalency(
[(si.arcsecond, astrophys.parsec, parallax_converter)], "parallax"
)
@functools.cache
def spectral():
"""
Returns a list of equivalence pairs that handle spectral
wavelength, wave number, frequency, and energy equivalencies.
Allows conversions between wavelength units, wave number units,
frequency units, and energy units as they relate to light.
There are two types of wave number:
* spectroscopic - :math:`1 / \\lambda` (per meter)
* angular - :math:`2 \\pi / \\lambda` (radian per meter)
"""
c = _si.c.value
h = _si.h.value
hc = h * c
two_pi = 2.0 * np.pi
inv_m_spec = si.m**-1
inv_m_ang = si.radian / si.m
return Equivalency(
[
(si.m, si.Hz, lambda x: c / x),
(si.m, si.J, lambda x: hc / x),
(si.Hz, si.J, lambda x: h * x, lambda x: x / h),
(si.m, inv_m_spec, lambda x: 1.0 / x),
(si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),
(si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
(inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
(si.m, inv_m_ang, lambda x: two_pi / x),
(si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),
(si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),
],
"spectral",
)
@deprecated_renamed_argument(
"factor", None, since="7.0", alternative='"wav" as a "Quantity"'
)
def spectral_density(wav, factor=None):
"""
Returns a list of equivalence pairs that handle spectral density
with regard to wavelength and frequency.
Parameters
----------
wav : `~astropy.units.Quantity`
`~astropy.units.Quantity` associated with values being converted
(e.g., wavelength or frequency).
factor : array_like
If ``wav`` is a |Unit| instead of a |Quantity| then ``factor``
is the value ``wav`` will be multiplied with to convert it to
a |Quantity|.
.. deprecated:: 7.0
``factor`` is deprecated. Pass in ``wav`` as a |Quantity|,
not as a |Unit|.
"""
from .core import UnitBase
if isinstance(wav, UnitBase):
if factor is None:
raise ValueError("If `wav` is specified as a unit, `factor` should be set")
wav = factor * wav # Convert to Quantity
c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s
h_cgs = _si.h.cgs.value # erg * s
hc = c_Aps * h_cgs
# flux density
f_la = cgs.erg / si.angstrom / si.cm**2 / si.s
f_nu = cgs.erg / si.Hz / si.cm**2 / si.s
nu_f_nu = cgs.erg / si.cm**2 / si.s
la_f_la = nu_f_nu
phot_f_la = astrophys.photon / (si.cm**2 * si.s * si.AA)
phot_f_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz)
la_phot_f_la = astrophys.photon / (si.cm**2 * si.s)
# luminosity density
L_nu = cgs.erg / si.s / si.Hz
L_la = cgs.erg / si.s / si.angstrom
nu_L_nu = cgs.erg / si.s
la_L_la = nu_L_nu
phot_L_la = astrophys.photon / (si.s * si.AA)
phot_L_nu = astrophys.photon / (si.s * si.Hz)
# surface brightness (flux equiv)
S_la = cgs.erg / si.angstrom / si.cm**2 / si.s / si.sr
S_nu = cgs.erg / si.Hz / si.cm**2 / si.s / si.sr
nu_S_nu = cgs.erg / si.cm**2 / si.s / si.sr
la_S_la = nu_S_nu
phot_S_la = astrophys.photon / (si.cm**2 * si.s * si.AA * si.sr)
phot_S_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz * si.sr)
# surface brightness (luminosity equiv)
SL_nu = cgs.erg / si.s / si.Hz / si.sr
SL_la = cgs.erg / si.s / si.angstrom / si.sr
nu_SL_nu = cgs.erg / si.s / si.sr
la_SL_la = nu_SL_nu
phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)
def f_la_to_f_nu(x):
return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def f_nu_to_nu_f_nu(x):
return x * wav.to_value(si.Hz, spectral())
def f_nu_from_nu_f_nu(x):
return x / wav.to_value(si.Hz, spectral())
def f_la_to_la_f_la(x):
return x * wav.to_value(si.AA, spectral())
def f_la_from_la_f_la(x):
return x / wav.to_value(si.AA, spectral())
def phot_f_la_to_f_la(x):
return hc * x / wav.to_value(si.AA, spectral())
def phot_f_la_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) / hc
def phot_f_la_to_f_nu(x):
return h_cgs * x * wav.to_value(si.AA, spectral())
def phot_f_la_from_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) * h_cgs)
def phot_f_la_to_phot_f_nu(x):
return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps
def phot_f_la_from_phot_f_nu(x):
return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2
phot_f_nu_to_f_nu = phot_f_la_to_f_la
phot_f_nu_from_f_nu = phot_f_la_from_f_la
def phot_f_nu_to_f_la(x):
return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3
def phot_f_nu_from_f_la(x):
return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)
# for luminosity density
L_nu_to_nu_L_nu = f_nu_to_nu_f_nu
L_nu_from_nu_L_nu = f_nu_from_nu_f_nu
L_la_to_la_L_la = f_la_to_la_f_la
L_la_from_la_L_la = f_la_from_la_f_la
phot_L_la_to_L_la = phot_f_la_to_f_la
phot_L_la_from_L_la = phot_f_la_from_f_la
phot_L_la_to_L_nu = phot_f_la_to_f_nu
phot_L_la_from_L_nu = phot_f_la_from_f_nu
phot_L_la_to_phot_L_nu = phot_f_la_to_phot_f_nu
phot_L_la_from_phot_L_nu = phot_f_la_from_phot_f_nu
phot_L_nu_to_L_nu = phot_f_nu_to_f_nu
phot_L_nu_from_L_nu = phot_f_nu_from_f_nu
phot_L_nu_to_L_la = phot_f_nu_to_f_la
phot_L_nu_from_L_la = phot_f_nu_from_f_la
return Equivalency(
[
# flux
(f_la, f_nu, f_la_to_f_nu, f_la_from_f_nu),
(f_nu, nu_f_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(f_la, la_f_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_f_la, f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_f_la, f_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_f_la, phot_f_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_f_nu, f_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_f_nu, f_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# integrated flux
(la_phot_f_la, la_f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
# luminosity
(L_la, L_nu, f_la_to_f_nu, f_la_from_f_nu),
(L_nu, nu_L_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(L_la, la_L_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_L_la, L_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_L_la, L_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_L_la, phot_L_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_L_nu, L_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_L_nu, L_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
# surface brightness (flux equiv)
(S_la, S_nu, f_la_to_f_nu, f_la_from_f_nu),
(S_nu, nu_S_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
(S_la, la_S_la, f_la_to_la_f_la, f_la_from_la_f_la),
(phot_S_la, S_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
(phot_S_la, S_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
(phot_S_la, phot_S_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
(phot_S_nu, S_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
(phot_S_nu, S_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
# surface brightness (luminosity equiv)
(SL_la, SL_nu, f_la_to_f_nu, f_la_from_f_nu),
(SL_nu, nu_SL_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
(SL_la, la_SL_la, L_la_to_la_L_la, L_la_from_la_L_la),
(phot_SL_la, SL_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
(phot_SL_la, SL_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
(phot_SL_la, phot_SL_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
(phot_SL_nu, SL_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
(phot_SL_nu, SL_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
],
"spectral_density",
{"wav": wav, "factor": factor},
)
def doppler_radio(rest):
r"""
Return the equivalency pairs for the radio convention for velocity.
The radio convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
>>> radio_velocity # doctest: +FLOAT_CMP
<Quantity -31.209092088877583 km / s>
"""
assert_is_spectral_unit(rest)
rest_in = functools.partial(rest.to_value, equivalencies=spectral())
to_funcs = {
misc.eV: lambda x: (1 - x / rest_in(misc.eV)) * ckms,
si.Hz: lambda x: (1 - x / rest_in(si.Hz)) * ckms,
si.AA: lambda x: (1 - rest_in(si.AA) / x) * ckms,
}
from_funcs = {
misc.eV: lambda x: rest_in(misc.eV) * (1 - x / ckms),
si.Hz: lambda x: rest_in(si.Hz) * (1 - x / ckms),
si.AA: lambda x: rest_in(si.AA) / (1 - x / ckms),
}
return Equivalency(
[
(unit, km_per_s, to_func, from_funcs[unit])
for unit, to_func in to_funcs.items()
],
"doppler_radio",
{"rest": rest},
)
def doppler_optical(rest):
r"""
Return the equivalency pairs for the optical convention for velocity.
The optical convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
>>> optical_velocity # doctest: +FLOAT_CMP
<Quantity -31.20584348799674 km / s>
"""
assert_is_spectral_unit(rest)
rest_in = functools.partial(rest.to_value, equivalencies=spectral())
to_funcs = {
misc.eV: lambda x: (rest_in(misc.eV) / x - 1) * ckms,
si.Hz: lambda x: (rest_in(si.Hz) / x - 1) * ckms,
si.AA: lambda x: (x / rest_in(si.AA) - 1) * ckms,
}
from_funcs = {
misc.eV: lambda x: rest_in(misc.eV) / (1 + x / ckms),
si.Hz: lambda x: rest_in(si.Hz) / (1 + x / ckms),
si.AA: lambda x: rest_in(si.AA) * (1 + x / ckms),
}
return Equivalency(
[
(unit, km_per_s, to_func, from_funcs[unit])
for unit, to_func in to_funcs.items()
],
"doppler_optical",
{"rest": rest},
)
def doppler_relativistic(rest):
    r"""
    Return the equivalency pairs for the relativistic convention for velocity.
    The full relativistic convention for the relation between velocity and frequency is:
    :math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ;  f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`
    Parameters
    ----------
    rest : `~astropy.units.Quantity`
        Any quantity supported by the standard spectral equivalencies
        (wavelength, energy, frequency, wave number).
    References
    ----------
    `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
    Examples
    --------
    >>> import astropy.units as u
    >>> CO_restfreq = 115.27120*u.GHz  # rest frequency of 12 CO 1-0 in GHz
    >>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
    >>> measured_freq = 115.2832*u.GHz
    >>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
    >>> relativistic_velocity  # doctest: +FLOAT_CMP
    <Quantity -31.207467619351537 km / s>
    >>> measured_velocity = 1250 * u.km/u.s
    >>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
    >>> relativistic_frequency  # doctest: +FLOAT_CMP
    <Quantity 114.79156866993588 GHz>
    >>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
    >>> relativistic_wavelength  # doctest: +FLOAT_CMP
    <Quantity 2.6116243681798923 mm>
    """
    # Reject rest values that cannot be converted to a spectral unit.
    assert_is_spectral_unit(rest)
    # Lazily express the rest value in whichever spectral unit each
    # conversion function needs.
    rest_in = functools.partial(rest.to_value, equivalencies=spectral())
    # Frequency -> velocity: V = c (f0^2 - f^2) / (f0^2 + f^2).
    def to_vel_freq(x):
        restfreq2 = rest_in(si.Hz) ** 2
        return (restfreq2 - x**2) / (restfreq2 + x**2) * ckms
    # Velocity -> frequency: f = f0 * sqrt((1 - V/c) / (1 + V/c)).
    def from_vel_freq(x):
        voverc = x / ckms
        return rest_in(si.Hz) * ((1 - voverc) / (1 + voverc)) ** 0.5
    # Wavelength -> velocity; numerator sign is flipped relative to the
    # frequency form because wavelength increases with recession velocity.
    def to_vel_wav(x):
        restwav2 = rest_in(si.AA) ** 2
        return (x**2 - restwav2) / (restwav2 + x**2) * ckms
    # Velocity -> wavelength: lam = lam0 * sqrt((1 + V/c) / (1 - V/c)).
    def from_vel_wav(x):
        voverc = x / ckms
        return rest_in(si.AA) * ((1 + voverc) / (1 - voverc)) ** 0.5
    # Energy -> velocity; same functional form as frequency.
    def to_vel_en(x):
        resten2 = rest_in(misc.eV) ** 2
        return (resten2 - x**2) / (resten2 + x**2) * ckms
    # Velocity -> energy, mirroring the frequency relation.
    def from_vel_en(x):
        voverc = x / ckms
        return rest_in(misc.eV) * ((1 - voverc) / (1 + voverc)) ** 0.5
    # One (spectral unit, km/s, forward, backward) pair per supported unit.
    return Equivalency(
        [
            (si.Hz, km_per_s, to_vel_freq, from_vel_freq),
            (si.AA, km_per_s, to_vel_wav, from_vel_wav),
            (misc.eV, km_per_s, to_vel_en, from_vel_en),
        ],
        "doppler_relativistic",
        {"rest": rest},
    )
@functools.cache
def doppler_redshift():
    """
    Return the equivalence between Doppler redshift (unitless) and radial velocity.

    .. note::

        This equivalency is not compatible with cosmological
        redshift in `astropy.cosmology.units`.
    """
    velocity_unit = si.km / si.s
    c_kms = _si.c.to_value(velocity_unit)

    def z_to_radial_velocity(z):
        # v = c * ((1+z)^2 - 1) / ((1+z)^2 + 1)
        opz_sq = (1 + z) ** 2
        return c_kms * (opz_sq - 1) / (opz_sq + 1)

    def radial_velocity_to_z(rv):
        # 1 + z = sqrt((1 + beta) / (1 - beta)) with beta = v/c
        beta = rv / c_kms
        return np.sqrt((1 + beta) / (1 - beta)) - 1

    return Equivalency(
        [
            (
                dimensionless_unscaled,
                velocity_unit,
                z_to_radial_velocity,
                radial_velocity_to_z,
            )
        ],
        "doppler_redshift",
    )
@functools.cache
def molar_mass_amu():
    """
    Return the equivalence between amu and molar mass.
    """
    # One gram per mole is numerically equivalent to one atomic mass unit.
    molar_mass_unit = si.g / si.mol
    return Equivalency([(molar_mass_unit, misc.u)], "molar_mass_amu")
@functools.cache
def mass_energy():
    """
    Return a list of equivalence pairs that handle the conversion
    between mass and energy.
    """
    c2 = _si.c.value**2

    def mass_to_energy(x):
        # E = m * c**2
        return x * c2

    def energy_to_mass(x):
        # m = E / c**2
        return x / c2

    # The same scaling applies to mass per area, per volume, and per time.
    return Equivalency(
        [
            (si.kg, si.J, mass_to_energy, energy_to_mass),
            (si.kg / si.m**2, si.J / si.m**2, mass_to_energy, energy_to_mass),
            (si.kg / si.m**3, si.J / si.m**3, mass_to_energy, energy_to_mass),
            (si.kg / si.s, si.J / si.s, mass_to_energy, energy_to_mass),
        ],
        "mass_energy",
    )
def brightness_temperature(frequency, beam_area=None):
    r"""
    Defines the conversion between Jy/sr and "brightness temperature",
    :math:`T_B`, in Kelvins.  The brightness temperature is a unit very
    commonly used in radio astronomy.  See, e.g., "Tools of Radio Astronomy"
    (Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google
    books
    <https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).
    :math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`
    If the input is in Jy/beam or Jy (assuming it came from a single beam), the
    beam area is essential for this computation: the brightness temperature is
    inversely proportional to the beam area.
    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength).  The variable is named 'frequency' because it
        is more commonly used in radio astronomy.
        BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
        temperature equivalency used the keyword ``disp``, which is no longer
        supported.
    beam_area : `~astropy.units.Quantity` ['solid angle']
        Beam area in angular units, i.e. steradian equivalent
    Examples
    --------
    Arecibo C-band beam::
        >>> import numpy as np
        >>> from astropy import units as u
        >>> beam_sigma = 50*u.arcsec
        >>> beam_area = 2*np.pi*(beam_sigma)**2
        >>> freq = 5*u.GHz
        >>> equiv = u.brightness_temperature(freq)
        >>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 3.526295144567176 K>
    VLA synthetic beam::
        >>> bmaj = 15*u.arcsec
        >>> bmin = 15*u.arcsec
        >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
        >>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
        >>> freq = 5*u.GHz
        >>> equiv = u.brightness_temperature(freq)
        >>> (u.Jy/beam_area).to(u.K, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 217.2658703625732 K>
    Any generic surface brightness:
    >>> surf_brightness = 1e6*u.MJy/u.sr
    >>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz))  # doctest: +FLOAT_CMP
    <Quantity 130.1931904778803 K>
    """
    # Normalize the input to frequency; `spectral` accepts wavelength,
    # energy, or wave number inputs as well.
    nu = frequency.to(si.GHz, spectral())
    # factor_Jy: the value of 2 k_B (1 K) nu^2 / c^2 expressed in Jy, i.e.
    # the flux density corresponding to T_B = 1 K at this frequency.
    factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
    # factor_K: the value of 1 Jy / (2 k_B nu^2 / c^2) expressed in K,
    # numerically the reciprocal of factor_Jy.
    factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
    if beam_area is not None:
        # With a beam, the flux is spread over `beam` steradians, so T_B
        # scales inversely with the beam solid angle.
        beam = beam_area.to_value(si.sr)
        def convert_Jy_to_K(x_jybm):
            return x_jybm / beam / factor_Jy
        def convert_K_to_Jy(x_K):
            return x_K * beam / factor_K
        return Equivalency(
            [
                (astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),
                (astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),
            ],
            "brightness_temperature",
            {"frequency": frequency, "beam_area": beam_area},
        )
    else:
        # Without a beam, only the per-steradian (surface brightness) form
        # is supported.
        def convert_JySr_to_K(x_jysr):
            return x_jysr / factor_Jy
        def convert_K_to_JySr(x_K):
            return x_K / factor_K  # multiplied by 1x for 1 steradian
        return Equivalency(
            [(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],
            "brightness_temperature",
            {"frequency": frequency, "beam_area": beam_area},
        )
def beam_angular_area(beam_area):
    """
    Convert between the ``beam`` unit, which is commonly used to express the area
    of a radio telescope resolution element, and an area on the sky.

    This equivalency also supports direct conversion between ``Jy/beam`` and
    ``Jy/steradian`` units, since that is a common operation.

    Parameters
    ----------
    beam_area : unit-like
        The area of the beam in angular area units (e.g., steradians)
        Must have angular area equivalent units.
    """
    # Normalize once; the same unit is used for all three pairs below.
    area_unit = Unit(beam_area)
    equivalent_pairs = [
        (astrophys.beam, area_unit),
        (astrophys.beam**-1, area_unit**-1),
        (astrophys.Jy / astrophys.beam, astrophys.Jy / area_unit),
    ]
    return Equivalency(
        equivalent_pairs, "beam_angular_area", {"beam_area": beam_area}
    )
def thermodynamic_temperature(frequency, T_cmb=None):
    r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
    :math:`T_{CMB}`, in Kelvins.  The thermodynamic temperature is a unit very
    commonly used in cosmology. See eqn 8 in [1].
    :math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2  f(\nu) \right)`
    with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`
    where :math:`x = h \nu / k T`
    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength). Must have spectral units.
    T_cmb : `~astropy.units.Quantity` ['temperature'] or None
        The CMB temperature at z=0.  If `None`, the default cosmology will be
        used to get this temperature. Must have units of temperature.
    Notes
    -----
    For broad band receivers, this conversion do not hold
    as it highly depends on the frequency
    References
    ----------
    .. [1] Planck 2013 results. IX. HFI spectral response
       https://arxiv.org/abs/1303.5070
    Examples
    --------
    Planck HFI 143 GHz::
        >>> from astropy import units as u
        >>> from astropy.cosmology import Planck15
        >>> freq = 143 * u.GHz
        >>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
        >>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 0.37993172 MJy / sr>
    """
    # Normalize the input to frequency; `spectral` also accepts wavelength,
    # energy, or wave number.
    nu = frequency.to(si.GHz, spectral())
    if T_cmb is None:
        # Imported lazily here rather than at module level — presumably to
        # avoid a circular import between units and cosmology; TODO confirm.
        from astropy.cosmology import default_cosmology

        T_cmb = default_cosmology.get().Tcmb0
    # Spectral correction factor f(nu) = x^2 e^x / (e^x - 1)^2 with
    # x = h nu / (k T), per the docstring formula.  np.expm1 evaluates
    # e^x - 1 accurately for small x.
    def f(nu, T_cmb=T_cmb):
        x = _si.h * nu / _si.k_B / T_cmb
        return x**2 * np.exp(x) / np.expm1(x) ** 2
    # Intensity (Jy/sr) -> thermodynamic temperature (K).
    def convert_Jy_to_K(x_jybm):
        factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
        return x_jybm / factor
    # Thermodynamic temperature (K) -> intensity (Jy/sr).
    def convert_K_to_Jy(x_K):
        factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(
            si.K
        )
        return x_K / factor
    return Equivalency(
        [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
        "thermodynamic_temperature",
        {"frequency": frequency, "T_cmb": T_cmb},
    )
@functools.cache
def temperature():
    """Convert degrees Celsius and degrees Fahrenheit here because
    Unit and CompositeUnit cannot do addition or subtraction properly.
    """
    from .imperial import deg_F as F

    def kelvin_to_celsius(x):
        return x - 273.15

    def celsius_to_kelvin(x):
        return x + 273.15

    def celsius_to_fahrenheit(x):
        return x * 1.8 + 32.0

    def fahrenheit_to_celsius(x):
        return (x - 32.0) / 1.8

    def kelvin_to_fahrenheit(x):
        return x * 1.8 - 459.67

    def fahrenheit_to_kelvin(x):
        return (x + 459.67) / 1.8

    return Equivalency(
        [
            (si.K, si.deg_C, kelvin_to_celsius, celsius_to_kelvin),
            (si.deg_C, F, celsius_to_fahrenheit, fahrenheit_to_celsius),
            (si.K, F, kelvin_to_fahrenheit, fahrenheit_to_kelvin),
        ],
        "temperature",
    )
@functools.cache
def temperature_energy():
    """Convert between Kelvin and keV(eV) to an equivalent amount."""
    # eV per kelvin conversion factor: e / k_B.
    factor = _si.e.value / _si.k_B.value

    def kelvin_to_ev(x):
        return x / factor

    def ev_to_kelvin(x):
        return x * factor

    return Equivalency(
        [(si.K, misc.eV, kelvin_to_ev, ev_to_kelvin)], "temperature_energy"
    )
def assert_is_spectral_unit(value):
    """Raise `UnitsError` unless ``value`` is a spectral quantity.

    Parameters
    ----------
    value : `~astropy.units.Quantity`
        The candidate rest value; it must be convertible to Hz via the
        standard `spectral` equivalencies (frequency, wavelength, energy,
        or wave number).

    Raises
    ------
    UnitsError
        If ``value`` has no ``.to`` method or cannot be converted to a
        spectral unit.
    """
    try:
        value.to(si.Hz, spectral())
    except (AttributeError, UnitsError) as ex:
        # Chain the triggering exception so the underlying cause remains
        # visible in tracebacks (previously ``ex`` was captured but unused).
        raise UnitsError(
            "The 'rest' value must be a spectral equivalent "
            "(frequency, wavelength, or energy)."
        ) from ex
def pixel_scale(pixscale):
    """
    Convert between pixel distances (in units of ``pix``) and other units,
    given a particular ``pixscale``.

    Parameters
    ----------
    pixscale : `~astropy.units.Quantity`
        The pixel scale either in units of <unit>/pixel or pixel/<unit>.
    """
    # Determine the exponent of the pixel unit in the decomposed pixel scale.
    decomposed_unit = pixscale.unit.decompose()
    powers_by_base = dict(zip(decomposed_unit.bases, decomposed_unit.powers))
    pixel_exponent = powers_by_base.get(misc.pix, 0)

    if pixel_exponent == -1:
        # pixscale is <unit>/pix: multiplying by pix isolates the physical unit.
        physical_unit = Unit(pixscale * misc.pix)
    elif pixel_exponent == 1:
        # pixscale is pix/<unit>: dividing pix by it recovers the physical unit.
        physical_unit = Unit(misc.pix / pixscale)
    else:
        raise UnitsError(
            "The pixel scale unit must have pixel dimensionality of 1 or -1."
        )

    return Equivalency(
        [(misc.pix, physical_unit)], "pixel_scale", {"pixscale": pixscale}
    )
def plate_scale(platescale):
    """
    Convert between lengths (to be interpreted as lengths in the focal plane)
    and angular units with a specified ``platescale``.

    Parameters
    ----------
    platescale : `~astropy.units.Quantity`
        The pixel scale either in units of distance/pixel or distance/angle.
    """
    # Normalize the plate scale to radians per metre, accepting either
    # angle/distance or its reciprocal distance/angle.
    if platescale.unit.is_equivalent(si.arcsec / si.m):
        rad_per_m = platescale.to_value(si.radian / si.m)
    elif platescale.unit.is_equivalent(si.m / si.arcsec):
        rad_per_m = (1 / platescale).to_value(si.radian / si.m)
    else:
        raise UnitsError("The pixel scale must be in angle/distance or distance/angle")

    def length_to_angle(d):
        return d * rad_per_m

    def angle_to_length(a):
        return a / rad_per_m

    return Equivalency(
        [(si.m, si.radian, length_to_angle, angle_to_length)],
        "plate_scale",
        {"platescale": platescale},
    )
def magnetic_flux_field(mu_r=1):
    r"""
    Convert magnetic field between magnetic field strength :math:`(\mathbf{H})` and
    magnetic flux density :math:`(\mathbf{B})` using the relationship:

    .. math::

        \mathbf{B} = \mu_r \mu_0 \mathbf{H}

    where:

    - :math:`\mu_0` is the vacuum permeability, a physical constant.
    - :math:`\mu_r` is the relative permeability of the medium, a dimensionless
      quantity.

    The default setting (:math:`\mu_r=1`) represents conditions in a vacuum.

    Parameters
    ----------
    mu_r : float, optional
        The relative magnetic permeability of the medium. This is a dimensionless quantity
        and has a default value of :math:`\mu_r=1` which corresponds to free space (vacuum).

    Examples
    --------
    >>> import astropy.units as u
    >>> H = 1 * u.Oe
    >>> H.to(u.G, equivalencies=u.magnetic_flux_field())  # doctest: +FLOAT_CMP
    <Quantity 1. G>
    >>> H.to(u.G, equivalencies=u.magnetic_flux_field(mu_r=0.8))  # doctest: +FLOAT_CMP
    <Quantity 0.8 G>
    >>> B = 1 * u.T
    >>> B.to(u.A / u.m, equivalencies=u.magnetic_flux_field())  # doctest: +FLOAT_CMP
    <Quantity 795774.71502628 A / m>
    >>> B.to(u.A / u.m, equivalencies=u.magnetic_flux_field(mu_r=0.8))  # doctest: +FLOAT_CMP
    <Quantity 994718.39378285 A / m>
    """
    mu0 = _si.mu0.value
    return Equivalency(
        [(si.T, si.A / si.m, lambda x: x / (mu_r * mu0), lambda x: x * mu_r * mu0)],
        "magnetic_flux_field",
        # Record the parameter so this equivalency carries its kwargs like
        # the other parameterized equivalencies in this module (pixel_scale,
        # plate_scale, thermodynamic_temperature, ...); without it, two
        # instances with different mu_r are indistinguishable by name/kwargs.
        {"mu_r": mu_r},
    )
def zero_point_flux(flux0):
    """
    An equivalency for converting linear flux units ("maggys") defined relative
    to a standard source into a standardized system.

    Parameters
    ----------
    flux0 : `~astropy.units.Quantity`
        The flux of a magnitude-0 object in the "maggy" system.
    """
    flux_unit = Unit(flux0)
    return Equivalency(
        [(maggy, flux_unit)],
        "zero_point_flux",
        # Record the parameter for consistency with the other parameterized
        # equivalencies in this module (e.g. pixel_scale, plate_scale), so
        # instances built from different zero points are distinguishable.
        {"flux0": flux0},
    )
| Equivalency |
python | getsentry__sentry | src/sentry/db/models/manager/option.py | {
"start": 260,
"end": 925
} | class ____(BaseManager[M]):
@property
def _option_cache(self) -> dict[str, dict[str, Any]]:
if not hasattr(_local_cache, "option_cache"):
_local_cache.option_cache = {}
return _local_cache.option_cache
def clear_local_cache(self, **kwargs: Any) -> None:
self._option_cache.clear()
def contribute_to_class(self, model: type[Model], name: str) -> None:
super().contribute_to_class(model, name)
request_finished.connect(self.clear_local_cache)
def _make_key(self, instance_id: int | str) -> str:
assert instance_id
return f"{self.model._meta.db_table}:{instance_id}"
| OptionManager |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/losses_test.py | {
"start": 1532,
"end": 4832
} | class ____(test.TestCase):
def setUp(self):
super(AbsoluteDifferenceLossTest, self).setUp()
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.cached_session():
with self.assertRaises(ValueError):
losses.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = losses.absolute_difference(self._predictions, self._predictions)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
def testNonZeroLoss(self):
loss = losses.absolute_difference(self._labels, self._predictions)
with self.cached_session():
self.assertAlmostEqual(5.5, self.evaluate(loss), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions,
constant_op.constant(weights))
with self.cached_session():
self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant((1.2, 0.0), shape=(2, 1))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.6, self.evaluate(loss), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(5.6, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(16.6, self.evaluate(loss), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(6.0, self.evaluate(loss), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testEagerNoMemoryLeaked(self):
# This is a somewhat convoluted way of testing that nothing gets added to
# a global collection.
predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
losses.absolute_difference(labels, predictions)
| AbsoluteDifferenceLossTest |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 87463,
"end": 87550
} | class ____(openblas_ilp64_lapack_info, openblas64__info):
pass
| openblas64__lapack_info |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/types.py | {
"start": 13515,
"end": 14029
} | class ____(AirbyteSource):
"""Base class used by the codegen Airbyte sources. This class is not intended to be used directly.
Converts all of its attributes into a source configuration dict which is passed down to the base
AirbyteSource class.
"""
def __init__(self, source_type: str, name: str):
source_configuration = _dump_class(self)
super().__init__(
name=name, source_type=source_type, source_configuration=source_configuration
)
| GeneratedAirbyteSource |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/pretty.py | {
"start": 17234,
"end": 36436
} | class ____:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def _is_namedtuple(obj: Any) -> bool:
"""Checks if an object is most likely a namedtuple. It is possible
to craft an object that passes this check and isn't a namedtuple, but
there is only a minuscule chance of this happening unintentionally.
Args:
obj (Any): The object to test
Returns:
bool: True if the object is a namedtuple. False otherwise.
"""
try:
fields = getattr(obj, "_fields", None)
except Exception:
# Being very defensive - if we cannot get the attr then its not a namedtuple
return False
return isinstance(obj, tuple) and isinstance(fields, tuple)
def traverse(
_object: Any,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and _safe_isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
"""Walk the object depth first."""
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
obj_type = type(obj)
children: List[Node]
reached_max_depth = max_depth is not None and depth >= max_depth
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if _safe_isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
push_visited(obj_id)
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if reached_max_depth:
if angular:
node = Node(value_repr=f"<{class_name}...>")
else:
node = Node(value_repr=f"{class_name}(...)")
else:
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if _safe_isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child, depth=depth + 1)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg, depth=depth + 1)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
pop_visited(obj_id)
elif _is_attr_object(obj) and not fake_attributes:
push_visited(obj_id)
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> (
Iterable[Tuple[str, Any, Optional[Callable[[Any], str]]]]
):
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value, depth=depth + 1)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
pop_visited(obj_id)
elif (
is_dataclass(obj)
and not _safe_isinstance(obj, type)
and not fake_attributes
and _is_dataclass_repr(obj)
):
push_visited(obj_id)
children = []
append = children.append
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
empty=f"{obj.__class__.__name__}()",
)
for last, field in loop_last(
field
for field in fields(obj)
if field.repr and hasattr(obj, field.name)
):
child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
push_visited(obj_id)
class_name = obj.__class__.__name__
if reached_max_depth:
# If we've reached the max depth, we still show the class name, but not its contents
node = Node(
value_repr=f"{class_name}(...)",
)
else:
children = []
append = children.append
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
empty=f"{class_name}()",
)
for last, (key, value) in loop_last(obj._asdict().items()):
child_node = _traverse(value, depth=depth + 1)
child_node.key_repr = key
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _safe_isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if _safe_isinstance(obj, container_type):
obj_type = container_type
break
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if reached_max_depth:
node = Node(value_repr=f"{open_brace}...{close_brace}")
elif obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if _safe_isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child, depth=depth + 1)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child, depth=depth + 1)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items - max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = type(obj) == tuple
node.is_namedtuple = _is_namedtuple(obj)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if _safe_isinstance(_object, Node):
node = _object
else:
node = traverse(
_object, max_length=max_length, max_string=max_string, max_depth=max_depth
)
repr_str: str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
from typing import NamedTuple
class StockKeepingUnit(NamedTuple):
name: str
description: str
price: float
category: str
reviews: List[str]
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"namedtuple": StockKeepingUnit(
"Sparkling British Spring Water",
"Carbonated spring water",
0.9,
"water",
["its amazing!", "its terrible!"],
),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore[attr-defined]
from pipenv.patched.pip._vendor.rich import print
print(Pretty(data, indent_guides=True, max_string=20))
class Thing:
def __repr__(self) -> str:
return "Hello\x1b[38;5;239m World!"
print(Pretty(Thing()))
| _Line |
python | gabrielfalcao__HTTPretty | tests/functional/base.py | {
"start": 2400,
"end": 3671
} | class ____(threading.Thread):
def __init__(self, lock, port, *args, **kw):
self.lock = lock
self.port = int(port)
self._stop = threading.Event()
super(JSONEchoServer, self).__init__(*args, **kw)
self.daemon = True
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def setup_application(self):
return tornado.web.Application([
(r"/(.*)", JSONEchoHandler),
])
def run(self):
loop = tornado.ioloop.IOLoop()
application = self.setup_application()
application.listen(self.port)
self.lock.release()
loop.start()
def use_tornado_server(callback):
lock = threading.Lock()
lock.acquire()
@wraps(callback)
def func(*args, **kw):
port = os.getenv('TEST_PORT', get_free_tcp_port())
POTENTIAL_HTTP_PORTS.add(port)
kw['port'] = port
server = JSONEchoServer(lock, port)
server.start()
try:
lock.acquire()
callback(*args, **kw)
finally:
lock.release()
server.stop()
if port in POTENTIAL_HTTP_PORTS:
POTENTIAL_HTTP_PORTS.remove(port)
return func
| JSONEchoServer |
python | astropy__astropy | astropy/time/formats.py | {
"start": 78945,
"end": 79580
} | class ____(TimeFormat):
"""Base class for time delta representations."""
_registry = TIME_DELTA_FORMATS
_default_precision = 3
# Somewhat arbitrary values that are effectively no limit for precision.
_min_precision = -99
_max_precision = 99
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`.
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_DELTA_SCALES}"
)
return scale
| TimeDeltaFormat |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/operators/databricks_workflow.py | {
"start": 2468,
"end": 10285
} | class ____(BaseOperator):
"""
Creates a Databricks workflow from a DatabricksWorkflowTaskGroup specified in a DAG.
:param task_id: The task_id of the operator
:param databricks_conn_id: The connection ID to use when connecting to Databricks.
:param existing_clusters: A list of existing clusters to use for the workflow.
:param extra_job_params: A dictionary of extra properties which will override the default Databricks
Workflow Job definitions.
:param job_clusters: A list of job clusters to use for the workflow.
:param max_concurrent_runs: The maximum number of concurrent runs for the workflow.
:param notebook_params: A dictionary of notebook parameters to pass to the workflow. These parameters
will be passed to all notebooks in the workflow.
:param tasks_to_convert: A dict of tasks to convert to a Databricks workflow. This list can also be
populated after instantiation using the `add_task` method.
"""
template_fields = ("notebook_params", "job_clusters")
caller = "_CreateDatabricksWorkflowOperator"
# Conditionally set operator_extra_links based on Airflow version
if AIRFLOW_V_3_0_PLUS:
# In Airflow 3, disable "Repair All Failed Tasks" since we can't pre-determine failed tasks
operator_extra_links = (WorkflowJobRunLink(),)
else:
# In Airflow 2.x, keep both links
operator_extra_links = ( # type: ignore[assignment]
WorkflowJobRunLink(),
WorkflowJobRepairAllFailedLink(),
)
def __init__(
self,
task_id: str,
databricks_conn_id: str,
existing_clusters: list[str] | None = None,
extra_job_params: dict[str, Any] | None = None,
job_clusters: list[dict[str, object]] | None = None,
max_concurrent_runs: int = 1,
notebook_params: dict | None = None,
tasks_to_convert: dict[str, BaseOperator] | None = None,
**kwargs,
):
self.databricks_conn_id = databricks_conn_id
self.existing_clusters = existing_clusters or []
self.extra_job_params = extra_job_params or {}
self.job_clusters = job_clusters or []
self.max_concurrent_runs = max_concurrent_runs
self.notebook_params = notebook_params or {}
self.tasks_to_convert = tasks_to_convert or {}
self.relevant_upstreams = [task_id]
self.workflow_run_metadata: WorkflowRunMetadata | None = None
super().__init__(task_id=task_id, **kwargs)
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
caller=caller,
)
@cached_property
def _hook(self) -> DatabricksHook:
return self._get_hook(caller=self.caller)
def add_task(self, task_id, task: BaseOperator) -> None:
"""Add a task to the dict of tasks to convert to a Databricks workflow."""
self.tasks_to_convert[task_id] = task
@property
def job_name(self) -> str:
if not self.task_group:
raise AirflowException("Task group must be set before accessing job_name")
return f"{self.dag_id}.{self.task_group.group_id}"
def create_workflow_json(self, context: Context | None = None) -> dict[str, object]:
"""Create a workflow json to be used in the Databricks API."""
task_json = [
task._convert_to_databricks_workflow_task( # type: ignore[attr-defined]
relevant_upstreams=self.relevant_upstreams, task_dict=self.tasks_to_convert, context=context
)
for task_id, task in self.tasks_to_convert.items()
]
default_json = {
"name": self.job_name,
"email_notifications": {"no_alert_for_skipped_runs": False},
"timeout_seconds": 0,
"tasks": task_json,
"format": "MULTI_TASK",
"job_clusters": self.job_clusters,
"max_concurrent_runs": self.max_concurrent_runs,
}
return merge(default_json, self.extra_job_params)
def _create_or_reset_job(self, context: Context) -> int:
job_spec = self.create_workflow_json(context=context)
existing_jobs = self._hook.list_jobs(job_name=self.job_name)
job_id = existing_jobs[0]["job_id"] if existing_jobs else None
if job_id:
self.log.info(
"Updating existing Databricks workflow job %s with spec %s",
self.job_name,
json.dumps(job_spec, indent=2),
)
self._hook.reset_job(job_id, job_spec)
else:
self.log.info(
"Creating new Databricks workflow job %s with spec %s",
self.job_name,
json.dumps(job_spec, indent=2),
)
job_id = self._hook.create_job(job_spec)
return job_id
def _wait_for_job_to_start(self, run_id: int) -> None:
run_url = self._hook.get_run_page_url(run_id)
self.log.info("Check the progress of the Databricks job at %s", run_url)
life_cycle_state = self._hook.get_run_state(run_id).life_cycle_state
if life_cycle_state not in (
RunLifeCycleState.PENDING.value,
RunLifeCycleState.RUNNING.value,
RunLifeCycleState.BLOCKED.value,
):
raise AirflowException(f"Could not start the workflow job. State: {life_cycle_state}")
while life_cycle_state in (RunLifeCycleState.PENDING.value, RunLifeCycleState.BLOCKED.value):
self.log.info("Waiting for the Databricks job to start running")
time.sleep(5)
life_cycle_state = self._hook.get_run_state(run_id).life_cycle_state
self.log.info("Databricks job started. State: %s", life_cycle_state)
def execute(self, context: Context) -> Any:
if not isinstance(self.task_group, DatabricksWorkflowTaskGroup):
raise AirflowException("Task group must be a DatabricksWorkflowTaskGroup")
job_id = self._create_or_reset_job(context)
run_id = self._hook.run_now(
{
"job_id": job_id,
"jar_params": self.task_group.jar_params,
"notebook_params": self.notebook_params,
"python_params": self.task_group.python_params,
"spark_submit_params": self.task_group.spark_submit_params,
}
)
self._wait_for_job_to_start(run_id)
self.workflow_run_metadata = WorkflowRunMetadata(
self.databricks_conn_id,
job_id,
run_id,
)
# Store operator links in XCom for Airflow 3 compatibility
if AIRFLOW_V_3_0_PLUS:
# Store the job run link
store_databricks_job_run_link(
context=context,
metadata=self.workflow_run_metadata,
logger=self.log,
)
return {
"conn_id": self.databricks_conn_id,
"job_id": job_id,
"run_id": run_id,
}
def on_kill(self) -> None:
if self.workflow_run_metadata:
run_id = self.workflow_run_metadata.run_id
job_id = self.workflow_run_metadata.job_id
self._hook.cancel_run(run_id)
self.log.info(
"Run: %(run_id)s of job_id: %(job_id)s was requested to be cancelled.",
{"run_id": run_id, "job_id": job_id},
)
else:
self.log.error(
"""
Error: Workflow Run metadata is not populated, so the run was not canceled. This could be due
to the workflow not being started or an error in the workflow creation process.
"""
)
| _CreateDatabricksWorkflowOperator |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 17776,
"end": 18348
} | class ____(util.MdCase):
"""Test nested no bounds."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': os.path.join(BASE, '_snippets', 'nested'),
'restrict_base_path': False
}
}
def test_restricted(self):
"""Test file restriction."""
self.check_markdown(
R'''
--8<-- "../b.txt"
''',
'''
<p>Snippet</p>
''',
True
)
| TestSnippetsNestedUnrestricted |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol40.py | {
"start": 480,
"end": 583
} | class ____(Protocol[T]):
def f0(self, right: Self, /) -> "P2Parent[T]":
return right
| P2Parent |
python | pdm-project__pdm | src/pdm/cli/utils.py | {
"start": 1186,
"end": 4655
} | class ____(argparse.RawDescriptionHelpFormatter):
def start_section(self, heading: str | None) -> None:
return super().start_section(termui.style(heading.title() if heading else "", style="warning"))
def _format_usage(
self,
usage: str | None,
actions: Iterable[Action],
groups: Iterable[_ArgumentGroup],
prefix: str | None,
) -> str:
if prefix is None:
prefix = "Usage: "
result = super()._format_usage(usage, actions, groups, prefix) # type: ignore[arg-type]
# Remove continuous spaces
result = re.sub(r" +", " ", result)
if prefix:
return result.replace(prefix, termui.style(prefix, style="warning"))
return result
def _format_action(self, action: Action) -> str:
# determine the required width and the entry label
help_position = min(self._action_max_length + 2, self._max_help_position)
help_width = max(self._width - help_position, 11)
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on same line and add a final newline
if not action.help:
tup = "", self._current_indent, action_header
action_header = "{0:>{1}}{2}\n".format(*tup)
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = "", self._current_indent, action_header, action_width # type: ignore[assignment]
action_header = "{0:>{1}}{2:<{3}} ".format(*tup)
indent_first = 0
# long action name; start on the next line
else:
tup = "", self._current_indent, action_header
action_header = "{0:>{1}}{2}\n".format(*tup)
indent_first = help_position
# Special format for empty action_header
# - No extra indent block
# - Help info in the same indent level as subactions
if not action_header.strip():
action_header = ""
help_position = self._current_indent
indent_first = self._current_indent
# collect the pieces of the action help
parts = [termui.style(action_header, style="primary")]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = []
for help_line in help_text.split("\n"):
help_lines += self._split_lines(help_line, help_width)
parts.append("{:>{}}{}\n".format("", indent_first, help_lines[0]))
for line in help_lines[1:]:
parts.append("{:>{}}{}\n".format("", help_position, line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith("\n"):
if action_header:
parts.append("\n")
# cancel out extra indent when action_header is empty
if not action_header:
self._dedent()
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# cancel out extra dedent when action_header is empty
if not action_header:
self._indent()
# return a single string
return self._join_parts(parts)
| PdmFormatter |
python | jazzband__django-pipeline | tests/tests/test_compiler.py | {
"start": 5516,
"end": 5997
} | class ____(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_compile(self):
paths = self.compiler.compile([_("pipeline/js/dummy.coffee")])
default_collector.collect()
self.assertEqual([_("pipeline/js/dummy.junk")], list(paths))
def tearDown(self):
default_collector.clear()
@pipeline_settings(COMPILERS=["tests.tests.test_compiler.InvalidCompiler"])
| CompilerWithEmptyFirstArgTest |
python | scrapy__scrapy | scrapy/http/request/__init__.py | {
"start": 1045,
"end": 2259
} | class ____(TypedDict):
name: str | bytes
value: str | bytes | bool | float | int
domain: NotRequired[str | bytes]
path: NotRequired[str | bytes]
secure: NotRequired[bool]
CookiesT: TypeAlias = dict[str, str] | list[VerboseCookie]
RequestTypeVar = TypeVar("RequestTypeVar", bound="Request")
def NO_CALLBACK(*args: Any, **kwargs: Any) -> NoReturn:
"""When assigned to the ``callback`` parameter of
:class:`~scrapy.Request`, it indicates that the request is not meant
to have a spider callback at all.
For example:
.. code-block:: python
Request("https://example.com", callback=NO_CALLBACK)
This value should be used by :ref:`components <topics-components>` that
create and handle their own requests, e.g. through
:meth:`scrapy.core.engine.ExecutionEngine.download`, so that downloader
middlewares handling such requests can treat them differently from requests
intended for the :meth:`~scrapy.Spider.parse` callback.
"""
raise RuntimeError(
"The NO_CALLBACK callback has been called. This is a special callback "
"value intended for requests whose callback is never meant to be "
"called."
)
| VerboseCookie |
python | numpy__numpy | numpy/lib/tests/test_io.py | {
"start": 49012,
"end": 103191
} | class ____(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.genfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
# Test squeezing to 1D
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.genfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
# Test the stripping of comments
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
# Test row skipping
control = np.array([1, 2, 3, 5], int)
kwargs = {"dtype": int, "delimiter": ','}
#
data = TextIO('comment\n1,2,3,5\n')
test = np.genfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = [f"# {i}" for i in range(1, 6)]
data.append("A, B, C")
data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)])
data[-1] = "99,99"
kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10}
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConversionWarning)
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None, names=True,
encoding='bytes')
assert_(w[0].category is VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None, encoding='bytes')
assert_(w[0].category is VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test[f'f{i}'], ctrl)
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.genfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
# Test overwriting the names of the dtype
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.genfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_bad_fname(self):
with pytest.raises(TypeError, match='fname must be a string,'):
np.genfromtxt(123)
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None,
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None,
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
assert_equal(test, ctrl)
def test_names_and_comments_none(self):
# Tests case when names is true but comments is None (gh-10780)
data = TextIO('col1 col2\n 1 2\n 3 4')
test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
def test_file_is_closed_on_error(self):
# gh-13200
with tempdir() as tmpdir:
fpath = os.path.join(tmpdir, "test.csv")
with open(fpath, "wb") as f:
f.write('\N{GREEK PI SYMBOL}'.encode())
# ResourceWarnings are emitted from a destructor, so won't be
# detected by regular propagation to errors.
with assert_no_warnings():
with pytest.raises(UnicodeDecodeError):
np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None, encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, encoding="bytes",
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
# Test the conversion to datetime.
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
# Test the conversion to datetime64.
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
test = np.genfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.genfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
((b'r' not in x.lower() and x.strip()) or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
((b'%' not in x.lower() and x.strip()) or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = {
"converters": {2: strip_per, 3: strip_rand}, "delimiter": ",",
"dtype": None, "encoding": "bytes"}
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
# Test some corner cases
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
@pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning")
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3}
dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv, encoding="bytes")
control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)],
dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')]
test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0, 1, 3), names=None, converters=conv,
encoding="bytes")
control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
# nested but empty fields also aren't supported
ndtype = [('idx', int), ('code', object), ('nest', [])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
def test_dtype_with_object_no_converter(self):
# Object without a converter uses bytes:
parsed = np.genfromtxt(TextIO("1"), dtype=object)
assert parsed[()] == b"1"
parsed = np.genfromtxt(TextIO("string"), dtype=object)
assert parsed[()] == b"string"
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: str},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = {"names": "a, b, c"}
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
# when skip_header > 0
test = np.genfromtxt(data, skip_header=1)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True}
test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = {"dtype": None, "delimiter": ",", "names": True}
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j},
usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = {"delimiter": ",",
"dtype": int,
"names": "a,b,c",
"missing_values": {0: "N/A", 'b': " ", 2: "???"},
"filling_values": {0: 0, 'b': 0, 2: -999}}
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.genfromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = {"delimiter": ",", "dtype": None, "names": True}
def f():
return np.genfromtxt(mdata, invalid_raise=False, **kwargs)
mtest = pytest.warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = {"delimiter": ",", "dtype": None, "names": True,
"invalid_raise": False}
def f():
return np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
mtest = pytest.warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: f"({x.decode()})"}
kwargs = {"delimiter": ",", "converters": converters,
"dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"}
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"}
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = {"delimiter": ",", "names": True}
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
# Make sure that names are properly completed
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fix-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None}
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = {"delimiter": 5, "names": True, "dtype": None}
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999}
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',',
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',',
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], "test1")
assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], "test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',',
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding="bytes")
assert_(w[0].category is VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.str_)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = '\u03d6'
latin1 = '\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with open(path, "wt") as f:
f.write("norm1,norm2,norm3\n")
f.write("norm1," + latin1 + ",norm3\n")
f.write("test1,testNonethe" + utf8 + ",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="bytes")
# Check for warning when encoding not specified.
assert_(w[0].category is VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.str_)
assert_array_equal(test, ctl)
@pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning")
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True}
test = recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning")
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True,
"encoding": "bytes"}
test = recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
# gh-10394
data = TextIO('color\n"red"\n"blue"')
test = recfromcsv(data, converters={0: lambda x: x.strip('\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
# Regression test for numpy/numpy#5635 whereby large integers could
# cause OverflowErrors.
# Test the automatic definition of the output dtype
#
# 2**66 = 73786976294838206464 => should convert to float
# 2**34 = 17179869184 => should convert to int64
# 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
# int64 on 64-bit systems)
data = TextIO('73786976294838206464 17179869184 1024')
test = np.genfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.int_)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
def test_unpack_float_data(self):
txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0")
a, b, c = np.loadtxt(txt, delimiter=",", unpack=True)
assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0]))
assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0]))
assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0]))
def test_unpack_structured(self):
# Regression test for gh-4341
# Unpacking should work on structured arrays
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_equal(a.dtype, np.dtype('S1'))
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(c.dtype, np.dtype('f4'))
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_unpack_auto_dtype(self):
# Regression test for gh-4341
# Unpacking should work when dtype=None
txt = TextIO("M 21 72.\nF 35 58.")
expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
for arr, result in zip(expected, test):
assert_array_equal(arr, result)
assert_equal(arr.dtype, result.dtype)
def test_unpack_single_name(self):
# Regression test for gh-4341
# Unpacking should work when structured dtype has only one field
txt = TextIO("21\n35")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array([21, 35], dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal(expected.dtype, test.dtype)
def test_squeeze_scalar(self):
# Regression test for gh-4341
# Unpacking a scalar should give zero-dim output,
# even if dtype is structured
txt = TextIO("1")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array((1,), dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal((), test.shape)
assert_equal(expected.dtype, test.dtype)
@pytest.mark.parametrize("ndim", [0, 1, 2])
def test_ndmin_keyword(self, ndim: int):
# let's have the same behaviour of ndmin as loadtxt
# as they should be the same for non-missing values
txt = "42"
a = np.loadtxt(StringIO(txt), ndmin=ndim)
b = np.genfromtxt(StringIO(txt), ndmin=ndim)
assert_array_equal(a, b)
| TestFromTxt |
python | redis__redis-py | redis/commands/search/hybrid_query.py | {
"start": 11991,
"end": 12307
} | class ____(Filter):
def __init__(
self,
conditions: str,
) -> None:
"""
Create a new hybrid filter object.
Args:
conditions: Filter conditions.
"""
args = [conditions]
Filter.__init__(self, "FILTER", *args)
@experimental
| HybridFilter |
python | huggingface__transformers | src/transformers/models/dinat/modeling_dinat.py | {
"start": 12329,
"end": 12782
} | class ____(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| NeighborhoodAttentionOutput |
python | cython__cython | tests/run/test_grammar.py | {
"start": 4213,
"end": 10420
} | class ____(unittest.TestCase):
#from test.support import check_syntax_error
check_syntax_error = check_syntax_error
def test_backslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
# Backslash does not means continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def test_plain_integers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def test_long_integers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def test_floats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def test_float_exponent_tokenization(self):
# See issue 21642.
self.assertEqual(1 if 1else 0, 1)
self.assertEqual(1 if 0else 0, 0)
self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
@skip("Done more efficiently in TestGrammar")
def test_underscore_literals(self):
for lit in VALID_UNDERSCORE_LITERALS:
self.assertEqual(eval(lit), eval(lit.replace('_', '')))
for lit in INVALID_UNDERSCORE_LITERALS:
self.assertRaises(SyntaxError, eval, lit)
# Sanity check: no literal begins with an underscore
self.assertRaises(NameError, eval, "_0")
def test_bad_numerical_literals(self):
check = self.check_syntax_error
check("0b12", "invalid digit '2' in binary literal")
check("0b1_2", "invalid digit '2' in binary literal")
check("0b2", "invalid digit '2' in binary literal")
check("0b1_", "invalid binary literal")
check("0b", "invalid binary literal")
check("0o18", "invalid digit '8' in octal literal")
check("0o1_8", "invalid digit '8' in octal literal")
check("0o8", "invalid digit '8' in octal literal")
check("0o1_", "invalid octal literal")
check("0o", "invalid octal literal")
check("0x1_", "invalid hexadecimal literal")
check("0x", "invalid hexadecimal literal")
check("1_", "invalid decimal literal")
# FIXME: not currently a syntax error
"""
check("012",
"leading zeros in decimal integer literals are not permitted; "
"use an 0o prefix for octal integers")
"""
check("1.2_", "invalid decimal literal")
check("1e2_", "invalid decimal literal")
check("1e+", "invalid decimal literal")
def test_string_literals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
def test_ellipsis(self):
x = ...
self.assertTrue(x is Ellipsis)
# FIXME: why is this not rejected ???
#self.assertRaises(SyntaxError, eval, ".. .")
def test_eof_error(self):
samples = ("def foo(", "\ndef foo(", "def foo(\n")
for s in samples:
with self.assertRaises(SyntaxError) as cm:
compile(s, "<test>", "exec")
self.assertIn("unexpected EOF", str(cm.exception))
var_annot_global: int # a global annotated is necessary for test_var_annot
# custom namespace for testing __annotations__
| TokenTests |
python | pytorch__pytorch | torch/distributions/utils.py | {
"start": 6116,
"end": 8027
} | class ____(lazy_property[T, R], property):
"""We want lazy properties to look like multiple things.
* property when Sphinx autodoc looks
* lazy_property when Distribution validate_args looks
"""
def __init__(self, wrapped: Callable[[T], R]) -> None:
property.__init__(self, wrapped)
def tril_matrix_to_vec(mat: Tensor, diag: int = 0) -> Tensor:
r"""
Convert a `D x D` matrix or a batch of matrices into a (batched) vector
which comprises of lower triangular elements from the matrix in row order.
"""
n = mat.shape[-1]
if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n - 1}].")
arange = torch.arange(n, device=mat.device)
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
vec = mat[..., tril_mask]
return vec
def vec_to_tril_matrix(vec: Tensor, diag: int = 0) -> Tensor:
r"""
Convert a vector or a batch of vectors into a batched `D x D`
lower triangular matrix containing elements from the vector in row order.
"""
# +ve root of D**2 + (1+2*diag)*D - |diag| * (diag+1) - 2*vec.shape[-1] = 0
n = (
-(1 + 2 * diag)
+ ((1 + 2 * diag) ** 2 + 8 * vec.shape[-1] + 4 * abs(diag) * (diag + 1)) ** 0.5
) / 2
eps = torch.finfo(vec.dtype).eps
if not torch._C._get_tracing_state() and (round(n) - n > eps):
raise ValueError(
f"The size of last dimension is {vec.shape[-1]} which cannot be expressed as "
+ "the lower triangular part of a square D x D matrix."
)
n = round(n.item()) if isinstance(n, torch.Tensor) else round(n)
mat = vec.new_zeros(vec.shape[:-1] + torch.Size((n, n)))
arange = torch.arange(n, device=vec.device)
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
mat[..., tril_mask] = vec
return mat
| _lazy_property_and_property |
python | viewflow__viewflow | viewflow/workflow/flow/views/list.py | {
"start": 350,
"end": 1490
} | class ____(
mixins.StoreRequestPathMixin,
mixins.ProcessViewTemplateNames,
ListModelView,
):
"""List of current user assigned tasks of a flow"""
flow_class = None
template_filename = "process_tasks_list.html"
title = _("Inbox")
columns = ("task_id", "task_title", "brief", "created")
filterset_class = filters.FlowUserTaskListFilter
def task_id(self, task):
task_url = task.flow_task.reverse("index", args=[task.process_id, task.pk])
return mark_safe(f'<a href="{task_url}">#{task.process_id}/{task.pk}</a>')
task_id.short_description = _("#")
def task_title(self, obj):
return obj.title
task_title.short_description = _("Task")
@property
def model(self):
return self.flow_class.task_class
@viewprop
def queryset(self):
"""List of tasks assigned to the current user."""
queryset = self.model._default_manager.all()
return queryset.filter(
process__flow_class=self.flow_class,
owner=self.request.user,
status=STATUS.ASSIGNED,
).order_by("-created")
| FlowInboxListView |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 801,
"end": 845
} | class ____(
object # )
,
):
...
| A |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_values_to_not_match_like_pattern.py | {
"start": 2071,
"end": 13578
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnValuesToNotMatchLikePattern is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
like_pattern (str): \
{LIKE_PATTERN_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1.
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)
[ExpectColumnValuesToMatchRegexList](https://greatexpectations.io/expectations/expect_column_values_to_match_regex_list)
[ExpectColumnValuesToNotMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)
[ExpectColumnValuesToNotMatchRegexList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex_list)
[ExpectColumnValuesToMatchLikePattern](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern)
[ExpectColumnValuesToMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)
[ExpectColumnValuesToNotMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 "aaa" "ade"
1 "abb" "bee"
2 "acc" "24601"
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToNotMatchLikePattern(
column="test2",
like_pattern="[a]%",
mostly=.66
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 1,
"unexpected_percent": 33.33333333333333,
"partial_unexpected_list": [
"ade",
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 33.33333333333333,
"unexpected_percent_nonmissing": 33.33333333333333
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToNotMatchLikePattern(
column="test",
like_pattern="[a]%"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 3,
"unexpected_percent": 100,
"partial_unexpected_list": [
"aaa",
"abb",
"acc"
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 100,
"unexpected_percent_nonmissing": 100
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
like_pattern: Union[str, SuiteParameterDict] = pydantic.Field(
description=LIKE_PATTERN_DESCRIPTION
)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.not_match_like_pattern"
success_keys = (
"mostly",
"like_pattern",
)
args_keys = (
"column",
"like_pattern",
)
class Config:
title = "Expect column values to not match like pattern"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValuesToNotMatchLikePattern]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("like_pattern", RendererValueType.STRING),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if renderer_configuration.include_column_name:
template_str = "$column values "
else:
template_str = "Values "
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += (
"must not match like pattern $like_pattern, at least $mostly_pct % of the time."
)
else:
template_str += "must not match like pattern $like_pattern."
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> List[RenderedStringTemplateContent]:
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "like_pattern", "mostly"],
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
mostly_str = "" if params.get("mostly") is None else ", at least $mostly_pct % of the time"
like_pattern = params.get("like_pattern") # noqa: F841 # FIXME CoP
template_str = f"Values must not match like pattern : $like_pattern {mostly_str} "
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectColumnValuesToNotMatchLikePattern |
python | doocs__leetcode | solution/2300-2399/2379.Minimum Recolors to Get K Consecutive Black Blocks/Solution.py | {
"start": 0,
"end": 284
} | class ____:
def minimumRecolors(self, blocks: str, k: int) -> int:
ans = cnt = blocks[:k].count('W')
for i in range(k, len(blocks)):
cnt += blocks[i] == 'W'
cnt -= blocks[i - k] == 'W'
ans = min(ans, cnt)
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolModule2.py | {
"start": 1809,
"end": 1902
} | class ____(Protocol):
@property
def var_1(self) -> int: ...
v6: P6 = protocolModule1
| P6 |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 9847,
"end": 13497
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "basic"
expected_hash = "822ac7501287"
@patch.object(CompressCommand, "compress")
def test_handle_no_args(self, compress_mock):
compress_mock.return_value = {}, 1, []
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 1)
@patch.object(CompressCommand, "compress")
def test_handle_compress_disabled(self, compress_mock):
with self.settings(COMPRESS_ENABLED=False):
with self.assertRaises(CommandError):
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 0)
@patch.object(CompressCommand, "compress")
def test_handle_compress_offline_disabled(self, compress_mock):
with self.settings(COMPRESS_OFFLINE=False):
with self.assertRaises(CommandError):
CompressCommand().handle()
self.assertEqual(compress_mock.call_count, 0)
@patch.object(CompressCommand, "compress")
def test_handle_compress_offline_disabled_force(self, compress_mock):
compress_mock.return_value = {}, 1, []
with self.settings(COMPRESS_OFFLINE=False):
CompressCommand().handle(force=True)
self.assertEqual(compress_mock.call_count, 1)
def test_rendering_without_manifest_raises_exception(self):
# flush cached manifest
flush_offline_manifest()
self.assertRaises(OfflineGenerationError, self.template.render, Context({}))
def test_rendering_without_manifest_raises_exception_jinja2(self):
# flush cached manifest
flush_offline_manifest()
self.assertRaises(OfflineGenerationError, self.template_jinja2.render, {})
def _test_deleting_manifest_does_not_affect_rendering(self, engine):
count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
get_offline_manifest()
manifest_filename = "manifest.json"
if default_offline_manifest_storage.exists(manifest_filename):
default_offline_manifest_storage.delete(manifest_filename)
self.assertEqual(1, count)
self.assertEqual([self._render_script(self.expected_hash)], result)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result))
def test_deleting_manifest_does_not_affect_rendering(self):
for engine in self.engines:
self._test_deleting_manifest_does_not_affect_rendering(engine)
def test_get_loaders(self):
TEMPLATE_LOADERS = (
(
"django.template.loaders.cached.Loader",
(
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
),
),
)
with self.settings(TEMPLATE_LOADERS=TEMPLATE_LOADERS):
from django.template.loaders.filesystem import Loader as FileSystemLoader
from django.template.loaders.app_directories import (
Loader as AppDirectoriesLoader,
)
loaders = CompressCommand().get_loaders()
self.assertTrue(isinstance(loaders[0], FileSystemLoader))
self.assertTrue(isinstance(loaders[1], AppDirectoriesLoader))
@patch(
"compressor.offline.django.DjangoParser.render_node",
side_effect=Exception(b"non-ascii character here:\xc3\xa4"),
)
def test_non_ascii_exception_messages(self, mock):
with self.assertRaises(CommandError):
CompressCommand().handle(verbosity=0)
| OfflineCompressBasicTestCase |
python | PyCQA__isort | isort/io.py | {
"start": 452,
"end": 2067
} | class ____:
stream: TextIO
path: Path
encoding: str
@staticmethod
def detect_encoding(filename: str | Path, readline: Callable[[], bytes]) -> str:
try:
return tokenize.detect_encoding(readline)[0]
except Exception:
raise UnsupportedEncoding(filename)
@staticmethod
def from_contents(contents: str, filename: str) -> "File":
encoding = File.detect_encoding(filename, BytesIO(contents.encode("utf-8")).readline)
return File(stream=StringIO(contents), path=Path(filename).resolve(), encoding=encoding)
@property
def extension(self) -> str:
return self.path.suffix.lstrip(".")
@staticmethod
def _open(filename: str | Path) -> TextIOWrapper:
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, "rb")
try:
encoding = File.detect_encoding(filename, buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True, newline="")
text.mode = "r" # type: ignore
return text
except Exception:
buffer.close()
raise
@staticmethod
@contextmanager
def read(filename: str | Path) -> Iterator["File"]:
file_path = Path(filename).resolve()
stream = None
try:
stream = File._open(file_path)
yield File(stream=stream, path=file_path, encoding=stream.encoding)
finally:
if stream is not None:
stream.close()
| File |
python | doocs__leetcode | solution/1800-1899/1830.Minimum Number of Operations to Make String Sorted/Solution.py | {
"start": 151,
"end": 591
} | class ____:
def makeStringSorted(self, s: str) -> int:
cnt = Counter(s)
ans, n = 0, len(s)
for i, c in enumerate(s):
m = sum(v for a, v in cnt.items() if a < c)
t = f[n - i - 1] * m
for v in cnt.values():
t = t * g[v] % mod
ans = (ans + t) % mod
cnt[c] -= 1
if cnt[c] == 0:
cnt.pop(c)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/evolla/modular_evolla.py | {
"start": 5970,
"end": 6020
} | class ____(EsmEncoder):
pass
| EvollaSaProtEncoder |
python | django__django | tests/admin_scripts/tests.py | {
"start": 32344,
"end": 33657
} | class ____(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"""
no settings: manage.py builtin commands fail with an error when no
settings provided.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(
err, r"No module named '?(test_project\.)?settings'?", regex=True
)
def test_builtin_with_bad_settings(self):
"""
no settings: manage.py builtin commands fail if settings file (from
argument) doesn't exist.
"""
args = ["check", "--settings=bad_settings", "admin_scripts"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"""
no settings: manage.py builtin commands fail if settings file (from
environment) doesn't exist.
"""
args = ["check", "admin_scripts"]
out, err = self.run_manage(args, "bad_settings")
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
| ManageNoSettings |
python | kamyu104__LeetCode-Solutions | Python/maximize-consecutive-elements-in-an-array-after-modification.py | {
"start": 836,
"end": 1622
} | class ____(object):
def maxSelectedElements(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
dp = collections.defaultdict(int)
dp[nums[0]] = dp[nums[0]+1] = 1
for i in xrange(1, len(nums)):
if nums[i] == nums[i-1]:
dp[nums[i]+1] = dp[nums[i]]+1
elif nums[i] == nums[i-1]+1:
dp[nums[i]+1] = dp[nums[i]]+1
dp[nums[i]] = dp[nums[i]-1]+1
elif nums[i] == nums[i-1]+2:
dp[nums[i]] = dp[nums[i]-1]+1
dp[nums[i]+1] = 1
else:
dp[nums[i]] = dp[nums[i]+1] = 1
return max(dp.itervalues())
# Time: O(nlogn)
# Space: O(n)
import collections
# sort, dp
| Solution2 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py | {
"start": 894,
"end": 1111
} | class ____:
def func1():
pass
# comment
def func2():
pass
# This is a
# ... multi-line comment
def func3():
pass
# This is a
# ... multi-line comment
@decorator
| Class |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Sphinx.py | {
"start": 8868,
"end": 11655
} | class ____:
def test_useless_docs_ignored_argument_names_sphinx( # [useless-type-doc, useless-param-doc]
self, arg, _, _ignored
):
"""Example of a method documenting the return type that an
implementation should return.
:param arg: An argument.
:type arg: int
:param _: Another argument.
:type _: float
:param _ignored: Ignored argument.
"""
pass
def test_finds_multiple_types_sphinx_one(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: dict(str, str)
:returns: named_arg
:rtype: dict(str, str)
"""
return named_arg
def test_finds_multiple_types_sphinx_two(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: dict[str, str]
:returns: named_arg
:rtype: dict[str, str]
"""
return named_arg
def test_finds_multiple_types_sphinx_three(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: int or str
:returns: named_arg
:rtype: int or str
"""
return named_arg
def test_finds_multiple_types_sphinx_four(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: tuple(int or str)
:returns: named_arg
:rtype: tuple(int or str)
"""
return named_arg
def test_finds_multiple_types_sphinx_five(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: tuple(int) or list(int)
:returns: named_arg
:rtype: tuple(int) or list(int)
"""
return named_arg
def test_finds_multiple_types_sphinx_six(named_arg):
"""The Sphinx docstring
:param named_arg: Returned
:type named_arg: tuple(int or str) or list(int or str)
:returns: named_arg
:rtype: tuple(int or str) or list(int or str)
"""
return named_arg
def test_finds_compact_container_types_sphinx_one(named_arg):
"""The Sphinx docstring
:param dict(str,str) named_arg: Returned
:returns: named_arg
:rtype: dict(str,str)
"""
return named_arg
def test_finds_compact_container_types_sphinx_two(named_arg):
"""The Sphinx docstring
:param dict[str,str] named_arg: Returned
:returns: named_arg
:rtype: dict[str,str]
"""
return named_arg
def test_finds_compact_container_types_sphinx_three(named_arg):
"""The Sphinx docstring
:param tuple(int) named_arg: Returned
:returns: named_arg
:rtype: tuple(int)
"""
return named_arg
def test_finds_compact_container_types_sphinx_four(named_arg):
"""The Sphinx docstring
:param list[tokenize.TokenInfo] named_arg: Returned
:returns: named_arg
:rtype: list[tokenize.TokenInfo]
"""
return named_arg
| Foo |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 2946,
"end": 3698
} | class ____:
foo: int
required_enum: "BasicEnum" = field()
opt: "bool | None" = None
baz: "str" = field(default="toto", metadata={"help": "help message"})
foo_str: "list[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class WithDefaultBoolExamplePep604:
foo: bool = False
baz: bool = True
opt: bool | None = None
@dataclass
class OptionalExamplePep604:
foo: int | None = None
bar: float | None = field(default=None, metadata={"help": "help message"})
baz: str | None = None
ces: list[str] | None = list_field(default=[])
des: list[int] | None = list_field(default=[])
| StringLiteralAnnotationExample |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 6717,
"end": 6822
} | class ____(DocumentEvent):
''' Base class for connection status related events.
'''
| ConnectionEvent |
python | ansible__ansible | test/lib/ansible_test/_internal/config.py | {
"start": 809,
"end": 994
} | class ____:
"""Configuration for modules."""
python_requires: str
python_versions: tuple[str, ...]
controller_only: bool
@dataclasses.dataclass(frozen=True)
| ModulesConfig |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 97903,
"end": 101717
} | class ____:
@pytest.fixture
def _organization_update(self, pyramid_user):
self.user = UserFactory.create()
EmailFactory.create(user=self.user, verified=True)
self.organization_name = "example"
self.organization_display_name = "Example"
self.organization_link_url = "https://www.example.com/"
self.organization_description = "An example organization for testing"
self.organization_orgtype = "Company"
self.previous_organization_display_name = "Example Group"
self.previous_organization_link_url = "https://www.example.com/group/"
self.previous_organization_description = "An example group for testing"
self.previous_organization_orgtype = "Community"
@pytest.mark.usefixtures("_organization_update")
def test_send_organization_renamed_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-updated"
)
result = email.send_organization_updated_email(
db_request,
self.user,
organization_name=self.organization_name,
organization_display_name=self.organization_display_name,
organization_link_url=self.organization_link_url,
organization_description=self.organization_description,
organization_orgtype=self.organization_orgtype,
previous_organization_display_name=self.previous_organization_display_name,
previous_organization_link_url=self.previous_organization_link_url,
previous_organization_description=self.previous_organization_description,
previous_organization_orgtype=self.previous_organization_orgtype,
)
assert result == {
"organization_name": self.organization_name,
"organization_display_name": self.organization_display_name,
"organization_link_url": self.organization_link_url,
"organization_description": self.organization_description,
"organization_orgtype": self.organization_orgtype,
"previous_organization_display_name": (
self.previous_organization_display_name
),
"previous_organization_link_url": self.previous_organization_link_url,
"previous_organization_description": self.previous_organization_description,
"previous_organization_orgtype": self.previous_organization_orgtype,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
| TestOrganizationUpdateEmails |
python | pytorch__pytorch | test/test_utils.py | {
"start": 20568,
"end": 22988
} | class ____(TestCase):
MAX_TIMEOUT_IN_SECOND = 300
def test_random_seed(self):
def run():
dataloader = torch.utils.data.DataLoader(
RandomDatasetMock(),
batch_size=2,
num_workers=4,
shuffle=True,
timeout=self.MAX_TIMEOUT_IN_SECOND,
)
return next(iter(dataloader))
torch.manual_seed(2018)
x1 = run()
torch.manual_seed(2018)
x2 = run()
self.assertEqual(x1, x2)
def test_single_keep(self):
# torch.rand(5, 3, 3, 2) is a Tensor here; technically not a valid input because
# not a Dataset subclass, but needs to stay working so add ignore's
# for type checking with mypy
dataloader: DataLoader = DataLoader(
torch.rand(5, 3, 3, 2), # type: ignore[arg-type]
batch_size=3,
num_workers=0,
drop_last=False,
)
dataiter = iter(dataloader)
self.assertEqual(len(list(dataiter)), 2)
def test_single_drop(self):
dataloader: DataLoader = DataLoader(
torch.rand(5, 3, 3, 2), # type: ignore[arg-type]
batch_size=3,
num_workers=0,
drop_last=True,
)
dataiter = iter(dataloader)
self.assertEqual(len(list(dataiter)), 1)
@unittest.skip(
"FIXME: Intermittent GPU out-of-memory error on Windows and time-out under ASAN"
)
def test_multi_keep(self):
dataloader: DataLoader = DataLoader(
torch.rand(5, 3, 3, 2), # type: ignore[arg-type]
batch_size=3,
num_workers=2,
drop_last=False,
timeout=self.MAX_TIMEOUT_IN_SECOND,
)
dataiter = iter(dataloader)
self.assertEqual(len(list(dataiter)), 2)
def test_multi_drop(self):
dataloader: DataLoader = DataLoader(
torch.rand(5, 3, 3, 2), # type: ignore[arg-type]
batch_size=3,
num_workers=2,
drop_last=True,
timeout=self.MAX_TIMEOUT_IN_SECOND,
)
dataiter = iter(dataloader)
self.assertEqual(len(list(dataiter)), 1)
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
from torch.utils.collect_env import get_pretty_env_info
@unittest.skipIf(IS_FBCODE, "runs pip which is not available internally")
| TestDataLoaderUtils |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 1047,
"end": 1224
} | class ____(TypedDict):
userNotifiedTimestamp: datetime
environmentMutedTimestamp: datetime
@register(MonitorEnvBrokenDetection)
| MonitorEnvBrokenDetectionSerializerResponse |
python | django__django | tests/template_tests/test_logging.py | {
"start": 124,
"end": 2412
} | class ____(SimpleTestCase):
loglevel = logging.DEBUG
def test_log_on_variable_does_not_exist_silent(self):
class TestObject:
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template_name"
@property
def template(self):
return Engine().from_string("")
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return (attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
with self.assertLogs("django.template", self.loglevel) as cm:
Variable("article").resolve(TestObject())
self.assertEqual(len(cm.records), 1)
log_record = cm.records[0]
self.assertEqual(
log_record.getMessage(),
"Exception while resolving variable 'article' in template 'template_name'.",
)
self.assertIsNotNone(log_record.exc_info)
raised_exception = log_record.exc_info[1]
self.assertEqual(str(raised_exception), "Attribute does not exist.")
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertLogs("django.template", self.loglevel) as cm:
with self.assertRaises(VariableDoesNotExist):
Variable("article.author").resolve({"article": {"section": "News"}})
self.assertEqual(len(cm.records), 1)
log_record = cm.records[0]
self.assertEqual(
log_record.getMessage(),
"Exception while resolving variable 'author' in template 'unknown'.",
)
self.assertIsNotNone(log_record.exc_info)
raised_exception = log_record.exc_info[1]
self.assertEqual(
str(raised_exception),
"Failed lookup for key [author] in {'section': 'News'}",
)
def test_no_log_when_variable_exists(self):
with self.assertNoLogs("django.template", self.loglevel):
Variable("article.section").resolve({"article": {"section": "News"}})
| VariableResolveLoggingTests |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 9643,
"end": 12248
} | class ____(test_lib.TestCase):
def _l2Normalize(self, x, dim):
if isinstance(dim, list):
norm = np.linalg.norm(x, axis=tuple(dim))
for d in dim:
norm = np.expand_dims(norm, d)
return x / norm
else:
norm = np.apply_along_axis(np.linalg.norm, dim, x)
return x / np.expand_dims(norm, dim)
def testL2Normalize(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
y_np = self._l2Normalize(x_np, dim)
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
def testL2NormalizeDimArray(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
y_np = self._l2Normalize(x_np, dim)
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
def testL2NormalizeGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np, name="x")
# TODO(b/241834841): Test l2_normalize with `axis` set to other dims
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_impl.l2_normalize, [x_tf])
self.assertAllClose(theoretical, numerical)
def testL2NormalizeComplex(self):
x_shape = [20, 7, 3]
for dtype in [np.complex64, np.complex128]:
np.random.seed(1)
x_np = (
np.random.random_sample(x_shape).astype(dtype) +
np.random.random_sample(x_shape).astype(dtype) * 1j)
for dim in range(len(x_shape)):
y_np = self._l2Normalize(x_np, dim)
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
DROPOUT_FNS = [
("stateful_v1", nn_ops.dropout),
("stateful_v2", nn_ops.dropout_v2),
("stateless", functools.partial(nn_ops.stateless_dropout, seed=(1, 2))),
("stateless_philox", functools.partial(
nn_ops.stateless_dropout, seed=(1, 2), rng_alg="philox")),
("generator", functools.partial( # pylint: disable=g-long-lambda
nn_ops.general_dropout, uniform_sampler=lambda shape, dtype: ( # pylint: disable=g-long-lambda
stateful_random_ops.Generator.from_seed(1).uniform(
shape=shape, dtype=dtype)))),
]
| L2NormalizeTest |
python | cherrypy__cherrypy | cherrypy/process/plugins.py | {
"start": 1200,
"end": 2191
} | class ____(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.
"""
def __init__(self, bus):
"""Initialize a simple plugin."""
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
| SimplePlugin |
python | simplejson__simplejson | simplejson/tests/test_for_json.py | {
"start": 216,
"end": 293
} | class ____(object):
def for_json(self):
return ['list']
| ForJsonList |
python | scikit-image__scikit-image | src/skimage/measure/fit.py | {
"start": 915,
"end": 1192
} | class ____:
def __init_subclass__(self):
warn(
f'`BaseModel` deprecated since version {_PARAMS_DEP_START} and '
f'will be removed in version {_PARAMS_DEP_STOP}',
category=FutureWarning,
stacklevel=2,
)
| BaseModel |
python | MongoEngine__mongoengine | tests/fixtures.py | {
"start": 194,
"end": 425
} | class ____(Document):
number = IntField()
string = StringField(choices=(("One", "1"), ("Two", "2")))
embedded = EmbeddedDocumentField(PickleEmbedded)
lists = ListField(StringField())
photo = FileField()
| PickleTest |
python | ethereum__web3.py | web3/types.py | {
"start": 14750,
"end": 15129
} | class ____(TypedDict, total=False):
after: int
count: int
fromAddress: Sequence[Address | ChecksumAddress | ENS]
fromBlock: BlockIdentifier
toAddress: Sequence[Address | ChecksumAddress | ENS]
toBlock: BlockIdentifier
# Subscriptions
SubscriptionType = Literal[
"newHeads",
"logs",
"newPendingTransactions",
"syncing",
]
| TraceFilterParams |
python | huggingface__transformers | src/transformers/models/sam_hq/modular_sam_hq.py | {
"start": 7316,
"end": 8190
} | class ____(ModelOutput):
r"""
masks (`torch.FloatTensor` of shape `(batch_size, num_prompts, num_masks, height, width)`):
The predicted masks for the input image. The masks are of shape `(batch_size, num_prompts, num_masks, height, width)`.
iou_scores (`torch.FloatTensor` of shape `(batch_size, num_prompts, num_masks)`):
The predicted IoU scores for each mask. The scores are of shape `(batch_size, num_prompts, num_masks)`.
mask_decoder_attentions (`torch.FloatTensor`, *optional*):
The attention weights from the mask decoder, if `output_attentions=True` was passed during the forward pass.
This is specific to SAM-HQ and not present in base SAM.
"""
masks: torch.FloatTensor
iou_scores: Optional[torch.FloatTensor] = None
mask_decoder_attentions: Optional[torch.FloatTensor] = None
| SamHQMMaskDecoderOutputs |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 43745,
"end": 44619
} | class ____(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
@auto_docstring
# Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr
| MLP |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/events.py | {
"start": 24272,
"end": 26075
} | class ____(
NamedTuple(
"_TypeCheck",
[
("success", PublicAttr[bool]),
("description", PublicAttr[Optional[str]]),
("metadata", PublicAttr[Mapping[str, MetadataValue]]),
],
)
):
"""Event corresponding to a successful typecheck.
Events of this type should be returned by user-defined type checks when they need to encapsulate
additional metadata about a type check's success or failure. (i.e., when using
:py:func:`as_dagster_type`, :py:func:`@usable_as_dagster_type <dagster_type>`, or the underlying
:py:func:`PythonObjectDagsterType` API.)
Op compute functions should generally avoid yielding events of this type to avoid confusion.
Args:
success (bool): ``True`` if the type check succeeded, ``False`` otherwise.
description (Optional[str]): A human-readable description of the type check.
metadata (Optional[Dict[str, RawMetadataValue]]):
Arbitrary metadata about the failure. Keys are displayed string labels, and values are
one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
"""
def __new__(
cls,
success: bool,
description: Optional[str] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
):
normed_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return super().__new__(
cls,
success=check.bool_param(success, "success"),
description=check.opt_str_param(description, "description"),
metadata=normed_metadata,
)
@public
| TypeCheck |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_types.py | {
"start": 1461,
"end": 1564
} | class ____(RawMessageStopEvent):
type: Literal["message_stop"]
message: Message
| MessageStopEvent |
python | huggingface__transformers | tests/test_video_processing_common.py | {
"start": 2979,
"end": 23868
} | class ____:
test_cast_dtype = None
fast_video_processing_class = None
video_processor_list = None
input_name = "pixel_values_videos"
def setUp(self):
video_processor_list = []
if self.fast_video_processing_class:
video_processor_list.append(self.fast_video_processing_class)
self.video_processor_list = video_processor_list
def test_video_processor_to_json_string(self):
for video_processing_class in self.video_processor_list:
video_processor = video_processing_class(**self.video_processor_dict)
obj = json.loads(video_processor.to_json_string())
for key, value in self.video_processor_dict.items():
self.assertEqual(obj[key], value)
def test_video_processor_to_json_file(self):
for video_processing_class in self.video_processor_list:
video_processor_first = video_processing_class(**self.video_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "video_processor.json")
video_processor_first.to_json_file(json_file_path)
video_processor_second = video_processing_class.from_json_file(json_file_path)
self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"shortest_edge": 20})
self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
self.assertEqual(video_processor.size, {"shortest_edge": 42})
self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
def test_video_processor_from_and_save_pretrained(self):
for video_processing_class in self.video_processor_list:
video_processor_first = video_processing_class(**self.video_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = video_processor_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
video_processor_second = video_processing_class.from_pretrained(tmpdirname)
self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
def test_video_processor_save_load_with_autovideoprocessor(self):
for video_processing_class in self.video_processor_list:
video_processor_first = video_processing_class(**self.video_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = video_processor_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
use_fast = video_processing_class.__name__.endswith("Fast")
video_processor_second = AutoVideoProcessor.from_pretrained(tmpdirname, use_fast=use_fast)
self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
def test_init_without_params(self):
for video_processing_class in self.video_processor_list:
video_processor = video_processing_class()
self.assertIsNotNone(video_processor)
@slow
@require_torch_accelerator
@require_vision
@pytest.mark.torch_compile_test
def test_can_compile_fast_video_processor(self):
if self.fast_video_processing_class is None:
self.skipTest("Skipping compilation test as fast video processor is not defined")
if version.parse(torch.__version__) < version.parse("2.3"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
torch.compiler.reset()
video_inputs = self.video_processor_tester.prepare_video_inputs(equal_resolution=False, return_tensors="torch")
video_processor = self.fast_video_processing_class(**self.video_processor_dict)
output_eager = video_processor(video_inputs, device=torch_device, do_sample_frames=False, return_tensors="pt")
video_processor = torch.compile(video_processor, mode="reduce-overhead")
output_compiled = video_processor(
video_inputs, device=torch_device, do_sample_frames=False, return_tensors="pt"
)
torch.testing.assert_close(
output_eager[self.input_name], output_compiled[self.input_name], rtol=1e-4, atol=1e-4
)
@require_torch
@require_vision
def test_cast_dtype_device(self):
for video_processing_class in self.video_processor_list:
if self.test_cast_dtype is not None:
# Initialize video_processor
video_processor = video_processing_class(**self.video_processor_dict)
# create random PyTorch tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="torch"
)
encoding = video_processor(video_inputs, return_tensors="pt")
self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
self.assertEqual(encoding[self.input_name].dtype, torch.float32)
encoding = video_processor(video_inputs, return_tensors="pt").to(torch.float16)
self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
self.assertEqual(encoding[self.input_name].dtype, torch.float16)
encoding = video_processor(video_inputs, return_tensors="pt").to("cpu", torch.bfloat16)
self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
self.assertEqual(encoding[self.input_name].dtype, torch.bfloat16)
with self.assertRaises(TypeError):
_ = video_processor(video_inputs, return_tensors="pt").to(torch.bfloat16, "cpu")
# Try with text + video feature
encoding = video_processor(video_inputs, return_tensors="pt")
encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])})
encoding = encoding.to(torch.float16)
self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
self.assertEqual(encoding[self.input_name].dtype, torch.float16)
self.assertEqual(encoding.input_ids.dtype, torch.long)
def test_call_pil(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(equal_resolution=False)
# Each video is a list of PIL Images
for video in video_inputs:
self.assertIsInstance(video[0], Image.Image)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(
tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random numpy tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
for video in video_inputs:
self.assertIsInstance(video, np.ndarray)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(
tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_pytorch(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random PyTorch tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="torch"
)
for video in video_inputs:
self.assertIsInstance(video, torch.Tensor)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
self.assertEqual(
tuple(encoded_videos.shape),
(self.video_processor_tester.batch_size, *expected_output_video_shape),
)
def test_call_sample_frames(self):
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
prev_num_frames = self.video_processor_tester.num_frames
self.video_processor_tester.num_frames = 8
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False,
return_tensors="torch",
)
# Force set sampling to False. No sampling is expected even when `num_frames` exists
video_processing.do_sample_frames = False
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 8)
self.assertEqual(encoded_videos_batched.shape[1], 8)
# Set sampling to True. Video frames should be sampled with `num_frames` in the output
video_processing.do_sample_frames = True
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 3)
self.assertEqual(encoded_videos_batched.shape[1], 3)
# Sample with `fps` requires metadata to infer number of frames from total duration
with self.assertRaises(ValueError):
metadata = VideoMetadata(**{"total_num_frames": 8})
video_processing.sample_frames(metadata=metadata, fps=3)
metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
batched_metadata = metadata * len(video_inputs)
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
self.input_name
]
encoded_videos_batched = video_processing(
video_inputs, return_tensors="pt", fps=3, video_metadata=batched_metadata
)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 6)
self.assertEqual(encoded_videos_batched.shape[1], 6)
# The same as above but uses a `VideoMetadata` object in the input
metadata = [[VideoMetadata(duration=2.0, total_num_frames=8, fps=4)]]
batched_metadata = metadata * len(video_inputs)
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
self.input_name
]
# We should raise error when asked to sample more frames than there are in input video
with self.assertRaises(ValueError):
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=10)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=10)[
self.input_name
]
# Assign back the actual num frames in tester
self.video_processor_tester.num_frames = prev_num_frames
def test_nested_input(self):
"""Tests that the processor can work with nested list where each video is a list of arrays"""
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
# Test not batched input
video_inputs = [list(video) for video in video_inputs]
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
self.assertEqual(
tuple(encoded_videos.shape),
(self.video_processor_tester.batch_size, *expected_output_video_shape),
)
def test_call_numpy_4_channels(self):
for video_processing_class in self.video_processor_list:
# Test that can process videos which have an arbitrary number of channels
# Initialize video_processing
video_processor = video_processing_class(**self.video_processor_dict)
# create random numpy tensors
self.video_processor_tester.num_channels = 4
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="pil"
)
# Test not batched input
encoded_videos = video_processor(
video_inputs[0],
return_tensors="pt",
input_data_format="channels_last",
image_mean=0.0,
image_std=1.0,
)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
if video_processor.do_convert_rgb:
expected_output_video_shape = list(expected_output_video_shape)
expected_output_video_shape[1] = 3
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = video_processor(
video_inputs,
return_tensors="pt",
input_data_format="channels_last",
image_mean=0.0,
image_std=1.0,
)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
if video_processor.do_convert_rgb:
expected_output_video_shape = list(expected_output_video_shape)
expected_output_video_shape[1] = 3
self.assertEqual(
tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
)
def test_video_processor_preprocess_arguments(self):
is_tested = False
for video_processing_class in self.video_processor_list:
video_processor = video_processing_class(**self.video_processor_dict)
# validation done by _valid_processor_keys attribute
if hasattr(video_processor, "_valid_processor_keys") and hasattr(video_processor, "preprocess"):
preprocess_parameter_names = inspect.getfullargspec(video_processor.preprocess).args
preprocess_parameter_names.remove("self")
preprocess_parameter_names.sort()
valid_processor_keys = video_processor._valid_processor_keys
valid_processor_keys.sort()
self.assertEqual(preprocess_parameter_names, valid_processor_keys)
is_tested = True
# validation done by @filter_out_non_signature_kwargs decorator
if hasattr(video_processor.preprocess, "_filter_out_non_signature_kwargs"):
if hasattr(self.video_processor_tester, "prepare_video_inputs"):
inputs = self.video_processor_tester.prepare_video_inputs()
elif hasattr(self.video_processor_tester, "prepare_video_inputs"):
inputs = self.video_processor_tester.prepare_video_inputs()
else:
self.skipTest(reason="No valid input preparation method found")
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
video_processor(inputs, extra_argument=True)
messages = " ".join([str(w.message) for w in raised_warnings])
self.assertGreaterEqual(len(raised_warnings), 1)
self.assertIn("extra_argument", messages)
is_tested = True
if not is_tested:
self.skipTest(reason="No validation found for `preprocess` method")
def test_override_instance_attributes_does_not_affect_other_instances(self):
if self.fast_video_processing_class is None:
self.skipTest(
"Only testing fast video processor, as most slow processors break this test and are to be deprecated"
)
video_processing_class = self.fast_video_processing_class
video_processor_1 = video_processing_class()
video_processor_2 = video_processing_class()
if not (hasattr(video_processor_1, "size") and isinstance(video_processor_1.size, dict)) or not (
hasattr(video_processor_1, "image_mean") and isinstance(video_processor_1.image_mean, list)
):
self.skipTest(
reason="Skipping test as the image processor does not have dict size or list image_mean attributes"
)
original_size_2 = deepcopy(video_processor_2.size)
for key in video_processor_1.size:
video_processor_1.size[key] = -1
modified_copied_size_1 = deepcopy(video_processor_1.size)
original_image_mean_2 = deepcopy(video_processor_2.image_mean)
video_processor_1.image_mean[0] = -1
modified_copied_image_mean_1 = deepcopy(video_processor_1.image_mean)
# check that the original attributes of the second instance are not affected
self.assertEqual(video_processor_2.size, original_size_2)
self.assertEqual(video_processor_2.image_mean, original_image_mean_2)
for key in video_processor_2.size:
video_processor_2.size[key] = -2
video_processor_2.image_mean[0] = -2
# check that the modified attributes of the first instance are not affected by the second instance
self.assertEqual(video_processor_1.size, modified_copied_size_1)
self.assertEqual(video_processor_1.image_mean, modified_copied_image_mean_1)
| VideoProcessingTestMixin |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 1292,
"end": 1714
} | class ____(inputs.Input):
authenticators = inputs.ModelMultipleChoiceField(
queryset=Authenticator.objects.none()
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["authenticators"].queryset = Authenticator.objects.filter(
user=self.user, type=Authenticator.Type.WEBAUTHN
)
| DeleteWebAuthnInput |
python | getsentry__sentry | src/sentry/core/endpoints/organization_member_index.py | {
"start": 7373,
"end": 19986
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
"POST": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
},
)
permission_classes = (MemberAndStaffPermission,)
owner = ApiOwner.ENTERPRISE
@extend_schema(
operation_id="List an Organization's Members",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
],
responses={
200: inline_sentry_response_serializer(
"ListOrganizationMemberResponse", list[OrganizationMemberResponse]
),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationMemberExamples.LIST_ORG_MEMBERS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
List all organization members.
Response includes pending invites that are approved by organization owners or managers but waiting to be accepted by the invitee.
"""
queryset = (
OrganizationMember.objects.filter(
Q(user_is_active=True, user_id__isnull=False) | Q(user_id__isnull=True),
organization=organization,
invite_status=InviteStatus.APPROVED.value,
)
.filter(
~Exists(
OrganizationMemberInvite.objects.filter(organization_member_id=OuterRef("id"))
)
)
.order_by("id")
)
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "email":
email_user_ids = user_service.get_many_by_email(
emails=value, organization_id=organization.id, is_verified=False
)
queryset = queryset.filter(
Q(email__in=value) | Q(user_id__in=[u.id for u in email_user_ids])
)
elif key == "id":
ids = [v for v in value if v.isdigit()]
queryset = queryset.filter(id__in=ids) if ids else queryset.none()
elif key == "user.id":
user_ids = [v for v in value if v.isdigit()]
queryset = (
queryset.filter(user_id__in=user_ids) if user_ids else queryset.none()
)
elif key == "scope":
queryset = queryset.filter(role__in=[r.id for r in roles.with_any_scope(value)])
elif key == "role":
queryset = queryset.filter(role__in=value)
elif key == "isInvited":
isInvited = "true" in value
queryset = queryset.filter(user_id__isnull=isInvited)
elif key == "ssoLinked":
ssoFlag = OrganizationMember.flags["sso:linked"]
ssoLinked = "true" in value
if ssoLinked:
queryset = queryset.filter(flags=F("flags").bitor(ssoFlag))
else:
queryset = queryset.filter(flags=F("flags").bitand(~ssoFlag))
elif key == "has2fa":
has2fa = "true" in value
if has2fa:
types = [a.type for a in available_authenticators(ignore_backup=True)]
has2fa_user_ids = user_service.get_many_ids(
filter=dict(organization_id=organization.id, authenticator_types=types)
)
queryset = queryset.filter(user_id__in=has2fa_user_ids).distinct()
else:
has2fa_user_ids = user_service.get_many_ids(
filter=dict(organization_id=organization.id, authenticator_types=None)
)
queryset = queryset.filter(user_id__in=has2fa_user_ids).distinct()
elif key == "hasExternalUsers":
externalactor_user_ids = ExternalActor.objects.filter(
organization=organization,
).values_list("user_id", flat=True)
hasExternalUsers = "true" in value
if hasExternalUsers:
queryset = queryset.filter(user_id__in=externalactor_user_ids)
else:
queryset = queryset.exclude(user_id__in=externalactor_user_ids)
elif key == "query":
value_s = " ".join(value)
query_user_ids = user_service.get_many_ids(
filter=dict(query=value_s, organization_id=organization.id)
)
queryset = queryset.filter(
Q(user_id__in=query_user_ids) | Q(email__icontains=value_s)
)
else:
queryset = queryset.none()
expand = request.GET.getlist("expand", [])
return self.paginate(
request=request,
queryset=queryset,
on_results=lambda x: serialize(
x,
request.user,
serializer=OrganizationMemberSerializer(expand=expand),
),
paginator_cls=OffsetPaginator,
)
@extend_schema(
operation_id="Add a Member to an Organization",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
],
request=OrganizationMemberRequestSerializer,
responses={
201: OrganizationMemberSerializer,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationMemberExamples.CREATE_ORG_MEMBER,
)
def post(self, request: Request, organization: Organization) -> Response:
"""
Add or invite a member to an organization.
"""
assigned_org_role = (
request.data.get("orgRole")
or request.data.get("role")
or organization_roles.get_default().id
)
billing_bypass = assigned_org_role == "billing" and features.has(
"organizations:invite-billing", organization
)
if not billing_bypass and not features.has(
"organizations:invite-members", organization, actor=request.user
):
return Response(
{"organization": "Your organization is not allowed to invite members"}, status=403
)
allowed_roles = get_allowed_org_roles(request, organization, creating_org_invite=True)
# We allow requests from integration tokens to invite new members as the member role only
if not allowed_roles and request.access.is_integration_token:
# Error if the assigned role is not a member
if assigned_org_role != "member":
raise serializers.ValidationError(
"Integration tokens are restricted to inviting new members with the member role only."
)
allowed_roles = [organization_roles.get("member")]
serializer = OrganizationMemberRequestSerializer(
data=request.data,
context={
"organization": organization,
"allowed_roles": allowed_roles,
"allow_existing_invite_request": True,
"allow_retired_roles": not features.has("organizations:team-roles", organization),
},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.validated_data
if ratelimits.for_organization_member_invite(
organization=organization,
email=result["email"],
user=request.user,
auth=request.auth,
):
metrics.incr(
"member-invite.attempt",
instance="rate_limited",
skip_internal=True,
sample_rate=1.0,
)
return Response({"detail": ERR_RATE_LIMITED}, status=429)
is_member = not request.access.has_scope("member:admin") and (
request.access.has_scope("member:invite")
)
# if Open Team Membership is disabled and Member Invites are enabled, members can only invite members to teams they are in
members_can_only_invite_to_members_teams = (
not organization.flags.allow_joinleave and not organization.flags.disable_member_invite
)
has_teams = bool(result.get("teamRoles") or result.get("teams"))
if is_member and members_can_only_invite_to_members_teams and has_teams:
requester_teams = set(
OrganizationMember.objects.filter(
organization=organization,
user_id=request.user.id,
user_is_active=True,
).values_list("teams__slug", flat=True)
)
team_slugs = list(
set(
[team.slug for team, _ in result.get("teamRoles", [])]
+ [team.slug for team in result.get("teams", [])]
)
)
# ensure that the requester is a member of all teams they are trying to assign
if not requester_teams.issuperset(team_slugs):
return Response(
{"detail": "You cannot assign members to teams you are not a member of."},
status=400,
)
if has_teams and not organization_roles.get(assigned_org_role).is_team_roles_allowed:
return Response(
{
"email": f"The user with a '{assigned_org_role}' role cannot have team-level permissions."
},
status=400,
)
with transaction.atomic(router.db_for_write(OrganizationMember)):
# remove any invitation requests for this email before inviting
existing_invite = OrganizationMember.objects.filter(
Q(invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value)
| Q(invite_status=InviteStatus.REQUESTED_TO_JOIN.value),
email=result["email"],
organization=organization,
)
for om in existing_invite:
om.delete()
om = OrganizationMember(
organization=organization,
email=result["email"],
role=assigned_org_role,
inviter_id=request.user.id,
)
if settings.SENTRY_ENABLE_INVITES:
om.token = om.generate_token()
om.save()
# Do not set team-roles when inviting members
if "teamRoles" in result or "teams" in result:
teams = (
[team for team, _ in result.get("teamRoles")]
if "teamRoles" in result and result["teamRoles"]
else result.get("teams")
)
save_team_assignments(om, teams)
if settings.SENTRY_ENABLE_INVITES and result.get("sendInvite"):
referrer = request.query_params.get("referrer")
om.send_invite_email(referrer)
member_invited.send_robust(
member=om, user=request.user, sender=self, referrer=request.data.get("referrer")
)
self.create_audit_entry(
request=request,
organization_id=organization.id,
target_object=om.id,
data=om.get_audit_log_data(),
event=(
audit_log.get_event_id("MEMBER_INVITE")
if settings.SENTRY_ENABLE_INVITES
else audit_log.get_event_id("MEMBER_ADD")
),
)
return Response(serialize(om), status=201)
| OrganizationMemberIndexEndpoint |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/cloud_memorystore.py | {
"start": 1877,
"end": 2096
} | class ____(BaseGoogleLink):
"""Helper class for constructing Memorystore Redis Instance Link."""
name = "Memorystore Redis Instance"
key = "redis_instance"
format_str = REDIS_LINK
| RedisInstanceDetailsLink |
python | Farama-Foundation__Gymnasium | gymnasium/envs/box2d/car_racing.py | {
"start": 3091,
"end": 29599
} | class ____(gym.Env, EzPickle):
"""
## Description
The easiest control task to learn from pixels - a top-down
racing environment. The generated track is random every episode.
Some indicators are shown at the bottom of the window along with the
state RGB buffer. From left to right: true speed, four ABS sensors,
steering wheel position, and gyroscope.
To play yourself (it's rather fast for humans), type:
```shell
python gymnasium/envs/box2d/car_racing.py
```
Remember: it's a powerful rear-wheel drive car - don't press the accelerator
and turn at the same time.
## Action Space
If continuous there are 3 actions :
- 0: steering, -1 is full left, +1 is full right
- 1: gas
- 2: braking
If discrete there are 5 actions:
- 0: do nothing
- 1: steer right
- 2: steer left
- 3: gas
- 4: brake
## Observation Space
A top-down 96x96 RGB image of the car and race track.
## Rewards
The reward is -0.1 every frame and +1000/N for every track tile visited, where N is the total number of tiles
visited in the track. For example, if you have finished in 732 frames, your reward is 1000 - 0.1*732 = 926.8 points.
## Starting State
The car starts at rest in the center of the road.
## Episode Termination
The episode finishes when all the tiles are visited. The car can also go outside the playfield -
that is, far off the track, in which case it will receive -100 reward and die.
## Arguments
```python
>>> import gymnasium as gym
>>> env = gym.make("CarRacing-v3", render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=False, continuous=False)
>>> env
<TimeLimit<OrderEnforcing<PassiveEnvChecker<CarRacing<CarRacing-v3>>>>>
```
* `lap_complete_percent=0.95` dictates the percentage of tiles that must be visited by
the agent before a lap is considered complete.
* `domain_randomize=False` enables the domain randomized variant of the environment.
In this scenario, the background and track colours are different on every reset.
* `continuous=True` specifies if the agent has continuous (true) or discrete (false) actions.
See action space section for a description of each.
## Reset Arguments
Passing the option `options["randomize"] = True` will change the current colour of the environment on demand.
Correspondingly, passing the option `options["randomize"] = False` will not change the current colour of the environment.
`domain_randomize` must be `True` on init for this argument to work.
```python
>>> import gymnasium as gym
>>> env = gym.make("CarRacing-v3", domain_randomize=True)
# normal reset, this changes the colour scheme by default
>>> obs, _ = env.reset()
# reset with colour scheme change
>>> randomize_obs, _ = env.reset(options={"randomize": True})
# reset with no colour scheme change
>>> non_random_obs, _ = env.reset(options={"randomize": False})
```
## Version History
- v2: Change truncation to termination when finishing the lap (1.0.0)
- v1: Change track completion logic and add domain randomization (0.24.0)
- v0: Original version
## References
- Chris Campbell (2014), http://www.iforce2d.net/b2dtut/top-down-car.
## Credits
Created by Oleg Klimov
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"state_pixels",
],
"render_fps": FPS,
}
def __init__(
    self,
    render_mode: str | None = None,
    verbose: bool = False,
    lap_complete_percent: float = 0.95,
    domain_randomize: bool = False,
    continuous: bool = True,
):
    """Create the car-racing environment.

    Parameters
    ----------
    render_mode : str | None
        One of ``metadata["render_modes"]``, or ``None`` for no rendering.
    verbose : bool
        If True, print track-generation diagnostics.
    lap_complete_percent : float
        Fraction of track tiles that must be visited before a lap counts
        as complete.
    domain_randomize : bool
        If True, background and track colours are randomized on reset.
    continuous : bool
        If True, actions are a continuous Box (steer, gas, brake);
        otherwise a 5-way Discrete space.
    """
    # EzPickle records constructor args so the env can be pickled/deep-copied.
    EzPickle.__init__(
        self,
        render_mode,
        verbose,
        lap_complete_percent,
        domain_randomize,
        continuous,
    )
    self.continuous = continuous
    self.domain_randomize = domain_randomize
    self.lap_complete_percent = lap_complete_percent
    # Colours must exist before the first reset; randomized if requested.
    self._init_colors()

    # Box2D world with a friction/contact listener that tracks tile visits.
    # Keep a reference so the listener is not garbage collected.
    self.contactListener_keepref = FrictionDetector(self, self.lap_complete_percent)
    self.world = Box2D.b2World((0, 0), contactListener=self.contactListener_keepref)
    self.screen: pygame.Surface | None = None
    self.surf = None
    self.clock = None
    self.isopen = True
    self.invisible_state_window = None
    self.invisible_video_window = None
    self.road = None
    self.car: Car | None = None
    self.reward = 0.0
    self.prev_reward = 0.0
    self.verbose = verbose
    self.new_lap = False
    # Template fixture reused for every road tile; vertices are rewritten
    # per tile in _create_track.
    self.fd_tile = fixtureDef(
        shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)])
    )

    # This will throw a warning in tests/envs/test_envs in utils/env_checker.py as the space is not symmetric
    #   or normalised however this is not possible here so ignore
    if self.continuous:
        self.action_space = spaces.Box(
            np.array([-1, 0, 0]).astype(np.float32),
            np.array([+1, +1, +1]).astype(np.float32),
        )  # steer, gas, brake
    else:
        self.action_space = spaces.Discrete(5)
        # do nothing, right, left, gas, brake

    # Observations are raw RGB frames at state-pixel resolution.
    self.observation_space = spaces.Box(
        low=0, high=255, shape=(STATE_H, STATE_W, 3), dtype=np.uint8
    )

    self.render_mode = render_mode
def _destroy(self):
    """Remove all Box2D bodies (road tiles and the car) from the world."""
    if not self.road:
        return  # nothing has been built yet (e.g. first reset)
    for tile in self.road:
        self.world.DestroyBody(tile)
    self.road = []
    assert self.car is not None
    self.car.destroy()
def _init_colors(self):
    """Pick road/background/grass colours (fixed palette or randomized)."""
    if not self.domain_randomize:
        # default colours
        self.road_color = np.array([102, 102, 102])
        self.bg_color = np.array([102, 204, 102])
        self.grass_color = np.array([102, 230, 102])
        return

    # domain randomize the bg and grass colour; grass is the background
    # colour with one channel brightened so the checkerboard stays visible
    self.road_color = self.np_random.uniform(0, 210, size=3)
    self.bg_color = self.np_random.uniform(0, 210, size=3)
    self.grass_color = np.copy(self.bg_color)
    channel = self.np_random.integers(3)
    self.grass_color[channel] += 20
def _reinit_colors(self, randomize):
    """Optionally draw a fresh random palette on reset.

    Only valid when domain randomization is enabled; when *randomize* is
    False the current palette is kept unchanged.
    """
    assert (
        self.domain_randomize
    ), "domain_randomize must be True to use this function."

    if not randomize:
        return

    # Same randomized-palette scheme as _init_colors.
    self.road_color = self.np_random.uniform(0, 210, size=3)
    self.bg_color = self.np_random.uniform(0, 210, size=3)
    self.grass_color = np.copy(self.bg_color)
    channel = self.np_random.integers(3)
    self.grass_color[channel] += 20
def _create_track(self):
    """Generate a random closed-loop track.

    Returns True on success; False when generation failed (loop did not
    close cleanly) so the caller can retry. On success this fills
    ``self.road`` (Box2D tile bodies), ``self.road_poly`` (render
    polygons) and ``self.track`` (list of (alpha, beta, x, y) waypoints,
    where alpha is the polar angle and beta the local heading).
    """
    CHECKPOINTS = 12

    # Create checkpoints: polar waypoints at jittered angles and radii.
    checkpoints = []
    for c in range(CHECKPOINTS):
        noise = self.np_random.uniform(0, 2 * math.pi * 1 / CHECKPOINTS)
        alpha = 2 * math.pi * c / CHECKPOINTS + noise
        rad = self.np_random.uniform(TRACK_RAD / 3, TRACK_RAD)

        if c == 0:
            # Pin the first checkpoint so the start line is deterministic.
            alpha = 0
            rad = 1.5 * TRACK_RAD
        if c == CHECKPOINTS - 1:
            alpha = 2 * math.pi * c / CHECKPOINTS
            self.start_alpha = 2 * math.pi * (-0.5) / CHECKPOINTS
            rad = 1.5 * TRACK_RAD

        checkpoints.append((alpha, rad * math.cos(alpha), rad * math.sin(alpha)))
    self.road = []

    # Go from one checkpoint to another to create track:
    # integrate a heading (beta) steered towards the next checkpoint,
    # stepping TRACK_DETAIL_STEP per iteration.
    x, y, beta = 1.5 * TRACK_RAD, 0, 0
    dest_i = 0
    laps = 0
    track = []
    no_freeze = 2500  # hard iteration cap to avoid an infinite loop
    visited_other_side = False
    while True:
        alpha = math.atan2(y, x)
        # Count a lap each time we cross back from negative to positive angle.
        if visited_other_side and alpha > 0:
            laps += 1
            visited_other_side = False
        if alpha < 0:
            visited_other_side = True
            alpha += 2 * math.pi

        while True:  # Find destination from checkpoints
            failed = True

            while True:
                dest_alpha, dest_x, dest_y = checkpoints[dest_i % len(checkpoints)]
                if alpha <= dest_alpha:
                    failed = False
                    break
                dest_i += 1
                if dest_i % len(checkpoints) == 0:
                    break

            if not failed:
                break

            alpha -= 2 * math.pi
            continue

        # Unit heading (r1) and its perpendicular (p1 = direction of travel).
        r1x = math.cos(beta)
        r1y = math.sin(beta)
        p1x = -r1y
        p1y = r1x
        dest_dx = dest_x - x  # vector towards destination
        dest_dy = dest_y - y
        # destination vector projected on rad:
        proj = r1x * dest_dx + r1y * dest_dy
        # Keep beta within 1.5*pi of alpha to avoid angle wind-up.
        while beta - alpha > 1.5 * math.pi:
            beta -= 2 * math.pi
        while beta - alpha < -1.5 * math.pi:
            beta += 2 * math.pi
        prev_beta = beta
        proj *= SCALE
        # Turn towards the destination, rate-limited by TRACK_TURN_RATE.
        if proj > 0.3:
            beta -= min(TRACK_TURN_RATE, abs(0.001 * proj))
        if proj < -0.3:
            beta += min(TRACK_TURN_RATE, abs(0.001 * proj))
        x += p1x * TRACK_DETAIL_STEP
        y += p1y * TRACK_DETAIL_STEP
        # Store the midpoint heading for a smoother tile orientation.
        track.append((alpha, prev_beta * 0.5 + beta * 0.5, x, y))
        if laps > 4:
            break
        no_freeze -= 1
        if no_freeze == 0:
            break

    # Find closed loop range i1..i2, first loop should be ignored, second is OK
    i1, i2 = -1, -1
    i = len(track)
    while True:
        i -= 1
        if i == 0:
            return False  # Failed
        pass_through_start = (
            track[i][0] > self.start_alpha and track[i - 1][0] <= self.start_alpha
        )
        if pass_through_start and i2 == -1:
            i2 = i
        elif pass_through_start and i1 == -1:
            i1 = i
            break
    if self.verbose:
        print("Track generation: %i..%i -> %i-tiles track" % (i1, i2, i2 - i1))
    assert i1 != -1
    assert i2 != -1

    track = track[i1 : i2 - 1]

    first_beta = track[0][1]
    first_perp_x = math.cos(first_beta)
    first_perp_y = math.sin(first_beta)
    # Length of perpendicular jump to put together head and tail
    well_glued_together = np.sqrt(
        np.square(first_perp_x * (track[0][2] - track[-1][2]))
        + np.square(first_perp_y * (track[0][3] - track[-1][3]))
    )
    if well_glued_together > TRACK_DETAIL_STEP:
        # The loop does not close within one step; reject this track.
        return False

    # Red-white border on hard turns: mark tiles where the heading keeps
    # changing in the same direction for BORDER_MIN_COUNT consecutive tiles.
    border = [False] * len(track)
    for i in range(len(track)):
        good = True
        oneside = 0
        for neg in range(BORDER_MIN_COUNT):
            beta1 = track[i - neg - 0][1]
            beta2 = track[i - neg - 1][1]
            good &= abs(beta1 - beta2) > TRACK_TURN_RATE * 0.2
            oneside += np.sign(beta1 - beta2)
        good &= abs(oneside) == BORDER_MIN_COUNT
        border[i] = good
    # Extend each marked border backwards over its run of tiles.
    for i in range(len(track)):
        for neg in range(BORDER_MIN_COUNT):
            border[i - neg] |= border[i]

    # Create tiles: one static sensor body per track segment.
    for i in range(len(track)):
        alpha1, beta1, x1, y1 = track[i]
        alpha2, beta2, x2, y2 = track[i - 1]
        road1_l = (
            x1 - TRACK_WIDTH * math.cos(beta1),
            y1 - TRACK_WIDTH * math.sin(beta1),
        )
        road1_r = (
            x1 + TRACK_WIDTH * math.cos(beta1),
            y1 + TRACK_WIDTH * math.sin(beta1),
        )
        road2_l = (
            x2 - TRACK_WIDTH * math.cos(beta2),
            y2 - TRACK_WIDTH * math.sin(beta2),
        )
        road2_r = (
            x2 + TRACK_WIDTH * math.cos(beta2),
            y2 + TRACK_WIDTH * math.sin(beta2),
        )
        vertices = [road1_l, road1_r, road2_r, road2_l]
        self.fd_tile.shape.vertices = vertices
        t = self.world.CreateStaticBody(fixtures=self.fd_tile)
        t.userData = t
        # Slight per-tile brightness variation so tile boundaries are visible.
        c = 0.01 * (i % 3) * 255
        t.color = self.road_color + c
        t.road_visited = False
        t.road_friction = 1.0
        t.idx = i
        # Sensor fixtures detect wheel contact without colliding physically.
        t.fixtures[0].sensor = True
        self.road_poly.append(([road1_l, road1_r, road2_r, road2_l], t.color))
        self.road.append(t)
        if border[i]:
            side = np.sign(beta2 - beta1)
            b1_l = (
                x1 + side * TRACK_WIDTH * math.cos(beta1),
                y1 + side * TRACK_WIDTH * math.sin(beta1),
            )
            b1_r = (
                x1 + side * (TRACK_WIDTH + BORDER) * math.cos(beta1),
                y1 + side * (TRACK_WIDTH + BORDER) * math.sin(beta1),
            )
            b2_l = (
                x2 + side * TRACK_WIDTH * math.cos(beta2),
                y2 + side * TRACK_WIDTH * math.sin(beta2),
            )
            b2_r = (
                x2 + side * (TRACK_WIDTH + BORDER) * math.cos(beta2),
                y2 + side * (TRACK_WIDTH + BORDER) * math.sin(beta2),
            )
            self.road_poly.append(
                (
                    [b1_l, b1_r, b2_r, b2_l],
                    (255, 255, 255) if i % 2 == 0 else (255, 0, 0),
                )
            )
    self.track = track
    return True
def reset(
    self,
    *,
    seed: int | None = None,
    options: dict | None = None,
):
    """Reset the environment and generate a fresh random track.

    Parameters
    ----------
    seed : int | None
        RNG seed forwarded to the base ``Env.reset``.
    options : dict | None
        May contain ``"randomize"`` (bool) to control colour
        re-randomization when ``domain_randomize`` is enabled.

    Returns
    -------
    (observation, info) — the first observation is produced by taking a
    no-op step.
    """
    super().reset(seed=seed)
    self._destroy()
    # The attribute name suggests re-creating the listener works around a
    # Box2D contact-listener issue after bodies are destroyed — keep a
    # reference so it is not garbage collected.
    self.world.contactListener_bug_workaround = FrictionDetector(
        self, self.lap_complete_percent
    )
    self.world.contactListener = self.world.contactListener_bug_workaround
    self.reward = 0.0
    self.prev_reward = 0.0
    self.tile_visited_count = 0
    self.t = 0.0
    self.new_lap = False
    self.road_poly = []

    if self.domain_randomize:
        # "randomize" defaults to True unless explicitly disabled in options.
        randomize = True
        if isinstance(options, dict):
            if "randomize" in options:
                randomize = options["randomize"]

        self._reinit_colors(randomize)

    # Track generation can fail (loop does not close); retry until it works.
    while True:
        success = self._create_track()
        if success:
            break
        if self.verbose:
            print(
                "retry to generate track (normal if there are not many"
                "instances of this message)"
            )
    # Place the car at the first track waypoint (beta, x, y).
    self.car = Car(self.world, *self.track[0][1:4])

    if self.render_mode == "human":
        self.render()
    return self.step(None)[0], {}
def step(self, action: np.ndarray | int | None):
    """Advance the simulation by one frame.

    Parameters
    ----------
    action : np.ndarray | int | None
        Continuous mode: array ``[steer, gas, brake]`` with steer in
        [-1, 1] and gas/brake in [0, 1]. Discrete mode: int in {0..4}
        meaning do-nothing / right / left / gas / brake. ``None`` (used
        by ``reset``) applies no controls and no reward bookkeeping.

    Returns
    -------
    (state, step_reward, terminated, truncated, info)
    """
    assert self.car is not None
    if action is not None:
        if self.continuous:
            action = action.astype(np.float64)
            # Steering sign is flipped: positive action steers right.
            self.car.steer(-action[0])
            self.car.gas(action[1])
            self.car.brake(action[2])
        else:
            if not self.action_space.contains(action):
                raise InvalidAction(
                    f"you passed the invalid action `{action}`. "
                    f"The supported action_space is `{self.action_space}`"
                )
            self.car.steer(-0.6 * (action == 1) + 0.6 * (action == 2))
            self.car.gas(0.2 * (action == 3))
            self.car.brake(0.8 * (action == 4))

    self.car.step(1.0 / FPS)
    self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
    self.t += 1.0 / FPS

    # The observation is always the state-pixel rendering of the scene.
    self.state = self._render("state_pixels")

    step_reward = 0
    terminated = False
    truncated = False
    info = {}
    if action is not None:  # First step without action, called from reset()
        # Small per-frame time penalty encourages finishing quickly.
        self.reward -= 0.1
        # We actually don't want to count fuel spent, we want car to be faster.
        # self.reward -= 10 * self.car.fuel_spent / ENGINE_POWER
        self.car.fuel_spent = 0.0
        step_reward = self.reward - self.prev_reward
        self.prev_reward = self.reward
        if self.tile_visited_count == len(self.track) or self.new_lap:
            # Termination due to finishing lap
            terminated = True
            info["lap_finished"] = True
        x, y = self.car.hull.position
        if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD:
            # Far off the playfield: terminate with a flat -100 penalty.
            terminated = True
            info["lap_finished"] = False
            step_reward = -100

    if self.render_mode == "human":
        self.render()
    return self.state, step_reward, terminated, truncated, info
def render(self):
    """Render with the mode chosen at construction; warn if none was set."""
    if self.render_mode is not None:
        return self._render(self.render_mode)

    # No render mode configured: emit the standard gymnasium warning.
    assert self.spec is not None
    gym.logger.warn(
        "You are calling render method without specifying any render mode. "
        "You can specify the render_mode at initialization, "
        f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
    )
def _render(self, mode: str):
    """Draw the current scene for *mode*.

    Returns an RGB array for "rgb_array"/"state_pixels" modes, ``None``
    for "human" (drawn to the window), or ``self.isopen`` otherwise.
    """
    assert mode in self.metadata["render_modes"]

    pygame.font.init()
    # Lazily create the window/clock on the first human-mode render.
    if self.screen is None and mode == "human":
        pygame.init()
        pygame.display.init()
        self.screen = pygame.display.set_mode((WINDOW_W, WINDOW_H))
    if self.clock is None:
        self.clock = pygame.time.Clock()

    if "t" not in self.__dict__:
        return  # reset() not called yet

    self.surf = pygame.Surface((WINDOW_W, WINDOW_H))

    assert self.car is not None
    # computing transformations
    angle = -self.car.hull.angle
    # Animating first second zoom.
    zoom = 0.1 * SCALE * max(1 - self.t, 0) + ZOOM * SCALE * min(self.t, 1)
    # Scroll so the camera follows the car hull.
    scroll_x = -(self.car.hull.position[0]) * zoom
    scroll_y = -(self.car.hull.position[1]) * zoom
    trans = pygame.math.Vector2((scroll_x, scroll_y)).rotate_rad(angle)
    trans = (WINDOW_W / 2 + trans[0], WINDOW_H / 4 + trans[1])

    self._render_road(zoom, trans, angle)
    # Particles are only drawn for on-screen modes, not state pixels.
    self.car.draw(
        self.surf,
        zoom,
        trans,
        angle,
        mode not in ["state_pixels_list", "state_pixels"],
    )

    # pygame surfaces are y-down; flip vertically to match world coordinates.
    self.surf = pygame.transform.flip(self.surf, False, True)

    # showing stats
    self._render_indicators(WINDOW_W, WINDOW_H)

    font = pygame.font.Font(pygame.font.get_default_font(), 42)
    text = font.render("%04i" % self.reward, True, (255, 255, 255), (0, 0, 0))
    text_rect = text.get_rect()
    text_rect.center = (60, WINDOW_H - WINDOW_H * 2.5 / 40.0)
    self.surf.blit(text, text_rect)

    if mode == "human":
        pygame.event.pump()
        self.clock.tick(self.metadata["render_fps"])
        assert self.screen is not None
        self.screen.fill(0)
        self.screen.blit(self.surf, (0, 0))
        pygame.display.flip()
    elif mode == "rgb_array":
        return self._create_image_array(self.surf, (VIDEO_W, VIDEO_H))
    elif mode == "state_pixels":
        return self._create_image_array(self.surf, (STATE_W, STATE_H))
    else:
        return self.isopen
def _render_road(self, zoom, translation, angle):
    """Draw the background field, grass checkerboard, and road polygons."""
    bounds = PLAYFIELD
    field = [
        (bounds, bounds),
        (bounds, -bounds),
        (-bounds, -bounds),
        (-bounds, bounds),
    ]

    # Background covers the whole playfield, so skip the clipping test.
    self._draw_colored_polygon(
        self.surf, field, self.bg_color, zoom, translation, angle, clip=False
    )

    # Grass patches: a sparse checkerboard of GRASS_DIM squares.
    for gx in range(-20, 20, 2):
        for gy in range(-20, 20, 2):
            patch = [
                (GRASS_DIM * gx + GRASS_DIM, GRASS_DIM * gy + 0),
                (GRASS_DIM * gx + 0, GRASS_DIM * gy + 0),
                (GRASS_DIM * gx + 0, GRASS_DIM * gy + GRASS_DIM),
                (GRASS_DIM * gx + GRASS_DIM, GRASS_DIM * gy + GRASS_DIM),
            ]
            self._draw_colored_polygon(
                self.surf, patch, self.grass_color, zoom, translation, angle
            )

    # Road tiles (and red/white borders) built by _create_track.
    for poly, color in self.road_poly:
        vertices = [(p[0], p[1]) for p in poly]
        rgb = [int(c) for c in color]
        self._draw_colored_polygon(
            self.surf, vertices, rgb, zoom, translation, angle
        )
def _render_indicators(self, W, H):
    """Draw the bottom status bar: speed, wheel speeds, steering, yaw.

    Parameters
    ----------
    W, H : int
        Width and height of the target surface in pixels.
    """
    # Layout unit cells: the bar is 5 cells tall out of a 40-cell grid.
    s = W / 40.0
    h = H / 40.0
    color = (0, 0, 0)
    polygon = [(W, H), (W, H - 5 * h), (0, H - 5 * h), (0, H)]
    pygame.draw.polygon(self.surf, color=color, points=polygon)

    def vertical_ind(place, val):
        # Vertical bar at horizontal cell *place*, height proportional to val.
        return [
            (place * s, H - (h + h * val)),
            ((place + 1) * s, H - (h + h * val)),
            ((place + 1) * s, H - h),
            ((place + 0) * s, H - h),
        ]

    def horiz_ind(place, val):
        # Horizontal bar starting at cell *place*, width proportional to val.
        return [
            ((place + 0) * s, H - 4 * h),
            ((place + val) * s, H - 4 * h),
            ((place + val) * s, H - 2 * h),
            ((place + 0) * s, H - 2 * h),
        ]

    assert self.car is not None
    true_speed = np.sqrt(
        np.square(self.car.hull.linearVelocity[0])
        + np.square(self.car.hull.linearVelocity[1])
    )

    # simple wrapper to render if the indicator value is above a threshold
    def render_if_min(value, points, color):
        if abs(value) > 1e-4:
            pygame.draw.polygon(self.surf, points=points, color=color)

    # White bar: absolute speed of the hull.
    render_if_min(true_speed, vertical_ind(5, 0.02 * true_speed), (255, 255, 255))
    # ABS sensors: per-wheel angular velocity (front pair blue, rear pair violet).
    render_if_min(
        self.car.wheels[0].omega,
        vertical_ind(7, 0.01 * self.car.wheels[0].omega),
        (0, 0, 255),
    )
    render_if_min(
        self.car.wheels[1].omega,
        vertical_ind(8, 0.01 * self.car.wheels[1].omega),
        (0, 0, 255),
    )
    render_if_min(
        self.car.wheels[2].omega,
        vertical_ind(9, 0.01 * self.car.wheels[2].omega),
        (51, 0, 255),
    )
    render_if_min(
        self.car.wheels[3].omega,
        vertical_ind(10, 0.01 * self.car.wheels[3].omega),
        (51, 0, 255),
    )

    # Green bar: steering joint angle of the front-left wheel.
    render_if_min(
        self.car.wheels[0].joint.angle,
        horiz_ind(20, -10.0 * self.car.wheels[0].joint.angle),
        (0, 255, 0),
    )
    # Red bar: angular velocity (yaw rate) of the hull.
    render_if_min(
        self.car.hull.angularVelocity,
        horiz_ind(30, -0.8 * self.car.hull.angularVelocity),
        (255, 0, 0),
    )
def _draw_colored_polygon(
    self, surface, poly, color, zoom, translation, angle, clip=True
):
    """Rotate/zoom/translate *poly* into screen space and draw it filled.

    When *clip* is True the polygon is skipped entirely if no vertex
    falls inside the window extended by MAX_SHAPE_DIM (the maximum
    diagonal length of an environment object) on every side — a cheap
    bounding-box test instead of an exact overlap computation.
    """
    transformed = []
    for corner in poly:
        rotated = pygame.math.Vector2(corner).rotate_rad(angle)
        transformed.append(
            (rotated[0] * zoom + translation[0], rotated[1] * zoom + translation[1])
        )

    on_screen = any(
        -MAX_SHAPE_DIM <= px <= WINDOW_W + MAX_SHAPE_DIM
        and -MAX_SHAPE_DIM <= py <= WINDOW_H + MAX_SHAPE_DIM
        for px, py in transformed
    )
    if not clip or on_screen:
        gfxdraw.aapolygon(self.surf, transformed, color)
        gfxdraw.filled_polygon(self.surf, transformed, color)
def _create_image_array(self, screen, size):
    """Downscale *screen* to *size* and return an (H, W, 3) uint8-style array."""
    scaled = pygame.transform.smoothscale(screen, size)
    pixels = np.array(pygame.surfarray.pixels3d(scaled))
    # pygame stores surfaces as (W, H, 3); transpose to image convention.
    return np.transpose(pixels, axes=(1, 0, 2))
def close(self):
    """Shut down the pygame display if one was opened and mark it closed."""
    if self.screen is None:
        return
    pygame.display.quit()
    self.isopen = False
    pygame.quit()
if __name__ == "__main__":
    # Manual-driving demo: arrow keys control the car, Enter restarts,
    # Escape quits. The shared action buffer is [steer, gas, brake].
    a = np.array([0.0, 0.0, 0.0])

    def register_input():
        """Translate pending keyboard events into the shared action array."""
        global quit, restart
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    a[0] = -1.0
                if event.key == pygame.K_RIGHT:
                    a[0] = +1.0
                if event.key == pygame.K_UP:
                    a[1] = +1.0
                if event.key == pygame.K_DOWN:
                    a[2] = +0.8  # set 1.0 for wheels to block to zero rotation
                if event.key == pygame.K_RETURN:
                    restart = True
                if event.key == pygame.K_ESCAPE:
                    quit = True

            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT:
                    a[0] = 0
                if event.key == pygame.K_RIGHT:
                    a[0] = 0
                if event.key == pygame.K_UP:
                    a[1] = 0
                if event.key == pygame.K_DOWN:
                    a[2] = 0

            if event.type == pygame.QUIT:
                quit = True

    env = CarRacing(render_mode="human")

    quit = False
    while not quit:
        env.reset()
        total_reward = 0.0
        steps = 0
        restart = False
        while True:
            register_input()
            s, r, terminated, truncated, info = env.step(a)
            total_reward += r
            # Print a progress line every 200 steps and at episode end.
            if steps % 200 == 0 or terminated or truncated:
                print("\naction " + str([f"{x:+0.2f}" for x in a]))
                print(f"step {steps} total_reward {total_reward:+0.2f}")
            steps += 1
            if terminated or truncated or restart or quit:
                break
    env.close()
| CarRacing |
python | vyperlang__vyper | vyper/venom/passes/dead_store_elimination.py | {
"start": 573,
"end": 5974
} | class ____(IRPass):
"""
This pass eliminates dead stores using Memory SSA analysis.
"""
def run_pass(self, /, addr_space: AddrSpace):
mem_ssa_type = mem_ssa_type_factory(addr_space)
if addr_space == MEMORY:
self.NON_RELATED_EFFECTS = NON_MEMORY_EFFECTS
elif addr_space == STORAGE:
self.NON_RELATED_EFFECTS = NON_STORAGE_EFFECTS
elif addr_space == TRANSIENT:
self.NON_RELATED_EFFECTS = NON_TRANSIENT_EFFECTS
volatiles: list[MemoryLocation] = []
while True:
change = False
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.cfg = self.analyses_cache.request_analysis(CFGAnalysis)
self.mem_ssa = self.analyses_cache.request_analysis(mem_ssa_type)
for volatile_loc in volatiles:
self.mem_ssa.mark_location_volatile(volatile_loc)
volatiles = self.mem_ssa.volatiles.copy()
self.updater = InstUpdater(self.dfg)
# Go through all memory definitions and eliminate dead stores
for mem_def in self.mem_ssa.get_memory_defs():
if self._is_dead_store(mem_def):
change = True
self.updater.nop(mem_def.store_inst, annotation="[dead store elimination]")
if not change:
break
self.analyses_cache.invalidate_analysis(DFGAnalysis)
self.analyses_cache.invalidate_analysis(mem_ssa_type)
def _has_uses(self, inst: IRInstruction):
"""
Checks if the instruction's output is used in the DFG.
"""
return any(len(self.dfg.get_uses(output)) > 0 for output in inst.get_outputs())
def _is_memory_def_live(self, query_def: MemoryDef) -> bool:
"""
Checks if the memory definition is live by checking if it is
read from in any of the blocks that are reachable from the
memory definition's block, without being clobbered by another
memory access before read.
"""
query_loc = query_def.loc
worklist: OrderedSet[IRBasicBlock] = OrderedSet()
# blocks not to visit
visited: OrderedSet[IRBasicBlock] = OrderedSet()
# for the first block, we start from the instruction after mem_def.inst
next_inst_idx = query_def.inst.parent.instructions.index(query_def.inst) + 1
# we don't add this to visited because in the case of a loop
# (bb is reachable from itself), we want to be able to visit it again
# starting from instruction 0.
worklist.add(query_def.inst.parent)
while len(worklist) > 0:
bb = worklist.pop()
clobbered = False
for inst in bb.instructions[next_inst_idx:]:
# Check if the instruction reads from the memory location
# If so, the memory definition is used.
mem_use = self.mem_ssa.get_memory_use(inst)
if mem_use is not None:
read_loc = mem_use.loc
if self.mem_ssa.memalias.may_alias(read_loc, query_loc):
return True
# Check if the instruction writes to the memory location
# and it clobbers the memory definition. In this case,
# we continue to the next block already in the worklist.
mem_def = self.mem_ssa.get_memory_def(inst)
if mem_def is not None:
write_loc = mem_def.loc
if write_loc.completely_contains(query_loc):
clobbered = True
break
# If the memory definition is clobbered, we continue to
# the next block already in the worklist without adding
# its offspring to the worklist.
if clobbered:
continue
# Otherwise, we add the block's offsprings to the worklist.
# for all successor blocks, start from the 0'th instruction
next_inst_idx = 0
outs = self.cfg.cfg_out(bb)
for out in outs:
if out not in visited:
visited.add(out)
worklist.add(out)
return False
def _is_dead_store(self, mem_def: MemoryDef) -> bool:
"""
Checks if the memory definition is a dead store.
"""
# Volatile memory locations are never dead stores.
if mem_def.loc.is_volatile is True:
return False
# Memory locations with unknown offset or size are never dead stores.
if not mem_def.loc.is_fixed:
return False
# If the instruction output is used, it is not a dead store.
if self._has_uses(mem_def.store_inst):
return False
# If the instruction has other effects than writing to memory,
# it is not a dead store.
inst = mem_def.store_inst
write_effects = inst.get_write_effects()
read_effects = inst.get_read_effects()
has_other_effects = (write_effects | read_effects) & self.NON_RELATED_EFFECTS
if has_other_effects:
return False
# If the memory definition is clobbered by another memory access,
# it is a dead store.
return not self._is_memory_def_live(mem_def)
| DeadStoreElimination |
python | sqlalchemy__sqlalchemy | test/sql/test_case_statement.py | {
"start": 598,
"end": 10900
} | class ____(fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = "default"
run_inserts = "once"
run_deletes = "never"
@classmethod
def define_tables(cls, metadata):
Table(
"info_table",
metadata,
Column("pk", Integer, primary_key=True),
Column("info", String(30)),
)
@classmethod
def insert_data(cls, connection):
info_table = cls.tables.info_table
connection.execute(
info_table.insert(),
[
{"pk": 1, "info": "pk_1_data"},
{"pk": 2, "info": "pk_2_data"},
{"pk": 3, "info": "pk_3_data"},
{"pk": 4, "info": "pk_4_data"},
{"pk": 5, "info": "pk_5_data"},
{"pk": 6, "info": "pk_6_data"},
],
)
connection.commit()
@testing.requires.subqueries
def test_case(self, connection):
info_table = self.tables.info_table
inner = select(
case(
(info_table.c.pk < 3, "lessthan3"),
(and_(info_table.c.pk >= 3, info_table.c.pk < 7), "gt3"),
).label("x"),
info_table.c.pk,
info_table.c.info,
).select_from(info_table)
inner_result = connection.execute(inner).all()
# Outputs:
# lessthan3 1 pk_1_data
# lessthan3 2 pk_2_data
# gt3 3 pk_3_data
# gt3 4 pk_4_data
# gt3 5 pk_5_data
# gt3 6 pk_6_data
eq_(
inner_result,
[
("lessthan3", 1, "pk_1_data"),
("lessthan3", 2, "pk_2_data"),
("gt3", 3, "pk_3_data"),
("gt3", 4, "pk_4_data"),
("gt3", 5, "pk_5_data"),
("gt3", 6, "pk_6_data"),
],
)
outer = select(inner.alias("q_inner"))
outer_result = connection.execute(outer).all()
assert outer_result == [
("lessthan3", 1, "pk_1_data"),
("lessthan3", 2, "pk_2_data"),
("gt3", 3, "pk_3_data"),
("gt3", 4, "pk_4_data"),
("gt3", 5, "pk_5_data"),
("gt3", 6, "pk_6_data"),
]
w_else = select(
case(
[info_table.c.pk < 3, cast(3, Integer)],
[and_(info_table.c.pk >= 3, info_table.c.pk < 6), 6],
else_=0,
).label("x"),
info_table.c.pk,
info_table.c.info,
).select_from(info_table)
else_result = connection.execute(w_else).all()
eq_(
else_result,
[
(3, 1, "pk_1_data"),
(3, 2, "pk_2_data"),
(6, 3, "pk_3_data"),
(6, 4, "pk_4_data"),
(6, 5, "pk_5_data"),
(0, 6, "pk_6_data"),
],
)
def test_literal_interpretation_one(self):
"""note this is modified as of #7287 to accept strings, tuples
and other literal values as input
where they are interpreted as bound values just like any other
expression.
Previously, an exception would be raised that the literal was
ambiguous.
"""
self.assert_compile(
case(("x", "y")),
"CASE WHEN :param_1 THEN :param_2 END",
checkparams={"param_1": "x", "param_2": "y"},
)
def test_literal_interpretation_two(self):
"""note this is modified as of #7287 to accept strings, tuples
and other literal values as input
where they are interpreted as bound values just like any other
expression.
Previously, an exception would be raised that the literal was
ambiguous.
"""
self.assert_compile(
case(
(("x", "y"), "z"),
),
"CASE WHEN :param_1 THEN :param_2 END",
checkparams={"param_1": ("x", "y"), "param_2": "z"},
)
def test_literal_interpretation_two_point_five(self):
"""note this is modified as of #7287 to accept strings, tuples
and other literal values as input
where they are interpreted as bound values just like any other
expression.
Previously, an exception would be raised that the literal was
ambiguous.
"""
self.assert_compile(
case(
(12, "z"),
),
"CASE WHEN :param_1 THEN :param_2 END",
checkparams={"param_1": 12, "param_2": "z"},
)
def test_literal_interpretation_three(self):
t = table("test", column("col1"))
self.assert_compile(
case(("x", "y"), value=t.c.col1),
"CASE test.col1 WHEN :param_1 THEN :param_2 END",
)
self.assert_compile(
case((t.c.col1 == 7, "y"), else_="z"),
"CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END",
)
@testing.combinations(
(
(lambda t: ({"x": "y"}, t.c.col1, None)),
"CASE test.col1 WHEN :param_1 THEN :param_2 END",
),
(
(lambda t: ({"x": "y", "p": "q"}, t.c.col1, None)),
"CASE test.col1 WHEN :param_1 THEN :param_2 "
"WHEN :param_3 THEN :param_4 END",
),
(
(lambda t: ({t.c.col1 == 7: "x"}, None, 10)),
"CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END",
),
(
(lambda t: ({t.c.col1 == 7: "x", t.c.col1 == 10: "y"}, None, 10)),
"CASE WHEN (test.col1 = :col1_1) THEN :param_1 "
"WHEN (test.col1 = :col1_2) THEN :param_2 ELSE :param_3 END",
),
argnames="test_case, expected",
)
def test_when_dicts(self, test_case, expected):
t = table("test", column("col1"))
when_dict, value, else_ = testing.resolve_lambda(test_case, t=t)
self.assert_compile(
case(when_dict, value=value, else_=else_), expected
)
def test_text_doesnt_explode(self, connection):
info_table = self.tables.info_table
for s in [
select(
case(
(info_table.c.info == "pk_4_data", text("'yes'")),
else_=text("'no'"),
)
).order_by(info_table.c.info),
select(
case(
(
info_table.c.info == "pk_4_data",
literal_column("'yes'"),
),
else_=literal_column("'no'"),
)
).order_by(info_table.c.info),
]:
eq_(
connection.execute(s).all(),
[("no",), ("no",), ("no",), ("yes",), ("no",), ("no",)],
)
def test_text_doenst_explode_even_in_whenlist(self):
"""test #7287"""
self.assert_compile(
case(
(text(":case = 'upper'"), func.upper(literal_column("q"))),
else_=func.lower(literal_column("q")),
),
"CASE WHEN :case = 'upper' THEN upper(q) ELSE lower(q) END",
)
def testcase_with_dict(self):
info_table = self.tables.info_table
query = select(
case(
{
info_table.c.pk < 3: "lessthan3",
info_table.c.pk >= 3: "gt3",
},
else_="other",
),
info_table.c.pk,
info_table.c.info,
).select_from(info_table)
eq_(
query.execute().fetchall(),
[
("lessthan3", 1, "pk_1_data"),
("lessthan3", 2, "pk_2_data"),
("gt3", 3, "pk_3_data"),
("gt3", 4, "pk_4_data"),
("gt3", 5, "pk_5_data"),
("gt3", 6, "pk_6_data"),
],
)
simple_query = (
select(
case(
{1: "one", 2: "two"}, value=info_table.c.pk, else_="other"
),
info_table.c.pk,
)
.where(info_table.c.pk < 4)
.select_from(info_table)
)
assert simple_query.execute().fetchall() == [
("one", 1),
("two", 2),
("other", 3),
]
@testing.variation("add_else", [True, False])
def test_type_of_case_expression_with_all_nulls(self, add_else):
info_table = self.tables.info_table
expr = case(
(info_table.c.pk < 0, None),
(info_table.c.pk > 9, None),
else_=column("q") if add_else else None,
)
assert isinstance(expr.type, NullType)
@testing.combinations(
lambda info_table: (
[
# test non-None in middle of WHENS takes precedence over Nones
(info_table.c.pk < 0, None),
(info_table.c.pk < 5, "five"),
(info_table.c.pk <= 9, info_table.c.pk),
(info_table.c.pk > 9, None),
],
None,
),
lambda info_table: (
# test non-None ELSE takes precedence over WHENs that are None
[(info_table.c.pk < 0, None)],
info_table.c.pk,
),
lambda info_table: (
# test non-None WHEN takes precedence over non-None ELSE
[
(info_table.c.pk < 0, None),
(info_table.c.pk <= 9, info_table.c.pk),
(info_table.c.pk > 9, None),
],
column("q", String),
),
lambda info_table: (
# test last WHEN in list takes precedence
[
(info_table.c.pk < 0, String),
(info_table.c.pk > 9, None),
(info_table.c.pk <= 9, info_table.c.pk),
],
column("q", String),
),
)
def test_type_of_case_expression(self, when_lambda):
info_table = self.tables.info_table
whens, else_ = testing.resolve_lambda(
when_lambda, info_table=info_table
)
expr = case(*whens, else_=else_)
assert isinstance(expr.type, Integer)
| CaseTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/batchtospace_op_test.py | {
"start": 4520,
"end": 4650
} | class ____(BatchToSpaceErrorHandlingTest,
CppOpImpl):
pass
| BatchToSpaceErrorHandlingCppTest |
python | great-expectations__great_expectations | docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py | {
"start": 1460,
"end": 3596
} | class ____(ColumnAggregateMetricProvider):
# </snippet>
"""MetricProvider Class for Custom Aggregate Max MetricProvider"""
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py metric_name">
metric_name = "column.custom_max"
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py _pandas">
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
"""Pandas Max Implementation"""
return column.max()
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py sql_def">
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py sql_selectable">
(
selectable,
_,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column_name = accessor_domain_kwargs["column"]
column = sa.column(column_name)
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py sql_query">
query = sa.select(sa.func.max(column)).select_from(selectable)
result = execution_engine.execute_query(query).fetchone()
return result[0]
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py _spark">
@column_aggregate_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, _table, _column_name, **kwargs):
"""Spark Max Implementation"""
return F.max(column)
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_max_to_be_between_custom.py ExpectColumnMaxToBeBetween class_def">
| ColumnCustomMax |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial004_py310.py | {
"start": 329,
"end": 3455
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(
default=None, foreign_key="team.id", ondelete="RESTRICT"
)
team: Team | None = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
with engine.connect() as connection:
connection.execute(text("PRAGMA foreign_keys=ON")) # for SQLite only
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
def delete_team():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
session.delete(team)
session.commit()
print("Deleted team:", team)
def select_deleted_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Black Lion")
result = session.exec(statement)
hero = result.first()
print("Black Lion has no team:", hero)
statement = select(Hero).where(Hero.name == "Princess Sure-E")
result = session.exec(statement)
hero = result.first()
print("Princess Sure-E has no team:", hero)
def main():
create_db_and_tables()
create_heroes()
delete_team()
if __name__ == "__main__":
main()
| Hero |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 59920,
"end": 63976
} | class ____(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
- Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds: bool
If set, the parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(
self, model, farg, fkwarg, maxiter, acc, epsilon, estimate_jacobian
):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, *args, **context):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(
params, model, weights, *args, fit_param_indices=None
)
)
else:
return self._wrap_deriv(
params, model, weights, *args, fit_param_indices=None
)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note, if use_min_max_bounds is True we are defaulting to enforcing bounds
# using the old method employed by LevMarLSQFitter, this is different
# from the method that optimize.least_squares employs to enforce bounds
# thus we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
kwargs=fkwarg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
| _NLLSQFitter |
python | doocs__leetcode | solution/0600-0699/0673.Number of Longest Increasing Subsequence/Solution.py | {
"start": 0,
"end": 609
} | class ____:
def findNumberOfLIS(self, nums: List[int]) -> int:
n = len(nums)
f = [1] * n
cnt = [1] * n
mx = 0
for i in range(n):
for j in range(i):
if nums[j] < nums[i]:
if f[i] < f[j] + 1:
f[i] = f[j] + 1
cnt[i] = cnt[j]
elif f[i] == f[j] + 1:
cnt[i] += cnt[j]
if mx < f[i]:
mx = f[i]
ans = cnt[i]
elif mx == f[i]:
ans += cnt[i]
return ans
| Solution |
python | doocs__leetcode | solution/1000-1099/1047.Remove All Adjacent Duplicates In String/Solution.py | {
"start": 0,
"end": 239
} | class ____:
def removeDuplicates(self, s: str) -> str:
stk = []
for c in s:
if stk and stk[-1] == c:
stk.pop()
else:
stk.append(c)
return ''.join(stk)
| Solution |
python | getsentry__sentry | src/sentry/api/serializers/models/release.py | {
"start": 7849,
"end": 14404
} | class ____(TypedDict):
name: str | None
email: str
Author = Union[UserSerializerResponse, NonMappableUser]
def get_author_users_by_external_actors(
authors: list[CommitAuthor], organization_id: int
) -> tuple[dict[CommitAuthor, str], list[CommitAuthor]]:
found: dict[CommitAuthor, str] = {}
usernames_to_authors: dict[str, CommitAuthor] = {}
for author in authors:
username = author.get_username_from_external_id()
if username:
# ExternalActor.external_name includes @ prefix
# (e.g., "@username") for GitHub and GitLab
usernames_to_authors[f"@{username}"] = author
if not usernames_to_authors:
return found, authors
external_actors = (
ExternalActor.objects.filter(
external_name__in=list(usernames_to_authors.keys()),
organization_id=organization_id,
user_id__isnull=False, # excludes team mappings
)
.order_by("id")
.values_list("user_id", "external_name")
)
if not external_actors:
return found, authors
missed: dict[int, CommitAuthor] = {a.id: a for a in authors}
for user_id, external_name in external_actors:
if external_name in usernames_to_authors:
found_author = usernames_to_authors[external_name]
found[found_author] = str(user_id)
missed.pop(found_author.id, None)
return found, list(missed.values())
def get_author_users_by_email(
authors: list[CommitAuthor], organization_id: int
) -> tuple[dict[CommitAuthor, str], list[CommitAuthor]]:
author_email_map: dict[str, CommitAuthor] = {a.email.lower(): a for a in authors}
users: list[RpcUser] = user_service.get_many(
filter={
"emails": list(author_email_map.keys()),
"organization_id": organization_id,
"is_active": True,
}
)
if not users:
return {}, authors
missed: dict[int, CommitAuthor] = {a.id: a for a in authors}
primary_match: dict[CommitAuthor, str] = {}
secondary_match: dict[CommitAuthor, str] = {}
for user in users:
primary_email = user.email.lower()
if primary_email in author_email_map:
found_author = author_email_map[primary_email]
primary_match[found_author] = str(user.id)
missed.pop(found_author.id, None)
for email in user.emails:
secondary_email = email.lower()
if secondary_email in author_email_map:
found_author = author_email_map[secondary_email]
if found_author not in primary_match:
secondary_match[found_author] = str(user.id)
missed.pop(found_author.id, None)
# merge matches, primary_match is kept if collision
found: dict[CommitAuthor, str] = secondary_match | primary_match
return found, list(missed.values())
def get_cached_results(
authors: list[CommitAuthor], organization_id: int
) -> tuple[dict[str, Author], list[CommitAuthor]]:
cached_results: dict[str, Author] = {}
fetched = cache.get_many(
[_user_to_author_cache_key(organization_id, author) for author in authors]
)
if not fetched:
return cached_results, authors
missed = []
for author in authors:
fetched_user = fetched.get(_user_to_author_cache_key(organization_id, author))
if fetched_user is None:
missed.append(author)
else:
cached_results[str(author.id)] = fetched_user
return cached_results, missed
def get_users_for_authors(
organization_id: int,
authors: list[CommitAuthor],
user: User | AnonymousUser | RpcUser | None = None,
) -> Mapping[str, Author]:
"""
Returns a dictionary of commit_author_id => user, if a Sentry
user object exists for that email. If there is no matching
Sentry user, a {user, email} dict representation of that
commit author is returned.
e.g.
{
'<commit-author-id-1>': serialized(<User id=1, ...>),
'<commit-author-id-2>': {'email': 'not-a-user@example.com', 'name': 'dunno'},
'<commit-author-id-3>': serialized(<User id=3, ...>),
...
}
"""
cached_results, missed = get_cached_results(authors, organization_id)
if not missed:
metrics.incr("sentry.release.get_users_for_authors.missed", amount=0)
metrics.incr("sentry.release.get_users_for_authors.total", amount=len(cached_results))
return cached_results
# User Mappings take precedence over email lookup (higher signal)
external_actor_results, remaining_missed_authors = get_author_users_by_external_actors(
missed, organization_id
)
if remaining_missed_authors:
email_results, remaining_missed_authors = get_author_users_by_email(
remaining_missed_authors, organization_id
)
else:
email_results = {}
unserialized_results: Mapping[CommitAuthor, str] = {
**external_actor_results,
**email_results,
}
serialized_users: Sequence[UserSerializerResponse] = user_service.serialize_many(
filter={"user_ids": list(unserialized_results.values())},
as_user=serialize_generic_user(user),
)
user_id_to_serialized_user_map: dict[str, UserSerializerResponse] = {
u["id"]: u for u in serialized_users
}
serialized_results: dict[str, UserSerializerResponse] = {}
for commit_author, user_id in unserialized_results.items():
# edge case: a user from unserialized_results could not come back in serialized_users
if user_id in user_id_to_serialized_user_map:
serialized_results[str(commit_author.id)] = user_id_to_serialized_user_map[user_id]
else:
remaining_missed_authors.append(commit_author)
authors_with_no_matches: dict[str, NonMappableUser] = {}
for author in remaining_missed_authors:
authors_with_no_matches[str(author.id)] = {
"name": author.name,
"email": author.email,
}
final_results = {**cached_results, **serialized_results, **authors_with_no_matches}
to_cache = {}
for author in missed:
to_cache[_user_to_author_cache_key(organization_id, author)] = final_results[str(author.id)]
cache.set_many(to_cache)
metrics.incr("sentry.release.get_users_for_authors.missed", amount=len(missed))
metrics.incr("sentry.release.get_users_for_authors.total", amount=len(final_results))
return final_results
| NonMappableUser |
python | numba__numba | numba/core/errors.py | {
"start": 13976,
"end": 16144
} | class ____(object):
"""
An object "fixing" warnings of a given category caught during
certain phases. The warnings can have their filename and lineno fixed,
and they are deduplicated as well.
When used as a context manager, any warnings caught by `.catch_warnings()`
will be flushed at the exit of the context manager.
"""
def __init__(self, category):
self._category = category
# {(filename, lineno, category) -> messages}
self._warnings = defaultdict(set)
@contextlib.contextmanager
def catch_warnings(self, filename=None, lineno=None):
"""
Store warnings and optionally fix their filename and lineno.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always', self._category)
yield
for w in wlist:
msg = str(w.message)
if issubclass(w.category, self._category):
# Store warnings of this category for deduplication
filename = filename or w.filename
lineno = lineno or w.lineno
self._warnings[filename, lineno, w.category].add(msg)
else:
# Simply emit other warnings again
warnings.warn_explicit(msg, w.category,
w.filename, w.lineno)
def flush(self):
"""
Emit all stored warnings.
"""
def key(arg):
# It is possible through codegen to create entirely identical
# warnings, this leads to comparing types when sorting which breaks
# on Python 3. Key as str() and if the worse happens then `id`
# creates some uniqueness
return str(arg) + str(id(arg))
for (filename, lineno, category), messages in sorted(
self._warnings.items(), key=key):
for msg in sorted(messages):
warnings.warn_explicit(msg, category, filename, lineno)
self._warnings.clear()
def __enter__(self):
return
def __exit__(self, exc_type, exc_value, traceback):
self.flush()
| WarningsFixer |
python | ansible__ansible | lib/ansible/parsing/vault/__init__.py | {
"start": 2766,
"end": 2829
} | class ____(AnsibleVaultError):
pass
| AnsibleVaultPasswordError |
python | google__jax | jax/_src/core.py | {
"start": 18308,
"end": 18566
} | class ____(Var):
def __init__(self, aval: AbstractValue):
super().__init__(aval)
def __repr__(self): return '_'
def pretty_print(self, context: JaxprPpContext, *, print_dtype: bool = True):
del context, print_dtype # unused
return '_'
| DropVar |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/common.py | {
"start": 2455,
"end": 2712
} | class ____(CatalogModel):
amount: Decimal
current_billing_cycle: Optional[Decimal]
description: str
id: str
kind: str
name: str
never_expires: bool
number_of_billing_cycles: Optional[Decimal]
quantity: Optional[Decimal]
| AddOn |
python | scrapy__scrapy | tests/test_settings/__init__.py | {
"start": 529,
"end": 777
} | class ____:
def test_get_settings_priority(self):
for prio_str, prio_num in SETTINGS_PRIORITIES.items():
assert get_settings_priority(prio_str) == prio_num
assert get_settings_priority(99) == 99
| TestSettingsGlobalFuncs |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 179594,
"end": 181734
} | class ____(
test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
):
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_collectives(self):
self._test_collectives(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_allreduce_coalesced(self):
self._test_allreduce_coalesced(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_all_to_all_single(self):
self._test_all_to_all_single(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_allgather_base(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 10, device=torch.device(device))
output_tensor = torch.zeros(10, 10, device=torch.device(device))
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
@requires_nccl()
@skip_if_lt_x_gpu(1)
@parametrize("float8_dtype", [torch.float8_e4m3fn, torch.float8_e5m2])
def test_allgather_float8(self, float8_dtype):
device = torch.device(f"cuda:{self.rank:d}")
if not sm_is_or_higher_than(device, 9, 0): # noqa: F821
self.skipTest("FP8 reduction support begins with sm90 capable devices")
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 16, device=torch.device(device)).to(float8_dtype)
output_tensor = torch.zeros(10, 16, device=torch.device(device)).to(
float8_dtype
)
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor.view(torch.float32), tensor.view(torch.float32))
instantiate_parametrized_tests(NcclProcessGroupWithDispatchedCollectivesTests)
| NcclProcessGroupWithDispatchedCollectivesTests |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ec2.py | {
"start": 5505,
"end": 7048
} | class ____(BaseEc2TestClass):
def test_init(self):
ec2_operator = EC2StartInstanceOperator(
task_id="task_test",
instance_id="i-123abc",
aws_conn_id="aws_conn_test",
region_name="region-test",
check_interval=3,
)
assert ec2_operator.task_id == "task_test"
assert ec2_operator.instance_id == "i-123abc"
assert ec2_operator.aws_conn_id == "aws_conn_test"
assert ec2_operator.region_name == "region-test"
assert ec2_operator.check_interval == 3
@mock_aws
def test_start_instance(self):
# create instance
ec2_hook = EC2Hook()
create_instance = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
)
instance_id = create_instance.execute(None)
# start instance
start_test = EC2StartInstanceOperator(
task_id="start_test",
instance_id=instance_id[0],
)
start_test.execute(None)
# assert instance state is running
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "running"
def test_template_fields(self):
ec2_operator = EC2StartInstanceOperator(
task_id="task_test",
instance_id="i-123abc",
aws_conn_id="aws_conn_test",
region_name="region-test",
check_interval=3,
)
validate_template_fields(ec2_operator)
| TestEC2StartInstanceOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 1172,
"end": 1268
} | class ____(ParentClosed3):
b: NotRequired[int]
# This should generate an error.
| ChildClosed3_3 |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 9901,
"end": 10993
} | class ____(Widget):
"""A representation of ``st.button`` and ``st.form_submit_button``."""
_value: bool
proto: ButtonProto = field(repr=False)
label: str
help: str
form_id: str
def __init__(self, proto: ButtonProto, root: ElementTree) -> None:
super().__init__(proto, root)
self._value = False
self.type = "button"
@property
def _widget_state(self) -> WidgetState:
ws = WidgetState()
ws.id = self.id
ws.trigger_value = self._value
return ws
@property
def value(self) -> bool:
"""The value of the button. (bool)""" # noqa: D400
if self._value:
return self._value
state = self.root.session_state
assert state
return cast("bool", state[TESTING_KEY][self.id])
def set_value(self, v: bool) -> Button:
"""Set the value of the button."""
self._value = v
return self
def click(self) -> Button:
"""Set the value of the button to True."""
return self.set_value(True)
@dataclass(repr=False)
| Button |
python | ray-project__ray | rllib/examples/envs/classes/mock_env.py | {
"start": 4447,
"end": 7675
} | class ____(VectorEnv):
"""A custom vector env that uses a single(!) CartPole sub-env.
However, this env pretends to be a vectorized one to illustrate how one
could create custom VectorEnvs w/o the need for actual vectorizations of
sub-envs under the hood.
"""
def __init__(self, episode_length, mocked_num_envs):
self.env = gym.make("CartPole-v1")
super().__init__(
observation_space=self.env.observation_space,
action_space=self.env.action_space,
num_envs=mocked_num_envs,
)
self.episode_len = episode_length
self.ts = 0
@override(VectorEnv)
def vector_reset(self, *, seeds=None, options=None):
# Since we only have one underlying sub-environment, just use the first seed
# and the first options dict (the user of this env thinks, there are
# `self.num_envs` sub-environments and sends that many seeds/options).
seeds = seeds or [None]
options = options or [None]
obs, infos = self.env.reset(seed=seeds[0], options=options[0])
# Simply repeat the single obs/infos to pretend we really have
# `self.num_envs` sub-environments.
return (
[obs for _ in range(self.num_envs)],
[infos for _ in range(self.num_envs)],
)
@override(VectorEnv)
def reset_at(self, index, *, seed=None, options=None):
self.ts = 0
return self.env.reset(seed=seed, options=options)
@override(VectorEnv)
def vector_step(self, actions):
self.ts += 1
# Apply all actions sequentially to the same env.
# Whether this would make a lot of sense is debatable.
obs_batch, rew_batch, terminated_batch, truncated_batch, info_batch = (
[],
[],
[],
[],
[],
)
for i in range(self.num_envs):
obs, rew, terminated, truncated, info = self.env.step(actions[i])
# Artificially truncate once time step limit has been reached.
# Note: Also terminate/truncate, when underlying CartPole is
# terminated/truncated.
if self.ts >= self.episode_len:
truncated = True
obs_batch.append(obs)
rew_batch.append(rew)
terminated_batch.append(terminated)
truncated_batch.append(truncated)
info_batch.append(info)
if terminated or truncated:
remaining = self.num_envs - (i + 1)
obs_batch.extend([obs for _ in range(remaining)])
rew_batch.extend([rew for _ in range(remaining)])
terminated_batch.extend([terminated for _ in range(remaining)])
truncated_batch.extend([truncated for _ in range(remaining)])
info_batch.extend([info for _ in range(remaining)])
break
return obs_batch, rew_batch, terminated_batch, truncated_batch, info_batch
@override(VectorEnv)
def get_sub_environments(self):
# You may also leave this method as-is, in which case, it would
# return an empty list.
return [self.env for _ in range(self.num_envs)]
| MockVectorEnv |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 8390,
"end": 9868
} | class ____:
def test_validation_with_errors(self):
symbol_info = SymbolInfo(symbol_path="test.func", file_path=Path("/test.py"))
# Mock the validation function to return errors and warnings
mock_result = ValidatorResult.create("test.func")
mock_result = mock_result.with_error("Test error").with_warning("Test warning")
with patch(
"automation.dagster_docs.changed_validator.validate_symbol_docstring",
return_value=mock_result,
):
results = validate_symbols({symbol_info})
assert len(results) == 1
result = results[0]
assert result.symbol_info == symbol_info
assert result.errors == ["Test error"]
assert result.warnings == ["Test warning"]
assert result.has_errors() is True
assert result.has_warnings() is True
def test_validation_exception_handling(self):
symbol_info = SymbolInfo(symbol_path="test.func", file_path=Path("/test.py"))
# Mock the validation function to raise an exception
with patch(
"automation.dagster_docs.changed_validator.validate_symbol_docstring",
side_effect=ValueError("Validation failed"),
):
results = validate_symbols({symbol_info})
assert len(results) == 1
result = results[0]
assert result.has_errors() is True
assert "Validation error: Validation failed" in result.errors[0]
| TestValidateSymbols |
python | numpy__numpy | numpy/f2py/symbolic.py | {
"start": 44820,
"end": 53310
} | class ____:
def __init__(self, language=Language.C):
self.original = None
self.quotes_map = None
self.language = language
def finalize_string(self, s):
return insert_quotes(s, self.quotes_map)
def parse(self, inp):
self.original = inp
unquoted, self.quotes_map = eliminate_quotes(inp)
return self.process(unquoted)
def process(self, s, context='expr'):
"""Parse string within the given context.
The context may define the result in case of ambiguous
expressions. For instance, consider expressions `f(x, y)` and
`(x, y) + (a, b)` where `f` is a function and pair `(x, y)`
denotes complex number. Specifying context as "args" or
"expr", the subexpression `(x, y)` will be parse to an
argument list or to a complex number, respectively.
"""
if isinstance(s, (list, tuple)):
return type(s)(self.process(s_, context) for s_ in s)
assert isinstance(s, str), (type(s), s)
# replace subexpressions in parenthesis with f2py @-names
r, raw_symbols_map = replace_parenthesis(s)
r = r.strip()
def restore(r):
# restores subexpressions marked with f2py @-names
if isinstance(r, (list, tuple)):
return type(r)(map(restore, r))
return unreplace_parenthesis(r, raw_symbols_map)
# comma-separated tuple
if ',' in r:
operands = restore(r.split(','))
if context == 'args':
return tuple(self.process(operands))
if context == 'expr':
if len(operands) == 2:
# complex number literal
return as_complex(*self.process(operands))
raise NotImplementedError(
f'parsing comma-separated list (context={context}): {r}')
# ternary operation
m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
if m:
assert context == 'expr', context
oper, expr1, expr2 = restore(m.groups())
oper = self.process(oper)
expr1 = self.process(expr1)
expr2 = self.process(expr2)
return as_ternary(oper, expr1, expr2)
# relational expression
if self.language is Language.Fortran:
m = re.match(
r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
else:
m = re.match(
r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
if m:
left, rop, right = m.groups()
if self.language is Language.Fortran:
rop = '.' + rop + '.'
left, right = self.process(restore((left, right)))
rop = RelOp.fromstring(rop, language=self.language)
return Expr(Op.RELATIONAL, (rop, left, right))
# keyword argument
m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
if m:
keyname, value = m.groups()
value = restore(value)
return _Pair(keyname, self.process(value))
# addition/subtraction operations
operands = re.split(r'((?<!\d[edED])[+-])', r)
if len(operands) > 1:
result = self.process(restore(operands[0] or '0'))
for op, operand in zip(operands[1::2], operands[2::2]):
operand = self.process(restore(operand))
op = op.strip()
if op == '+':
result += operand
else:
assert op == '-'
result -= operand
return result
# string concatenate operation
if self.language is Language.Fortran and '//' in r:
operands = restore(r.split('//'))
return Expr(Op.CONCAT,
tuple(self.process(operands)))
# multiplication/division operations
operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
(r if self.language is Language.C
else r.replace('**', '@__f2py_DOUBLE_STAR@')))
if len(operands) > 1:
operands = restore(operands)
if self.language is not Language.C:
operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
for operand in operands]
# Expression is an arithmetic product
result = self.process(operands[0])
for op, operand in zip(operands[1::2], operands[2::2]):
operand = self.process(operand)
op = op.strip()
if op == '*':
result *= operand
else:
assert op == '/'
result /= operand
return result
# referencing/dereferencing
if r.startswith(('*', '&')):
op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
operand = self.process(restore(r[1:]))
return Expr(op, operand)
# exponentiation operations
if self.language is not Language.C and '**' in r:
operands = list(reversed(restore(r.split('**'))))
result = self.process(operands[0])
for operand in operands[1:]:
operand = self.process(operand)
result = operand ** result
return result
# int-literal-constant
m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
digit_string=r'\d+',
kind=r'_(\d+|\w[\w\d_]*)'), r)
if m:
value, _, kind = m.groups()
if kind and kind.isdigit():
kind = int(kind)
return as_integer(int(value), kind or 4)
# real-literal-constant
m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
.format(
significant=r'[.]\d+|\d+[.]\d*',
exponent=r'[edED][+-]?\d+',
kind=r'_(\d+|\w[\w\d_]*)'), r)
if m:
value, _, _, kind = m.groups()
if kind and kind.isdigit():
kind = int(kind)
value = value.lower()
if 'd' in value:
return as_real(float(value.replace('d', 'e')), kind or 8)
return as_real(float(value), kind or 4)
# string-literal-constant with kind parameter specification
if r in self.quotes_map:
kind = r[:r.find('@')]
return as_string(self.quotes_map[r], kind or 1)
# array constructor or literal complex constant or
# parenthesized expression
if r in raw_symbols_map:
paren = _get_parenthesis_kind(r)
items = self.process(restore(raw_symbols_map[r]),
'expr' if paren == 'ROUND' else 'args')
if paren == 'ROUND':
if isinstance(items, Expr):
return items
if paren in ['ROUNDDIV', 'SQUARE']:
# Expression is an array constructor
if isinstance(items, Expr):
items = (items,)
return as_array(items)
# function call/indexing
m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
r)
if m:
target, args, paren = m.groups()
target = self.process(restore(target))
args = self.process(restore(args)[1:-1], 'args')
if not isinstance(args, tuple):
args = args,
if paren == 'ROUND':
kwargs = {a.left: a.right for a in args
if isinstance(a, _Pair)}
args = tuple(a for a in args if not isinstance(a, _Pair))
# Warning: this could also be Fortran indexing operation..
return as_apply(target, *args, **kwargs)
else:
# Expression is a C/Python indexing operation
# (e.g. used in .pyf files)
assert paren == 'SQUARE'
return target[args]
# Fortran standard conforming identifier
m = re.match(r'\A\w[\w\d_]*\Z', r)
if m:
return as_symbol(r)
# fall-back to symbol
r = self.finalize_string(restore(r))
ewarn(
f'fromstring: treating {r!r} as symbol (original={self.original})')
return as_symbol(r)
| _FromStringWorker |
python | sqlalchemy__sqlalchemy | examples/versioned_history/test_versioning.py | {
"start": 29673,
"end": 29825
} | class ____(TestVersioning):
def make_base(self):
class Base(DeclarativeBase):
pass
self.Base = Base
| TestVersioningNewBase |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-isaacus/llama_index/embeddings/isaacus/base.py | {
"start": 569,
"end": 10178
} | class ____(BaseEmbedding):
"""
Isaacus Embeddings Integration.
This class provides an interface to Isaacus' embedding API, featuring the
Kanon 2 Embedder - the world's most accurate legal embedding model on the
Massive Legal Embedding Benchmark (MLEB).
Args:
model (str, optional): The model to use. Defaults to "kanon-2-embedder".
api_key (str, optional): The API key for Isaacus. Defaults to ISAACUS_API_KEY.
base_url (str, optional): The base URL for Isaacus API. Defaults to ISAACUS_BASE_URL.
dimensions (int, optional): The desired embedding dimensionality.
task (str, optional): Task type: "retrieval/query" or "retrieval/document".
overflow_strategy (str, optional): Strategy for handling overflow. Defaults to "drop_end".
timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
**kwargs: Additional keyword arguments.
Environment Variables:
- ISAACUS_API_KEY: The API key for Isaacus
- ISAACUS_BASE_URL: The base URL for Isaacus API (optional)
Raises:
ValueError: If required environment variables are not set.
"""
model: str = Field(
default=DEFAULT_ISAACUS_MODEL,
description="The model to use for embeddings.",
)
api_key: Optional[str] = Field(default=None, description="The API key for Isaacus.")
base_url: Optional[str] = Field(
default=None, description="The base URL for Isaacus API."
)
dimensions: Optional[int] = Field(
default=None, description="The desired embedding dimensionality."
)
task: Optional[Literal["retrieval/query", "retrieval/document"]] = Field(
default=None,
description="Task type: 'retrieval/query' or 'retrieval/document'.",
)
overflow_strategy: Optional[Literal["drop_end"]] = Field(
default="drop_end", description="Strategy for handling overflow."
)
timeout: float = Field(default=60.0, description="Timeout for requests in seconds.")
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
def __init__(
self,
model: str = DEFAULT_ISAACUS_MODEL,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
task: Optional[Literal["retrieval/query", "retrieval/document"]] = None,
overflow_strategy: Optional[Literal["drop_end"]] = "drop_end",
timeout: float = 60.0,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""
Initialize an instance of the IsaacusEmbedding class.
Args:
model (str, optional): The model to use. Defaults to "kanon-2-embedder".
api_key (str, optional): The API key for Isaacus. Defaults to ISAACUS_API_KEY.
base_url (str, optional): The base URL for Isaacus API.
dimensions (int, optional): The desired embedding dimensionality.
task (str, optional): Task type: "retrieval/query" or "retrieval/document".
overflow_strategy (str, optional): Strategy for handling overflow.
timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
embed_batch_size (int, optional): Batch size for embedding calls. Defaults to DEFAULT_EMBED_BATCH_SIZE.
callback_manager (Optional[CallbackManager], optional): Callback manager. Defaults to None.
**kwargs: Additional keyword arguments.
"""
# Get API key from parameter or environment
try:
api_key = get_from_param_or_env(
"api_key",
api_key,
"ISAACUS_API_KEY",
)
except ValueError:
raise ValueError(
"API key is required. Set ISAACUS_API_KEY environment variable or pass api_key parameter."
)
# Get base URL from parameter or environment (optional)
if base_url is None:
try:
base_url = get_from_param_or_env(
"base_url",
base_url,
"ISAACUS_BASE_URL",
)
except ValueError:
base_url = DEFAULT_ISAACUS_API_BASE
super().__init__(
model_name=model,
model=model,
api_key=api_key,
base_url=base_url,
dimensions=dimensions,
task=task,
overflow_strategy=overflow_strategy,
timeout=timeout,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
# Initialize Isaacus clients
self._client = isaacus.Isaacus(
api_key=self.api_key,
base_url=self.base_url,
timeout=self.timeout,
)
self._aclient = isaacus.AsyncIsaacus(
api_key=self.api_key,
base_url=self.base_url,
timeout=self.timeout,
)
@classmethod
def class_name(cls) -> str:
    """Return the identifier LlamaIndex uses to register this embedding class."""
    return "IsaacusEmbedding"
def _prepare_request_params(
self, text: str, task_override: Optional[str] = None
) -> dict:
"""Prepare request parameters for the Isaacus API."""
params = {
"model": self.model,
"texts": text,
}
# Use task_override if provided, otherwise use instance task
task_to_use = task_override if task_override is not None else self.task
if task_to_use is not None:
params["task"] = task_to_use
if self.dimensions is not None:
params["dimensions"] = self.dimensions
if self.overflow_strategy is not None:
params["overflow_strategy"] = self.overflow_strategy
return params
def _get_query_embedding(self, query: str) -> Embedding:
"""
Get query embedding.
For queries, we use the 'retrieval/query' task if no task is explicitly set.
"""
return self._get_text_embedding(query, task_override="retrieval/query")
def _get_text_embedding(
    self, text: str, task_override: Optional[str] = None
) -> Embedding:
    """Embed a single text via the Isaacus API.

    Args:
        text: The text to embed.
        task_override: Optional task overriding the instance-level ``task``.

    Returns:
        The embedding vector for ``text``.

    Raises:
        ValueError: If the API call fails or returns no embeddings.
    """
    params = self._prepare_request_params(text, task_override)
    try:
        # Keep the try body minimal: only the network call should be guarded,
        # so our own "no embeddings" error below is not re-wrapped.
        response = self._client.embeddings.create(**params)
    except Exception as e:
        logger.error(f"Error while embedding text: {e}")
        # Chain the original exception so the root cause is preserved.
        raise ValueError(f"Unable to embed text: {e}") from e
    if response.embeddings:
        return response.embeddings[0].embedding
    raise ValueError("No embeddings returned from API")
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
    """Embed multiple texts in a single batched Isaacus API call.

    Args:
        texts: The texts to embed.

    Returns:
        Embedding vectors in the same order as ``texts``.

    Raises:
        ValueError: If the API call fails.
    """
    params = self._prepare_request_params(texts, task_override=self.task)
    try:
        # Only the network call is guarded; see _get_text_embedding.
        response = self._client.embeddings.create(**params)
    except Exception as e:
        logger.error(f"Error while embedding texts: {e}")
        # Chain the original exception so the root cause is preserved.
        raise ValueError(f"Unable to embed texts: {e}") from e
    # Results may arrive out of order; restore input order via the index field.
    ordered = sorted(response.embeddings, key=lambda item: item.index)
    return [item.embedding for item in ordered]
async def _aget_query_embedding(self, query: str) -> Embedding:
"""
Get query embedding asynchronously.
For queries, we use the 'retrieval/query' task if no task is explicitly set.
"""
return await self._aget_text_embedding(query, task_override="retrieval/query")
async def _aget_text_embedding(
    self, text: str, task_override: Optional[str] = None
) -> Embedding:
    """Asynchronously embed a single text via the Isaacus API.

    Args:
        text: The text to embed.
        task_override: Optional task overriding the instance-level ``task``.

    Returns:
        The embedding vector for ``text``.

    Raises:
        ValueError: If the API call fails or returns no embeddings.
    """
    params = self._prepare_request_params(text, task_override)
    try:
        # Keep the try body minimal: only the network call should be guarded,
        # so our own "no embeddings" error below is not re-wrapped.
        response = await self._aclient.embeddings.create(**params)
    except Exception as e:
        logger.error(f"Error while embedding text: {e}")
        # Chain the original exception so the root cause is preserved.
        raise ValueError(f"Unable to embed text: {e}") from e
    if response.embeddings:
        return response.embeddings[0].embedding
    raise ValueError("No embeddings returned from API")
async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
    """Asynchronously embed multiple texts in one batched Isaacus API call.

    Args:
        texts: The texts to embed.

    Returns:
        Embedding vectors in the same order as ``texts``.

    Raises:
        ValueError: If the API call fails.
    """
    params = self._prepare_request_params(texts, task_override=self.task)
    try:
        # Only the network call is guarded; see _aget_text_embedding.
        response = await self._aclient.embeddings.create(**params)
    except Exception as e:
        logger.error(f"Error while embedding texts: {e}")
        # Chain the original exception so the root cause is preserved.
        raise ValueError(f"Unable to embed texts: {e}") from e
    # Results may arrive out of order; restore input order via the index field.
    ordered = sorted(response.embeddings, key=lambda item: item.index)
    return [item.embedding for item in ordered]
| IsaacusEmbedding |
python | getsentry__sentry | src/sentry/uptime/migrations/0046_delete_project_uptime_subscription_table.py | {
"start": 239,
"end": 1510
} | class ____(CheckedMigration):
    """Safely delete the ``ProjectUptimeSubscription`` model and drop its table."""

    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
    is_post_deployment = False

    # Must follow the detector-threshold backfill in the same app.
    dependencies = [
        ("uptime", "0045_backfill_detector_thresholds"),
    ]

    operations = [
        SafeDeleteModel(
            name="ProjectUptimeSubscription",
            deletion_action=DeletionAction.DELETE,
        ),
    ]
| Migration |
python | sympy__sympy | sympy/tensor/tensor.py | {
"start": 55655,
"end": 58073
} | class ____(Basic):
    """
    Class of tensor types. Deprecated, use tensor_heads() instead.

    Parameters
    ==========

    index_types : list of ``TensorIndexType`` of the tensor indices
    symmetry : ``TensorSymmetry`` of the tensor

    Attributes
    ==========

    ``index_types``
    ``symmetry``
    ``types`` : list of ``TensorIndexType`` without repetitions
    """
    # Instances are treated as non-commutative by SymPy's core.
    is_commutative = False

    def __new__(cls, index_types, symmetry, **kw_args):
        # The symmetry must provide exactly one slot per index type.
        assert symmetry.rank == len(index_types)
        obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
        return obj

    @property
    def index_types(self):
        # Stored as the first constructor argument (a Tuple).
        return self.args[0]

    @property
    def symmetry(self):
        # Stored as the second constructor argument.
        return self.args[1]

    @property
    def types(self):
        # Deduplicated index types, sorted by name for a deterministic order.
        return sorted(set(self.index_types), key=lambda x: x.name)

    def __str__(self):
        return f"TensorType({[str(x) for x in self.index_types]})"

    def __call__(self, s, comm=0):
        """
        Return a TensorHead object or a list of TensorHead objects.

        Parameters
        ==========

        s : name or string of names.
        comm : Commutation group.

        see ``_TensorManager.set_comm``
        """
        if isinstance(s, str):
            # ``symbols(..., seq=True)`` always yields a sequence, even for one name.
            names = [x.name for x in symbols(s, seq=True)]
        else:
            raise ValueError('expecting a string')
        if len(names) == 1:
            # Single name: return the head directly rather than a 1-element list.
            return TensorHead(names[0], self.index_types, self.symmetry, comm)
        else:
            return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names]
@deprecated(
    """
    The tensorhead() function is deprecated. Use tensor_heads() instead.
    """,
    deprecated_since_version="1.5",
    active_deprecations_target="deprecated-tensorhead",
)
def tensorhead(name, typ, sym=None, comm=0):
    """
    Function generating tensorhead(s). This method is deprecated,
    use TensorHead constructor or tensor_heads() instead.

    Parameters
    ==========

    name : name or sequence of names (as in ``symbols``)
    typ : index types
    sym : same as ``*args`` in ``tensorsymmetry``
    comm : commutation group number
        see ``_TensorManager.set_comm``
    """
    if sym is None:
        # Default symmetry: one independent slot per index type.
        sym = [[1] for i in range(len(typ))]
    with ignore_warnings(SymPyDeprecationWarning):
        # tensorsymmetry() is itself deprecated; suppress its warning since
        # the caller already got this function's deprecation warning.
        sym = tensorsymmetry(*sym)
    return TensorHead(name, typ, sym, comm)
| TensorType |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 21356,
"end": 21951
} | class ____(WebSocketBaseTestCase):
    """Verify that a native-coroutine ``on_message`` handler processes
    incoming websocket messages one at a time, in order."""

    def get_app(self):
        # Single endpoint backed by the native-coroutine message handler.
        return Application([("/native", NativeCoroutineOnMessageHandler)])

    @gen_test
    def test_native_coroutine(self):
        ws = yield self.ws_connect("/native")
        # Send both messages immediately, coroutine must process one at a time.
        yield ws.write_message("hello1")
        yield ws.write_message("hello2")
        res = yield ws.read_message()
        self.assertEqual(res, "hello1")
        res = yield ws.read_message()
        self.assertEqual(res, "hello2")
@abstract_base_test
| WebSocketNativeCoroutineTest |
python | sympy__sympy | sympy/logic/boolalg.py | {
"start": 45463,
"end": 116701
} | class ____(BooleanFunction):
"""
True if only one or no argument is true.
``Exclusive(A, B, C)`` is equivalent to ``~(A & B) & ~(A & C) & ~(B & C)``.
For two arguments, this is equivalent to :py:class:`~.Xor`.
Examples
========
>>> from sympy.logic.boolalg import Exclusive
>>> Exclusive(False, False, False)
True
>>> Exclusive(False, True, False)
True
>>> Exclusive(False, True, True)
False
"""
@classmethod
def eval(cls, *args):
and_args = []
for a, b in combinations(args, 2):
and_args.append(Not(And(a, b)))
return And(*and_args)
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in ``expr``.
Examples
========
>>> from sympy.logic.boolalg import conjuncts
>>> from sympy.abc import A, B
>>> conjuncts(A & B)
frozenset({A, B})
>>> conjuncts(A | B)
frozenset({A | B})
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in ``expr``.
Examples
========
>>> from sympy.logic.boolalg import disjuncts
>>> from sympy.abc import A, B
>>> disjuncts(A | B)
frozenset({A, B})
>>> disjuncts(A & B)
frozenset({A & B})
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence ``expr`` consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
(A | ~B) & (A | ~C)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence ``expr`` consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_or_over_and(And(Or(Not(A), B), C))
(B & C) | (C & ~A)
"""
return _distribute((expr, Or, And))
def distribute_xor_over_and(expr):
"""
Given a sentence ``expr`` consisting of conjunction and
exclusive disjunctions of literals, return an
equivalent exclusive disjunction.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_xor_over_and, And, Xor, Not
>>> from sympy.abc import A, B, C
>>> distribute_xor_over_and(And(Xor(Not(A), B), C))
(B & C) ^ (C & ~A)
"""
return _distribute((expr, Xor, And))
def _distribute(info):
"""
Distributes ``info[1]`` over ``info[2]`` with respect to ``info[0]``.
"""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
[(info[2](c, rest), info[1], info[2])
for c in conj.args])), remove_true=False)
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
[(x, info[1], info[2])
for x in info[0].args])),
remove_true=False)
else:
return info[0]
def to_anf(expr, deep=True):
r"""
Converts expr to Algebraic Normal Form (ANF).
ANF is a canonical normal form, which means that two
equivalent formulas will convert to the same ANF.
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it can be:
- purely true,
- purely false,
- conjunction of variables,
- exclusive disjunction.
The exclusive disjunction can only contain true, variables
or conjunction of variables. No negations are permitted.
If ``deep`` is ``False``, arguments of the boolean
expression are considered variables, i.e. only the
top-level expression is converted to ANF.
Examples
========
>>> from sympy.logic.boolalg import And, Or, Not, Implies, Equivalent
>>> from sympy.logic.boolalg import to_anf
>>> from sympy.abc import A, B, C
>>> to_anf(Not(A))
A ^ True
>>> to_anf(And(Or(A, B), Not(C)))
A ^ B ^ (A & B) ^ (A & C) ^ (B & C) ^ (A & B & C)
>>> to_anf(Implies(Not(A), Equivalent(B, C)), deep=False)
True ^ ~A ^ (~A & (Equivalent(B, C)))
"""
expr = sympify(expr)
if is_anf(expr):
return expr
return expr.to_anf(deep=deep)
def to_nnf(expr, simplify=True, form=None):
"""
Converts ``expr`` to Negation Normal Form (NNF).
A logical expression is in NNF if it
contains only :py:class:`~.And`, :py:class:`~.Or` and :py:class:`~.Not`,
and :py:class:`~.Not` is applied only to literals.
If ``simplify`` is ``True``, the result contains no redundant clauses.
Parameters
==========
expr : boolean expression
The expression to convert to NNF.
simplify : bool, optional
If True, simplify the result. Default is True.
form : str, optional
Target form hint: 'cnf' for conjunctive normal form bias,
'dnf' for disjunctive normal form bias, or None (default).
This hint optimizes XOR conversions.
Examples
========
>>> from sympy.abc import A, B, C, D
>>> from sympy.logic.boolalg import Not, Equivalent, to_nnf
>>> to_nnf(Not((~A & ~B) | (C & D)))
(A | B) & (~C | ~D)
>>> to_nnf(Equivalent(A >> B, B >> A))
(A | ~B | (A & ~B)) & (B | ~A | (B & ~A))
"""
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify, form=form)
def to_cnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to conjunctive normal
form: ``((A | ~B | ...) & (B | C | ...) & ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest CNF
form using the Quine-McCluskey algorithm; this may take a long
time. If there are more than 8 variables the ``force`` flag must be set
to ``True`` to simplify (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
(D | ~A) & (D | ~B)
>>> to_cnf((A | B) & (A | ~A), True)
A | B
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'cnf', True, force=force)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr, form='cnf')
res = distribute_and_over_or(expr)
return res
def to_dnf(expr, simplify=False, force=False):
"""
Convert a propositional logical sentence ``expr`` to disjunctive normal
form: ``((A & ~B & ...) | (B & C & ...) | ...)``.
If ``simplify`` is ``True``, ``expr`` is evaluated to its simplest DNF form using
the Quine-McCluskey algorithm; this may take a long
time. If there are more than 8 variables, the ``force`` flag must be set to
``True`` to simplify (default is ``False``).
Examples
========
>>> from sympy.logic.boolalg import to_dnf
>>> from sympy.abc import A, B, C
>>> to_dnf(B & (A | C))
(A & B) | (B & C)
>>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True)
A | C
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
if not force and len(_find_predicates(expr)) > 8:
raise ValueError(filldedent('''
To simplify a logical expression with more
than 8 variables may take a long time and requires
the use of `force=True`.'''))
return simplify_logic(expr, 'dnf', True, force=force)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr, form='dnf')
return distribute_or_over_and(expr)
def is_anf(expr):
r"""
Checks if ``expr`` is in Algebraic Normal Form (ANF).
A logical expression is in ANF if it has the form
.. math:: 1 \oplus a \oplus b \oplus ab \oplus abc
i.e. it is purely true, purely false, conjunction of
variables or exclusive disjunction. The exclusive
disjunction can only contain true, variables or
conjunction of variables. No negations are permitted.
Examples
========
>>> from sympy.logic.boolalg import And, Not, Xor, true, is_anf
>>> from sympy.abc import A, B, C
>>> is_anf(true)
True
>>> is_anf(A)
True
>>> is_anf(And(A, B, C))
True
>>> is_anf(Xor(A, Not(B)))
False
"""
expr = sympify(expr)
if is_literal(expr) and not isinstance(expr, Not):
return True
if isinstance(expr, And):
for arg in expr.args:
if not arg.is_Symbol:
return False
return True
elif isinstance(expr, Xor):
for arg in expr.args:
if isinstance(arg, And):
for a in arg.args:
if not a.is_Symbol:
return False
elif is_literal(arg):
if isinstance(arg, Not):
return False
else:
return False
return True
else:
return False
def is_nnf(expr, simplified=True):
"""
Checks if ``expr`` is in Negation Normal Form (NNF).
A logical expression is in NNF if it
contains only :py:class:`~.And`, :py:class:`~.Or` and :py:class:`~.Not`,
and :py:class:`~.Not` is applied only to literals.
If ``simplified`` is ``True``, checks if result contains no redundant clauses.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import Not, is_nnf
>>> is_nnf(A & B | ~C)
True
>>> is_nnf((A | ~A) & (B | C))
False
>>> is_nnf((A | ~A) & (B | C), False)
True
>>> is_nnf(Not(A & B) | C)
False
>>> is_nnf((A >> B) & (B >> A))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_cnf
>>> from sympy.abc import A, B, C
>>> is_cnf(A | B | C)
True
>>> is_cnf(A & B & C)
True
>>> is_cnf((A & B) | C)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_dnf
>>> from sympy.abc import A, B, C
>>> is_dnf(A | B | C)
True
>>> is_dnf(A & B & C)
True
>>> is_dnf((A & B) | C)
True
>>> is_dnf(A & (B | C))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""
Test whether or not an expression is of the required form.
"""
expr = sympify(expr)
vals = function1.make_args(expr) if isinstance(expr, function1) else [expr]
for lit in vals:
if isinstance(lit, function2):
vals2 = function2.make_args(lit) if isinstance(lit, function2) else [lit]
for l in vals2:
if is_literal(l) is False:
return False
elif is_literal(lit) is False:
return False
return True
def eliminate_implications(expr, form=None):
"""
Change :py:class:`~.Implies` and :py:class:`~.Equivalent` into
:py:class:`~.And`, :py:class:`~.Or`, and :py:class:`~.Not`.
That is, return an expression that is equivalent to ``expr``, but has only
``&``, ``|``, and ``~`` as logical
operators.
Parameters
==========
expr : boolean expression
The expression to eliminate implications from.
form : str, optional
Target form hint: 'cnf' or 'dnf'. Passed to to_nnf for optimization.
Examples
========
>>> from sympy.logic.boolalg import Implies, Equivalent, \
eliminate_implications
>>> from sympy.abc import A, B, C
>>> eliminate_implications(Implies(A, B))
B | ~A
>>> eliminate_implications(Equivalent(A, B))
(A | ~B) & (B | ~A)
>>> eliminate_implications(Equivalent(A, B, C))
(A | ~C) & (B | ~A) & (C | ~B)
"""
return to_nnf(expr, simplify=False, form=form)
def is_literal(expr):
    """
    Returns True if expr is a literal, else False.

    Examples
    ========

    >>> from sympy import Or, Q
    >>> from sympy.abc import A, B
    >>> from sympy.logic.boolalg import is_literal
    >>> is_literal(A)
    True
    >>> is_literal(~A)
    True
    >>> is_literal(Q.zero(A))
    True
    >>> is_literal(A + B)
    True
    >>> is_literal(Or(A, B))
    False
    """
    from sympy.assumptions import AppliedPredicate

    if isinstance(expr, Not):
        # A negation is a literal iff its operand is.
        return is_literal(expr.args[0])
    elif expr in (True, False) or isinstance(expr, AppliedPredicate) or expr.is_Atom:
        return True
    elif not isinstance(expr, BooleanFunction) and all(
            # NOTE(review): ``isinstance(expr, AppliedPredicate)`` tests the
            # *outer* expression, which is always False on this branch (the
            # previous branch already returned for AppliedPredicate), so the
            # condition reduces to ``all(a.is_Atom ...)``. Possibly intended
            # to be ``isinstance(a, AppliedPredicate)`` — confirm upstream.
            (isinstance(expr, AppliedPredicate) or a.is_Atom) for a in expr.args):
        return True
    return False
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> from sympy.logic.boolalg import to_int_repr
>>> from sympy.abc import x, y
>>> to_int_repr([x | y, y], [x, y]) == [{1, 2}, {2}]
True
"""
# Convert the symbol list into a dict
symbols = dict(zip(symbols, range(1, len(symbols) + 1)))
def append_symbol(arg, symbols):
if isinstance(arg, Not):
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [{append_symbol(arg, symbols) for arg in Or.make_args(c)}
for c in clauses]
def term_to_integer(term):
    """
    Return an integer corresponding to the base-2 digits given by *term*.

    Parameters
    ==========

    term : a string or list of ones and zeros

    Examples
    ========

    >>> from sympy.logic.boolalg import term_to_integer
    >>> term_to_integer([1, 0, 0])
    4
    >>> term_to_integer('100')
    4
    """
    # Render each digit as text, concatenate, and parse as binary.
    digits = "".join(str(bit) for bit in term)
    return int(digits, 2)
integer_to_term = ibin # XXX could delete?
def truth_table(expr, variables, input=True):
"""
Return a generator of all possible configurations of the input variables,
and the result of the boolean expression for those values.
Parameters
==========
expr : Boolean expression
variables : list of variables
input : bool (default ``True``)
Indicates whether to return the input combinations.
Examples
========
>>> from sympy.logic.boolalg import truth_table
>>> from sympy.abc import x,y
>>> table = truth_table(x >> y, [x, y])
>>> for t in table:
... print('{0} -> {1}'.format(*t))
[0, 0] -> True
[0, 1] -> True
[1, 0] -> False
[1, 1] -> True
>>> table = truth_table(x | y, [x, y])
>>> list(table)
[([0, 0], False), ([0, 1], True), ([1, 0], True), ([1, 1], True)]
If ``input`` is ``False``, ``truth_table`` returns only a list of truth values.
In this case, the corresponding input values of variables can be
deduced from the index of a given output.
>>> from sympy.utilities.iterables import ibin
>>> vars = [y, x]
>>> values = truth_table(x >> y, vars, input=False)
>>> values = list(values)
>>> values
[True, False, True, True]
>>> for i, value in enumerate(values):
... print('{0} -> {1}'.format(list(zip(
... vars, ibin(i, len(vars)))), value))
[(y, 0), (x, 0)] -> True
[(y, 0), (x, 1)] -> False
[(y, 1), (x, 0)] -> True
[(y, 1), (x, 1)] -> True
"""
variables = [sympify(v) for v in variables]
expr = sympify(expr)
if not isinstance(expr, BooleanFunction) and not is_literal(expr):
return
table = product((0, 1), repeat=len(variables))
for term in table:
value = expr.xreplace(dict(zip(variables, term)))
if input:
yield list(term), value
else:
yield value
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns `-1`.
"""
# Early termination seems to be faster than list comprehension,
# at least for large examples.
index = -1
for x, i in enumerate(minterm1): # zip(minterm1, minterm2) is slower
if i != minterm2[x]:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = [variables[n] if val == 1 else Not(variables[n])
for n, val in enumerate(minterm) if val != 3]
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = [variables[n] if val == 0 else Not(variables[n])
for n, val in enumerate(maxterm) if val != 3]
return Or(*temp)
def _convert_to_varsANF(term, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for ANF).
Parameters
==========
term : list of 1's and 0's (complementation pattern)
variables : list of variables
"""
temp = [variables[n] for n, t in enumerate(term) if t == 1]
if not temp:
return true
return And(*temp)
def _get_odd_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an odd number of ones.
"""
return [[1 if (mask >> i) & 1 else 0 for i in range(n)]
for mask in range(1 << n) if _bit_count(mask) % 2 == 1]
def _get_even_parity_terms(n):
"""
Returns a list of lists, with all possible combinations of n zeros and ones
with an even number of ones.
"""
return [[1 if (mask >> i) & 1 else 0 for i in range(n)]
for mask in range(1 << n) if _bit_count(mask) % 2 == 0]
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
if not terms:
return []
simplified_terms = []
todo = list(range(len(terms)))
# Count number of ones as _check_pair can only potentially match if there
# is at most a difference of a single one
termdict = defaultdict(list)
for n, term in enumerate(terms):
ones = sum(1 for t in term if t == 1)
termdict[ones].append(n)
variables = len(terms[0])
for k in range(variables):
for i in termdict[k]:
for j in termdict[k+1]:
index = _check_pair(terms[i], terms[j])
if index != -1:
# Mark terms handled
todo[i] = todo[j] = None
# Copy old term
newterm = terms[i][:]
# Set differing position to don't care
newterm[index] = 3
# Add if not already there
if newterm not in simplified_terms:
simplified_terms.append(newterm)
if simplified_terms:
# Further simplifications only among the new terms
simplified_terms = _simplified_pairs(simplified_terms)
# Add remaining, non-simplified, terms
simplified_terms.extend([terms[i] for i in todo if i is not None])
return simplified_terms
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
if not terms:
return []
nterms = len(terms)
nl1 = len(l1)
# Create dominating matrix
dommatrix = [[0]*nl1 for n in range(nterms)]
colcount = [0]*nl1
rowcount = [0]*nterms
for primei, prime in enumerate(l1):
for termi, term in enumerate(terms):
# Check prime implicant covering term
if all(t == 3 or t == mt for t, mt in zip(prime, term)):
dommatrix[termi][primei] = 1
colcount[primei] += 1
rowcount[termi] += 1
# Keep track if anything changed
anythingchanged = True
# Then, go again
while anythingchanged:
anythingchanged = False
for rowi in range(nterms):
# Still non-dominated?
if rowcount[rowi]:
row = dommatrix[rowi]
for row2i in range(nterms):
# Still non-dominated?
if rowi != row2i and rowcount[rowi] and (rowcount[rowi] <= rowcount[row2i]):
row2 = dommatrix[row2i]
if all(row2[n] >= row[n] for n in range(nl1)):
# row2 dominating row, remove row2
rowcount[row2i] = 0
anythingchanged = True
for primei, prime in enumerate(row2):
if prime:
# Make corresponding entry 0
dommatrix[row2i][primei] = 0
colcount[primei] -= 1
colcache = {}
for coli in range(nl1):
# Still non-dominated?
if colcount[coli]:
if coli in colcache:
col = colcache[coli]
else:
col = [dommatrix[i][coli] for i in range(nterms)]
colcache[coli] = col
for col2i in range(nl1):
# Still non-dominated?
if coli != col2i and colcount[col2i] and (colcount[coli] >= colcount[col2i]):
if col2i in colcache:
col2 = colcache[col2i]
else:
col2 = [dommatrix[i][col2i] for i in range(nterms)]
colcache[col2i] = col2
if all(col[n] >= col2[n] for n in range(nterms)):
# col dominating col2, remove col2
colcount[col2i] = 0
anythingchanged = True
for termi, term in enumerate(col2):
if term and dommatrix[termi][col2i]:
# Make corresponding entry 0
dommatrix[termi][col2i] = 0
rowcount[termi] -= 1
if not anythingchanged:
# Heuristically select the prime implicant covering most terms
maxterms = 0
bestcolidx = -1
for coli in range(nl1):
s = colcount[coli]
if s > maxterms:
bestcolidx = coli
maxterms = s
# In case we found a prime implicant covering at least two terms
if bestcolidx != -1 and maxterms > 1:
for primei, prime in enumerate(l1):
if primei != bestcolidx:
for termi, term in enumerate(colcache[bestcolidx]):
if term and dommatrix[termi][primei]:
# Make corresponding entry 0
dommatrix[termi][primei] = 0
anythingchanged = True
rowcount[termi] -= 1
colcount[primei] -= 1
return [l1[i] for i in range(nl1) if colcount[i]]
def _input_to_binlist(inputlist, variables):
binlist = []
bits = len(variables)
for val in inputlist:
if isinstance(val, int):
binlist.append(ibin(val, bits))
elif isinstance(val, dict):
nonspecvars = list(variables)
for key in val.keys():
nonspecvars.remove(key)
for t in product((0, 1), repeat=len(nonspecvars)):
d = dict(zip(nonspecvars, t))
d.update(val)
binlist.append([d[v] for v in variables])
elif isinstance(val, (list, tuple)):
if len(val) != bits:
raise ValueError("Each term must contain {bits} bits as there are"
"\n{bits} variables (or be an integer)."
"".format(bits=bits))
binlist.append(list(val))
else:
raise TypeError("A term list can only contain lists,"
" ints or dicts.")
return binlist
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest sum-of-products form.
The variables must be given as the first argument.
Return a logical :py:class:`~.Or` function (i.e., the "sum of products" or
"SOP" form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import SOPform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (~w & ~x)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(y & z) | (~w & ~x)
They can also be specified using dicts, which does not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> SOPform([w, x, y, z], minterms)
(x & ~w) | (y & z & ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> SOPform([w, x, y, z], minterms, dontcares)
(w & y & z) | (~w & ~y) | (x & z & ~w)
See also
========
POSform
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
.. [2] https://en.wikipedia.org/wiki/Don%27t-care_term
"""
if not minterms:
return false
variables = tuple(map(sympify, variables))
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
return _sop_form(variables, minterms, dontcares)
def _sop_form(variables, minterms, dontcares):
new = _simplified_pairs(minterms + dontcares)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest product-of-sums form.
The variables must be given as the first argument.
Return a logical :py:class:`~.And` function (i.e., the "product of sums"
or "POS" form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import POSform
>>> from sympy import symbols
>>> w, x, y, z = symbols('w x y z')
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
The terms can also be represented as integers:
>>> minterms = [1, 3, 7, 11, 15]
>>> dontcares = [0, 2, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
z & (y | ~w)
They can also be specified using dicts, which does not have to be fully
specified:
>>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}]
>>> POSform([w, x, y, z], minterms)
(x | y) & (x | z) & (~w | ~x)
Or a combination:
>>> minterms = [4, 7, 11, [1, 1, 1, 1]]
>>> dontcares = [{w : 0, x : 0, y: 0}, 5]
>>> POSform([w, x, y, z], minterms, dontcares)
(w | x) & (y | ~w) & (z | ~y)
See also
========
SOPform
References
==========
.. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
.. [2] https://en.wikipedia.org/wiki/Don%27t-care_term
"""
if not minterms:
return false
variables = tuple(map(sympify, variables))
minterms = _input_to_binlist(minterms, variables)
dontcares = _input_to_binlist((dontcares or []), variables)
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
maxterms = []
for t in product((0, 1), repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
new = _simplified_pairs(maxterms + dontcares)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def ANFform(variables, truthvalues):
    """
    The ANFform function converts the list of truth values to
    Algebraic Normal Form (ANF).
    The variables must be given as the first argument.
    Return True, False, logical :py:class:`~.And` function (i.e., the
    "Zhegalkin monomial") or logical :py:class:`~.Xor` function (i.e.,
    the "Zhegalkin polynomial"). When True and False
    are represented by 1 and 0, respectively, then
    :py:class:`~.And` is multiplication and :py:class:`~.Xor` is addition.
    Formally a "Zhegalkin monomial" is the product (logical
    And) of a finite set of distinct variables, including
    the empty set whose product is denoted 1 (True).
    A "Zhegalkin polynomial" is the sum (logical Xor) of a
    set of Zhegalkin monomials, with the empty set denoted
    by 0 (False).
    Parameters
    ==========
    variables : list of variables
    truthvalues : list of 1's and 0's (result column of truth table)
    Examples
    ========
    >>> from sympy.logic.boolalg import ANFform
    >>> from sympy.abc import x, y
    >>> ANFform([x], [1, 0])
    x ^ True
    >>> ANFform([x, y], [0, 1, 1, 1])
    x ^ y ^ (x & y)
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Zhegalkin_polynomial
    """
    n_vars = len(variables)
    n_values = len(truthvalues)
    # A complete truth table over n_vars inputs has exactly 2**n_vars rows.
    if n_values != 2 ** n_vars:
        raise ValueError("The number of truth values must be equal to 2^%d, "
                         "got %d" % (n_vars, n_values))
    variables = tuple(map(sympify, variables))
    # Moebius (butterfly) transform of the truth table: coeffs[k] == 1 iff
    # the k-th Zhegalkin monomial appears in the polynomial.
    coeffs = anf_coeffs(truthvalues)
    terms = []
    # Enumerate monomials in the same order anf_coeffs numbers them: the
    # binary digits of the index mark which variables are present.
    for i, t in enumerate(product((0, 1), repeat=n_vars)):
        if coeffs[i] == 1:
            terms.append(t)
    # remove_true=False keeps an explicit True argument (the constant
    # monomial) instead of letting Xor absorb it, as in the doctest above.
    return Xor(*[_convert_to_varsANF(x, variables) for x in terms],
               remove_true=False)
def anf_coeffs(truthvalues):
    """
    Convert a list of truth values of some boolean expression
    to the list of coefficients of the polynomial mod 2 (exclusive
    disjunction) representing the boolean expression in ANF
    (i.e., the "Zhegalkin polynomial").

    There are `2^n` possible Zhegalkin monomials in `n` variables, since
    each monomial is fully specified by the presence or absence of
    each variable.

    We can enumerate all the monomials. For example, boolean
    function with four variables ``(a, b, c, d)`` can contain
    up to `2^4 = 16` monomials. The 13-th monomial is the
    product ``a & b & d``, because 13 in binary is 1, 1, 0, 1.

    A given monomial's presence or absence in a polynomial corresponds
    to that monomial's coefficient being 1 or 0 respectively.

    Parameters
    ==========

    truthvalues : list of 1's and 0's
        Result column of a truth table; its length must be a power of two.

    Returns
    =======

    list of 1's and 0's
        Coefficient of each Zhegalkin monomial, indexed by monomial number.

    Raises
    ======

    ValueError
        If the number of truth values is not a power of two.

    Examples
    ========

    >>> from sympy.logic.boolalg import anf_coeffs, bool_monomial, Xor
    >>> from sympy.abc import a, b, c
    >>> truthvalues = [0, 1, 1, 0, 0, 1, 0, 1]
    >>> coeffs = anf_coeffs(truthvalues)
    >>> coeffs
    [0, 1, 1, 0, 0, 0, 1, 0]
    >>> polynomial = Xor(*[
    ...     bool_monomial(k, [a, b, c])
    ...     for k, coeff in enumerate(coeffs) if coeff == 1
    ... ])
    >>> polynomial
    b ^ c ^ (a & b)
    """
    # n = log2(len(truthvalues)) when the length is a power of two;
    # bit_length is clearer and cheaper than formatting the length in
    # binary and measuring the string.
    n = len(truthvalues).bit_length() - 1
    if len(truthvalues) != 2**n:
        raise ValueError("The number of truth values must be a power of two, "
                         "got %d" % len(truthvalues))
    # Butterfly (Moebius) transform: repeatedly merge adjacent pairs of
    # coefficient lists.  After round i each entry covers 2**(i+1) truth
    # table rows; the second half of a merged entry is the element-wise
    # XOR of the two halves (the mod-2 finite difference).
    coeffs = [[v] for v in truthvalues]
    for i in range(n):
        coeffs = [
            coeffs[2*j] + [x ^ y for x, y in zip(coeffs[2*j], coeffs[2*j + 1])]
            for j in range(2 ** (n - i - 1))
        ]
    return coeffs[0]
def bool_minterm(k, variables):
    """
    Return the k-th minterm.
    Minterms are numbered by a binary encoding of the complementation
    pattern of the variables. This convention assigns the value 1 to
    the direct form and 0 to the complemented form.
    Parameters
    ==========
    k : int or list of 1's and 0's (complementation pattern)
    variables : list of variables
    Examples
    ========
    >>> from sympy.logic.boolalg import bool_minterm
    >>> from sympy.abc import x, y, z
    >>> bool_minterm([1, 0, 1], [x, y, z])
    x & z & ~y
    >>> bool_minterm(6, [x, y, z])
    x & y & ~z
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_minterms
    """
    # An integer index is expanded to its binary complementation pattern,
    # one bit per variable; a list is already such a pattern.
    pattern = ibin(k, len(variables)) if isinstance(k, int) else k
    sym_vars = tuple(sympify(v) for v in variables)
    return _convert_to_varsSOP(pattern, sym_vars)
def bool_maxterm(k, variables):
    """
    Return the k-th maxterm.
    Each maxterm is assigned an index based on the opposite
    conventional binary encoding used for minterms. The maxterm
    convention assigns the value 0 to the direct form and 1 to
    the complemented form.
    Parameters
    ==========
    k : int or list of 1's and 0's (complementation pattern)
    variables : list of variables
    Examples
    ========
    >>> from sympy.logic.boolalg import bool_maxterm
    >>> from sympy.abc import x, y, z
    >>> bool_maxterm([1, 0, 1], [x, y, z])
    y | ~x | ~z
    >>> bool_maxterm(6, [x, y, z])
    z | ~x | ~y
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Canonical_normal_form#Indexing_maxterms
    """
    # An integer index is expanded to its binary complementation pattern,
    # one bit per variable; a list is already such a pattern.
    pattern = ibin(k, len(variables)) if isinstance(k, int) else k
    sym_vars = tuple(sympify(v) for v in variables)
    return _convert_to_varsPOS(pattern, sym_vars)
def bool_monomial(k, variables):
    """
    Return the k-th monomial.
    Monomials are numbered by a binary encoding of the presence and
    absences of the variables. This convention assigns the value
    1 to the presence of variable and 0 to the absence of variable.
    Each boolean function can be uniquely represented by a
    Zhegalkin Polynomial (Algebraic Normal Form). The Zhegalkin
    Polynomial of the boolean function with `n` variables can contain
    up to `2^n` monomials. We can enumerate all the monomials.
    Each monomial is fully specified by the presence or absence
    of each variable.
    For example, boolean function with four variables ``(a, b, c, d)``
    can contain up to `2^4 = 16` monomials. The 13-th monomial is the
    product ``a & b & d``, because 13 in binary is 1, 1, 0, 1.
    Parameters
    ==========
    k : int or list of 1's and 0's
    variables : list of variables
    Examples
    ========
    >>> from sympy.logic.boolalg import bool_monomial
    >>> from sympy.abc import x, y, z
    >>> bool_monomial([1, 0, 1], [x, y, z])
    x & z
    >>> bool_monomial(6, [x, y, z])
    x & y
    """
    # An integer index is expanded to its binary presence pattern, one
    # bit per variable; a list is already such a pattern.
    pattern = ibin(k, len(variables)) if isinstance(k, int) else k
    sym_vars = tuple(sympify(v) for v in variables)
    return _convert_to_varsANF(pattern, sym_vars)
def _find_predicates(expr):
    """Helper to find logical predicates in BooleanFunctions.

    A logical predicate is defined here as anything within a
    BooleanFunction that is not a BooleanFunction itself.
    """
    if isinstance(expr, BooleanFunction):
        # Recurse into the arguments and merge the predicate sets;
        # a BooleanFunction with no args yields the empty set.
        found = set()
        for arg in expr.args:
            found |= _find_predicates(arg)
        return found
    return {expr}
def simplify_logic(expr, form=None, deep=True, force=False, dontcare=None):
    """
    This function simplifies a boolean function to its simplified version
    in SOP or POS form. The return type is an :py:class:`~.Or` or
    :py:class:`~.And` object in SymPy.
    Parameters
    ==========
    expr : Boolean
    form : string (``'cnf'`` or ``'dnf'``) or ``None`` (default).
        If ``'cnf'`` or ``'dnf'``, the simplest expression in the corresponding
        normal form is returned; if ``None``, the answer is returned
        according to the form with fewest args (in CNF by default).
    deep : bool (default ``True``)
        Indicates whether to recursively simplify any
        non-boolean functions contained within the input.
    force : bool (default ``False``)
        As the simplifications require exponential time in the number
        of variables, there is by default a limit on expressions with
        8 variables. When the expression has more than 8 variables
        only symbolical simplification (controlled by ``deep``) is
        made. By setting ``force`` to ``True``, this limit is removed. Be
        aware that this can lead to very long simplification times.
    dontcare : Boolean
        Optimize expression under the assumption that inputs where this
        expression is true are don't care. This is useful in e.g. Piecewise
        conditions, where later conditions do not need to consider inputs that
        are converted by previous conditions. For example, if a previous
        condition is ``And(A, B)``, the simplification of expr can be made
        with don't cares for ``And(A, B)``.
    Examples
    ========
    >>> from sympy.logic import simplify_logic
    >>> from sympy.abc import x, y, z
    >>> b = (~x & ~y & ~z) | ( ~x & ~y & z)
    >>> simplify_logic(b)
    ~x & ~y
    >>> simplify_logic(x | y, dontcare=y)
    x
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Don%27t-care_term
    """
    if form not in (None, 'cnf', 'dnf'):
        raise ValueError("form can be cnf or dnf only")
    expr = sympify(expr)
    # check for quick exit if form is given: right form and all args are
    # literal and do not involve Not
    if form:
        form_ok = False
        if form == 'cnf':
            form_ok = is_cnf(expr)
        elif form == 'dnf':
            form_ok = is_dnf(expr)
        if form_ok and all(is_literal(a)
                for a in expr.args):
            return expr
    from sympy.core.relational import Relational
    if deep:
        # Simplify any embedded Relationals first so equivalent ones
        # collapse to the same expression before truth-table expansion.
        variables = expr.atoms(Relational)
        from sympy.simplify.simplify import simplify
        s = tuple(map(simplify, variables))
        expr = expr.xreplace(dict(zip(variables, s)))
    if not isinstance(expr, BooleanFunction):
        return expr
    # Replace Relationals with Dummys to possibly
    # reduce the number of variables
    repl = {}
    undo = {}
    from sympy.core.symbol import Dummy
    variables = expr.atoms(Relational)
    if dontcare is not None:
        dontcare = sympify(dontcare)
        variables.update(dontcare.atoms(Relational))
    while variables:
        var = variables.pop()
        if var.is_Relational:
            d = Dummy()
            undo[d] = var
            repl[var] = d
            nvar = var.negated
            # A relational and its negation share one Dummy so they count
            # as a single boolean variable below.
            if nvar in variables:
                repl[nvar] = Not(d)
                variables.remove(nvar)
    expr = expr.xreplace(repl)
    if dontcare is not None:
        dontcare = dontcare.xreplace(repl)
    # Get new variables after replacing
    variables = _find_predicates(expr)
    # Beyond 8 variables the 2**n truth-table expansion is too costly;
    # bail out (restoring the original Relationals) unless forced.
    if not force and len(variables) > 8:
        return expr.xreplace(undo)
    if dontcare is not None:
        # Add variables from dontcare
        dcvariables = _find_predicates(dontcare)
        variables.update(dcvariables)
        # if too many restore to variables only
        if not force and len(variables) > 8:
            variables = _find_predicates(expr)
            dontcare = None
    # group into constants and variable values
    c, v = sift(ordered(variables), lambda x: x in (True, False), binary=True)
    variables = c + v
    # standardize constants to be 1 or 0 in keeping with truthtable
    c = [1 if i == True else 0 for i in c]
    truthtable = _get_truthtable(v, expr, c)
    if dontcare is not None:
        # Rows covered by the don't-care expression are removed from the
        # required minterms and passed separately as optional ones.
        dctruthtable = _get_truthtable(v, dontcare, c)
        truthtable = [t for t in truthtable if t not in dctruthtable]
    else:
        dctruthtable = []
    # Heuristic: with many true rows a DNF (sum-of-products) tends to be
    # smaller, otherwise prefer CNF.
    big = len(truthtable) >= (2 ** (len(variables) - 1))
    if form == 'dnf' or form is None and big:
        return _sop_form(variables, truthtable, dctruthtable).xreplace(undo)
    return POSform(variables, truthtable, dctruthtable).xreplace(undo)
def _get_truthtable(variables, expr, const):
    """ Return a list of all combinations leading to a True result for ``expr``.

    ``variables`` are substituted one at a time with false/true; ``const``
    is a prefix of constant truth values prepended to every row returned.
    """
    _variables = variables.copy()
    def _get_tt(inputs):
        # Each element of ``inputs`` is [partially-substituted expr, bits].
        # Pop one variable and fork every surviving entry into the
        # variable=false (bit 0) and variable=true (bit 1) branches;
        # entries already reduced to false are pruned.
        if _variables:
            v = _variables.pop()
            tab = [[i[0].xreplace({v: false}), [0] + i[1]] for i in inputs if i[0] is not false]
            tab.extend([[i[0].xreplace({v: true}), [1] + i[1]] for i in inputs if i[0] is not false])
            return _get_tt(tab)
        return inputs
    # Keep only rows whose fully substituted expression is truthy.
    res = [const + k[1] for k in _get_tt([[expr, []]]) if k[0]]
    if res == [[]]:
        return []
    else:
        return res
def _finger(eq):
    """
    Assign a 5-item fingerprint to each symbol in the equation:
    [
    # of times it appeared as a Symbol;
    # of times it appeared as a Not(symbol);
    # of times it appeared as a Symbol in an And or Or;
    # of times it appeared as a Not(Symbol) in an And or Or;
    a sorted tuple of tuples, (i, j, k), where i is the number of arguments
    in an And or Or with which it appeared as a Symbol, and j is
    the number of arguments that were Not(Symbol); k is the number
    of times that (i, j) was seen.
    ]
    Examples
    ========
    >>> from sympy.logic.boolalg import _finger as finger
    >>> from sympy import And, Or, Not, Xor, to_cnf, symbols
    >>> from sympy.abc import a, b, x, y
    >>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
    >>> dict(finger(eq))
    {(0, 0, 1, 0, ((2, 0, 1),)): [x],
    (0, 0, 1, 0, ((2, 1, 1),)): [a, b],
    (0, 0, 1, 2, ((2, 0, 1),)): [y]}
    >>> dict(finger(x & ~y))
    {(0, 1, 0, 0, ()): [y], (1, 0, 0, 0, ()): [x]}
    In the following, the (5, 2, 6) means that there were 6 Or
    functions in which a symbol appeared as itself amongst 5 arguments in
    which there were also 2 negated symbols, e.g. ``(a0 | a1 | a2 | ~a3 | ~a4)``
    is counted once for a0, a1 and a2.
    >>> dict(finger(to_cnf(Xor(*symbols('a:5')))))
    {(0, 0, 8, 8, ((5, 0, 1), (5, 2, 6), (5, 4, 1))): [a0, a1, a2, a3, a4]}
    The equation must not have more than one level of nesting:
    >>> dict(finger(And(Or(x, y), y)))
    {(0, 0, 1, 0, ((2, 0, 1),)): [x], (1, 0, 1, 0, ((2, 0, 1),)): [y]}
    >>> dict(finger(And(Or(x, And(a, x)), y)))
    Traceback (most recent call last):
    ...
    NotImplementedError: unexpected level of nesting
    So y and x have unique fingerprints, but a and b do not.
    """
    f = eq.free_symbols
    # d maps each symbol to a mutable fingerprint: four counters plus a
    # defaultdict counting (#args, #negated-args) shapes of And/Or clauses.
    d = dict(list(zip(f, [[0]*4 + [defaultdict(int)] for fi in f])))
    for a in eq.args:
        if a.is_Symbol:
            d[a][0] += 1
        elif a.is_Not:
            d[a.args[0]][1] += 1
        else:
            # a is an And/Or clause; o is its shape (arity, negated count).
            o = len(a.args), sum(isinstance(ai, Not) for ai in a.args)
            for ai in a.args:
                if ai.is_Symbol:
                    d[ai][2] += 1
                    d[ai][-1][o] += 1
                elif ai.is_Not:
                    d[ai.args[0]][3] += 1
                else:
                    raise NotImplementedError('unexpected level of nesting')
    # Invert: group symbols that share an identical (hashable) fingerprint.
    inv = defaultdict(list)
    for k, v in ordered(iter(d.items())):
        v[-1] = tuple(sorted([i + (j,) for i, j in v[-1].items()]))
        inv[tuple(v)].append(k)
    return inv
def bool_map(bool1, bool2):
    """
    Return the simplified version of *bool1*, and the mapping of variables
    that makes the two expressions *bool1* and *bool2* represent the same
    logical behaviour for some correspondence between the variables
    of each.
    If more than one mappings of this sort exist, one of them
    is returned.
    For example, ``And(x, y)`` is logically equivalent to ``And(a, b)`` for
    the mapping ``{x: a, y: b}`` or ``{x: b, y: a}``.
    If no such mapping exists, return ``False``.
    Examples
    ========
    >>> from sympy import SOPform, bool_map, Or, And, Not, Xor
    >>> from sympy.abc import w, x, y, z, a, b, c, d
    >>> function1 = SOPform([x, z, y],[[1, 0, 1], [0, 0, 1]])
    >>> function2 = SOPform([a, b, c],[[1, 0, 1], [1, 0, 0]])
    >>> bool_map(function1, function2)
    (y & ~z, {y: a, z: b})
    The results are not necessarily unique, but they are canonical. Here,
    ``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
    >>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
    >>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
    >>> bool_map(eq, eq2)
    ((x & y) | (w & ~y) | (z & ~y), {w: a, x: b, y: c, z: d})
    >>> eq = And(Xor(a, b), c, And(c,d))
    >>> bool_map(eq, eq.subs(c, x))
    (c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
    """
    def match(function1, function2):
        """Return the mapping that equates variables between two
        simplified boolean expressions if possible.
        By "simplified" we mean that a function has been denested
        and is either an And (or an Or) whose arguments are either
        symbols (x), negated symbols (Not(x)), or Or (or an And) whose
        arguments are only symbols or negated symbols. For example,
        ``And(x, Not(y), Or(w, Not(z)))``.
        Basic.match is not robust enough (see issue 4835) so this is
        a workaround that is valid for simplified boolean expressions
        """
        # do some quick checks
        if function1.__class__ != function2.__class__:
            return None  # maybe simplification makes them the same?
        if len(function1.args) != len(function2.args):
            return None  # maybe simplification makes them the same?
        if function1.is_Symbol:
            return {function1: function2}
        # get the fingerprint dictionaries
        f1 = _finger(function1)
        f2 = _finger(function2)
        # more quick checks
        if len(f1) != len(f2):
            return False
        # assemble the match dictionary if possible:
        # symbols with the same fingerprint are paired positionally
        # (fingerprint groups are already canonically ordered by _finger)
        matchdict = {}
        for k in f1.keys():
            if k not in f2:
                return False
            if len(f1[k]) != len(f2[k]):
                return False
            for i, x in enumerate(f1[k]):
                matchdict[x] = f2[k][i]
        return matchdict
    a = simplify_logic(bool1)
    b = simplify_logic(bool2)
    m = match(a, b)
    # m is a non-empty dict on success; None/False/{} (all falsy) mean no
    # mapping was found and are returned unchanged.
    if m:
        return a, m
    return m
def _apply_patternbased_simplification(rv, patterns, measure,
                                       dominatingvalue,
                                       replacementvalue=None,
                                       threeterm_patterns=None):
    """
    Replace patterns of Relational
    Parameters
    ==========
    rv : Expr
        Boolean expression
    patterns : tuple
        Tuple of tuples, with (pattern to simplify, simplified pattern) with
        two terms.
    measure : function
        Simplification measure.
    dominatingvalue : Boolean or ``None``
        The dominating value for the function of consideration.
        For example, for :py:class:`~.And` ``S.false`` is dominating.
        As soon as one expression is ``S.false`` in :py:class:`~.And`,
        the whole expression is ``S.false``.
    replacementvalue : Boolean or ``None``, optional
        The resulting value for the whole expression if one argument
        evaluates to ``dominatingvalue``.
        For example, for :py:class:`~.Nand` ``S.false`` is dominating, but
        in this case the resulting value is ``S.true``. Default is ``None``.
        If ``replacementvalue`` is ``None`` and ``dominatingvalue`` is not
        ``None``, ``replacementvalue = dominatingvalue``.
    threeterm_patterns : tuple, optional
        Tuple of tuples, with (pattern to simplify, simplified pattern) with
        three terms.
    """
    from sympy.core.relational import Relational, _canonical
    if replacementvalue is None and dominatingvalue is not None:
        replacementvalue = dominatingvalue
    # Use replacement patterns for Relationals
    Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
                       binary=True)
    # Nothing to pair up with fewer than two relationals.
    if len(Rel) <= 1:
        return rv
    # The patterns assume real-valued quantities; relationals over symbols
    # known to be non-real are set aside untouched.
    Rel, nonRealRel = sift(Rel, lambda i: not any(s.is_real is False
                                                  for s in i.free_symbols),
                           binary=True)
    Rel = [i.canonical for i in Rel]
    if threeterm_patterns and len(Rel) >= 3:
        Rel = _apply_patternbased_threeterm_simplification(Rel,
                            threeterm_patterns, rv.func, dominatingvalue,
                            replacementvalue, measure)
    Rel = _apply_patternbased_twoterm_simplification(Rel, patterns,
                    rv.func, dominatingvalue, replacementvalue, measure)
    # Reassemble: simplified relationals (canonicalized, ordered) plus the
    # untouched non-relational and non-real arguments.
    rv = rv.func(*([_canonical(i) for i in ordered(Rel)]
                   + nonRel + nonRealRel))
    return rv
def _apply_patternbased_twoterm_simplification(Rel, patterns, func,
                                               dominatingvalue,
                                               replacementvalue,
                                               measure):
    """ Apply pattern-based two-term simplification.

    Repeatedly tries every pattern against every pair of (possibly
    reversed) relationals in ``Rel`` and applies the single replacement
    giving the largest decrease of ``measure``, until no replacement
    improves anything.  Returns the (possibly shortened) list of
    relationals, or ``[replacementvalue]`` if a pair simplified to
    ``dominatingvalue``.
    """
    from sympy.functions.elementary.miscellaneous import Min, Max
    from sympy.core.relational import Ge, Gt, _Inequality
    changed = True
    while changed and len(Rel) >= 2:
        changed = False
        # Use only < or <=
        Rel = [r.reversed if isinstance(r, (Ge, Gt)) else r for r in Rel]
        # Sort based on ordered
        Rel = list(ordered(Rel))
        # Eq and Ne must be tested reversed as well
        rtmp = [(r, ) if isinstance(r, _Inequality) else (r, r.reversed) for r in Rel]
        # Create a list of possible replacements
        results = []
        # Try all combinations of possibly reversed relational
        for ((i, pi), (j, pj)) in combinations(enumerate(rtmp), 2):
            for pattern, simp in patterns:
                res = []
                for p1, p2 in product(pi, pj):
                    # use SymPy matching
                    oldexpr = Tuple(p1, p2)
                    tmpres = oldexpr.match(pattern)
                    if tmpres:
                        res.append((tmpres, oldexpr))
                if res:
                    for tmpres, oldexpr in res:
                        # we have a matching, compute replacement
                        np = simp.xreplace(tmpres)
                        if np == dominatingvalue:
                            # if dominatingvalue, the whole expression
                            # will be replacementvalue
                            return [replacementvalue]
                        # add replacement
                        if not isinstance(np, ITE) and not np.has(Min, Max):
                            # We only want to use ITE and Min/Max replacements if
                            # they simplify to a relational
                            costsaving = measure(func(*oldexpr.args)) - measure(np)
                            if costsaving > 0:
                                results.append((costsaving, ([i, j], np)))
        if results:
            # Sort results based on complexity
            results = sorted(results,
                             key=lambda pair: pair[0], reverse=True)
            # Replace the one providing most simplification
            replacement = results[0][1]
            idx, newrel = replacement
            idx.sort()
            # Remove the old relationals
            for index in reversed(idx):
                del Rel[index]
            if dominatingvalue is None or newrel != Not(dominatingvalue):
                # Insert the new one (no need to insert a value that will
                # not affect the result)
                if newrel.func == func:
                    for a in newrel.args:
                        Rel.append(a)
                else:
                    Rel.append(newrel)
            # We did change something so try again
            changed = True
    return Rel
def _apply_patternbased_threeterm_simplification(Rel, patterns, func,
                                                 dominatingvalue,
                                                 replacementvalue,
                                                 measure):
    """ Apply pattern-based three-term simplification.

    Same strategy as the two-term variant, but over ordered triples of
    (possibly reversed) relationals; note the three-term patterns are
    written with >/>= so relationals are normalized to Ge/Gt here,
    whereas the two-term pass normalizes to Le/Lt.
    """
    from sympy.functions.elementary.miscellaneous import Min, Max
    from sympy.core.relational import Le, Lt, _Inequality
    changed = True
    while changed and len(Rel) >= 3:
        changed = False
        # Use only > or >=
        Rel = [r.reversed if isinstance(r, (Le, Lt)) else r for r in Rel]
        # Sort based on ordered
        Rel = list(ordered(Rel))
        # Create a list of possible replacements
        results = []
        # Eq and Ne must be tested reversed as well
        rtmp = [(r, ) if isinstance(r, _Inequality) else (r, r.reversed) for r in Rel]
        # Try all combinations of possibly reversed relational
        for ((i, pi), (j, pj), (k, pk)) in permutations(enumerate(rtmp), 3):
            for pattern, simp in patterns:
                res = []
                for p1, p2, p3 in product(pi, pj, pk):
                    # use SymPy matching
                    oldexpr = Tuple(p1, p2, p3)
                    tmpres = oldexpr.match(pattern)
                    if tmpres:
                        res.append((tmpres, oldexpr))
                if res:
                    for tmpres, oldexpr in res:
                        # we have a matching, compute replacement
                        np = simp.xreplace(tmpres)
                        if np == dominatingvalue:
                            # if dominatingvalue, the whole expression
                            # will be replacementvalue
                            return [replacementvalue]
                        # add replacement
                        if not isinstance(np, ITE) and not np.has(Min, Max):
                            # We only want to use ITE and Min/Max replacements if
                            # they simplify to a relational
                            costsaving = measure(func(*oldexpr.args)) - measure(np)
                            if costsaving > 0:
                                results.append((costsaving, ([i, j, k], np)))
        if results:
            # Sort results based on complexity
            results = sorted(results,
                             key=lambda pair: pair[0], reverse=True)
            # Replace the one providing most simplification
            replacement = results[0][1]
            idx, newrel = replacement
            idx.sort()
            # Remove the old relationals
            for index in reversed(idx):
                del Rel[index]
            if dominatingvalue is None or newrel != Not(dominatingvalue):
                # Insert the new one (no need to insert a value that will
                # not affect the result)
                if newrel.func == func:
                    for a in newrel.args:
                        Rel.append(a)
                else:
                    Rel.append(newrel)
            # We did change something so try again
            changed = True
    return Rel
@cacheit
def _simplify_patterns_and():
    """ Two-term patterns for And.

    Each entry is (pattern, replacement); patterns are matched against a
    Tuple of two relationals by the two-term simplifier.  Commented-out
    entries are redundant under the Le/Lt canonicalization performed
    there.
    """
    from sympy.core import Wild
    from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
    from sympy.functions.elementary.complexes import Abs
    from sympy.functions.elementary.miscellaneous import Min, Max
    a = Wild('a')
    b = Wild('b')
    c = Wild('c')
    # Relationals patterns should be in alphabetical order
    # (pattern1, pattern2, simplified)
    # Do not use Ge, Gt
    _matchers_and = ((Tuple(Eq(a, b), Lt(a, b)), false),
                     #(Tuple(Eq(a, b), Lt(b, a)), S.false),
                     #(Tuple(Le(b, a), Lt(a, b)), S.false),
                     #(Tuple(Lt(b, a), Le(a, b)), S.false),
                     (Tuple(Lt(b, a), Lt(a, b)), false),
                     (Tuple(Eq(a, b), Le(b, a)), Eq(a, b)),
                     #(Tuple(Eq(a, b), Le(a, b)), Eq(a, b)),
                     #(Tuple(Le(b, a), Lt(b, a)), Gt(a, b)),
                     (Tuple(Le(b, a), Le(a, b)), Eq(a, b)),
                     #(Tuple(Le(b, a), Ne(a, b)), Gt(a, b)),
                     #(Tuple(Lt(b, a), Ne(a, b)), Gt(a, b)),
                     (Tuple(Le(a, b), Lt(a, b)), Lt(a, b)),
                     (Tuple(Le(a, b), Ne(a, b)), Lt(a, b)),
                     (Tuple(Lt(a, b), Ne(a, b)), Lt(a, b)),
                     # Sign
                     (Tuple(Eq(a, b), Eq(a, -b)), And(Eq(a, S.Zero), Eq(b, S.Zero))),
                     # Min/Max/ITE
                     (Tuple(Le(b, a), Le(c, a)), Ge(a, Max(b, c))),
                     (Tuple(Le(b, a), Lt(c, a)), ITE(b > c, Ge(a, b), Gt(a, c))),
                     (Tuple(Lt(b, a), Lt(c, a)), Gt(a, Max(b, c))),
                     (Tuple(Le(a, b), Le(a, c)), Le(a, Min(b, c))),
                     (Tuple(Le(a, b), Lt(a, c)), ITE(b < c, Le(a, b), Lt(a, c))),
                     (Tuple(Lt(a, b), Lt(a, c)), Lt(a, Min(b, c))),
                     (Tuple(Le(a, b), Le(c, a)), ITE(Eq(b, c), Eq(a, b), ITE(b < c, false, And(Le(a, b), Ge(a, c))))),
                     (Tuple(Le(c, a), Le(a, b)), ITE(Eq(b, c), Eq(a, b), ITE(b < c, false, And(Le(a, b), Ge(a, c))))),
                     (Tuple(Lt(a, b), Lt(c, a)), ITE(b < c, false, And(Lt(a, b), Gt(a, c)))),
                     (Tuple(Lt(c, a), Lt(a, b)), ITE(b < c, false, And(Lt(a, b), Gt(a, c)))),
                     (Tuple(Le(a, b), Lt(c, a)), ITE(b <= c, false, And(Le(a, b), Gt(a, c)))),
                     (Tuple(Le(c, a), Lt(a, b)), ITE(b <= c, false, And(Lt(a, b), Ge(a, c)))),
                     (Tuple(Eq(a, b), Eq(a, c)), ITE(Eq(b, c), Eq(a, b), false)),
                     (Tuple(Lt(a, b), Lt(-b, a)), ITE(b > 0, Lt(Abs(a), b), false)),
                     (Tuple(Le(a, b), Le(-b, a)), ITE(b >= 0, Le(Abs(a), b), false)),
                     )
    return _matchers_and
@cacheit
def _simplify_patterns_and3():
    """ Three-term patterns for And.

    Each entry is (pattern, replacement); patterns are matched against a
    Tuple of three relationals (normalized to Ge/Gt) by the three-term
    simplifier.
    """
    from sympy.core import Wild
    from sympy.core.relational import Eq, Ge, Gt
    a = Wild('a')
    b = Wild('b')
    c = Wild('c')
    # Relationals patterns should be in alphabetical order
    # (pattern1, pattern2, pattern3, simplified)
    # Do not use Le, Lt
    _matchers_and = ((Tuple(Ge(a, b), Ge(b, c), Gt(c, a)), false),
                     (Tuple(Ge(a, b), Gt(b, c), Gt(c, a)), false),
                     (Tuple(Gt(a, b), Gt(b, c), Gt(c, a)), false),
                     # (Tuple(Ge(c, a), Gt(a, b), Gt(b, c)), S.false),
                     # Lower bound relations
                     # Commented out combinations that does not simplify
                     (Tuple(Ge(a, b), Ge(a, c), Ge(b, c)), And(Ge(a, b), Ge(b, c))),
                     (Tuple(Ge(a, b), Ge(a, c), Gt(b, c)), And(Ge(a, b), Gt(b, c))),
                     # (Tuple(Ge(a, b), Gt(a, c), Ge(b, c)), And(Ge(a, b), Ge(b, c))),
                     (Tuple(Ge(a, b), Gt(a, c), Gt(b, c)), And(Ge(a, b), Gt(b, c))),
                     # (Tuple(Gt(a, b), Ge(a, c), Ge(b, c)), And(Gt(a, b), Ge(b, c))),
                     (Tuple(Ge(a, c), Gt(a, b), Gt(b, c)), And(Gt(a, b), Gt(b, c))),
                     (Tuple(Ge(b, c), Gt(a, b), Gt(a, c)), And(Gt(a, b), Ge(b, c))),
                     (Tuple(Gt(a, b), Gt(a, c), Gt(b, c)), And(Gt(a, b), Gt(b, c))),
                     # Upper bound relations
                     # Commented out combinations that does not simplify
                     (Tuple(Ge(b, a), Ge(c, a), Ge(b, c)), And(Ge(c, a), Ge(b, c))),
                     (Tuple(Ge(b, a), Ge(c, a), Gt(b, c)), And(Ge(c, a), Gt(b, c))),
                     # (Tuple(Ge(b, a), Gt(c, a), Ge(b, c)), And(Gt(c, a), Ge(b, c))),
                     (Tuple(Ge(b, a), Gt(c, a), Gt(b, c)), And(Gt(c, a), Gt(b, c))),
                     # (Tuple(Gt(b, a), Ge(c, a), Ge(b, c)), And(Ge(c, a), Ge(b, c))),
                     (Tuple(Ge(c, a), Gt(b, a), Gt(b, c)), And(Ge(c, a), Gt(b, c))),
                     (Tuple(Ge(b, c), Gt(b, a), Gt(c, a)), And(Gt(c, a), Ge(b, c))),
                     (Tuple(Gt(b, a), Gt(c, a), Gt(b, c)), And(Gt(c, a), Gt(b, c))),
                     # Circular relation
                     (Tuple(Ge(a, b), Ge(b, c), Ge(c, a)), And(Eq(a, b), Eq(b, c))),
                     )
    return _matchers_and
@cacheit
def _simplify_patterns_or():
    """ Two-term patterns for Or.

    Each entry is (pattern, replacement); patterns are matched against a
    Tuple of two relationals by the two-term simplifier.  Commented-out
    entries are redundant under the Le/Lt canonicalization performed
    there.
    """
    from sympy.core import Wild
    from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
    from sympy.functions.elementary.complexes import Abs
    from sympy.functions.elementary.miscellaneous import Min, Max
    a = Wild('a')
    b = Wild('b')
    c = Wild('c')
    # Relationals patterns should be in alphabetical order
    # (pattern1, pattern2, simplified)
    # Do not use Ge, Gt
    _matchers_or = ((Tuple(Le(b, a), Le(a, b)), true),
                    #(Tuple(Le(b, a), Lt(a, b)), true),
                    (Tuple(Le(b, a), Ne(a, b)), true),
                    #(Tuple(Le(a, b), Lt(b, a)), true),
                    #(Tuple(Le(a, b), Ne(a, b)), true),
                    #(Tuple(Eq(a, b), Le(b, a)), Ge(a, b)),
                    #(Tuple(Eq(a, b), Lt(b, a)), Ge(a, b)),
                    (Tuple(Eq(a, b), Le(a, b)), Le(a, b)),
                    (Tuple(Eq(a, b), Lt(a, b)), Le(a, b)),
                    #(Tuple(Le(b, a), Lt(b, a)), Ge(a, b)),
                    (Tuple(Lt(b, a), Lt(a, b)), Ne(a, b)),
                    (Tuple(Lt(b, a), Ne(a, b)), Ne(a, b)),
                    (Tuple(Le(a, b), Lt(a, b)), Le(a, b)),
                    #(Tuple(Lt(a, b), Ne(a, b)), Ne(a, b)),
                    (Tuple(Eq(a, b), Ne(a, c)), ITE(Eq(b, c), true, Ne(a, c))),
                    (Tuple(Ne(a, b), Ne(a, c)), ITE(Eq(b, c), Ne(a, b), true)),
                    # Min/Max/ITE
                    (Tuple(Le(b, a), Le(c, a)), Ge(a, Min(b, c))),
                    #(Tuple(Ge(b, a), Ge(c, a)), Ge(Min(b, c), a)),
                    (Tuple(Le(b, a), Lt(c, a)), ITE(b > c, Lt(c, a), Le(b, a))),
                    (Tuple(Lt(b, a), Lt(c, a)), Gt(a, Min(b, c))),
                    #(Tuple(Gt(b, a), Gt(c, a)), Gt(Min(b, c), a)),
                    (Tuple(Le(a, b), Le(a, c)), Le(a, Max(b, c))),
                    #(Tuple(Le(b, a), Le(c, a)), Le(Max(b, c), a)),
                    (Tuple(Le(a, b), Lt(a, c)), ITE(b >= c, Le(a, b), Lt(a, c))),
                    (Tuple(Lt(a, b), Lt(a, c)), Lt(a, Max(b, c))),
                    #(Tuple(Lt(b, a), Lt(c, a)), Lt(Max(b, c), a)),
                    (Tuple(Le(a, b), Le(c, a)), ITE(b >= c, true, Or(Le(a, b), Ge(a, c)))),
                    (Tuple(Le(c, a), Le(a, b)), ITE(b >= c, true, Or(Le(a, b), Ge(a, c)))),
                    (Tuple(Lt(a, b), Lt(c, a)), ITE(b > c, true, Or(Lt(a, b), Gt(a, c)))),
                    (Tuple(Lt(c, a), Lt(a, b)), ITE(b > c, true, Or(Lt(a, b), Gt(a, c)))),
                    (Tuple(Le(a, b), Lt(c, a)), ITE(b >= c, true, Or(Le(a, b), Gt(a, c)))),
                    (Tuple(Le(c, a), Lt(a, b)), ITE(b >= c, true, Or(Lt(a, b), Ge(a, c)))),
                    (Tuple(Lt(b, a), Lt(a, -b)), ITE(b >= 0, Gt(Abs(a), b), true)),
                    (Tuple(Le(b, a), Le(a, -b)), ITE(b > 0, Ge(Abs(a), b), true)),
                    )
    return _matchers_or
@cacheit
def _simplify_patterns_xor():
    """ Two-term patterns for Xor.

    Each entry is (pattern, replacement); patterns are matched against a
    Tuple of two relationals by the two-term simplifier.  Commented-out
    entries are redundant under the Le/Lt canonicalization performed
    there.
    """
    from sympy.functions.elementary.miscellaneous import Min, Max
    from sympy.core import Wild
    from sympy.core.relational import Eq, Ne, Ge, Gt, Le, Lt
    a = Wild('a')
    b = Wild('b')
    c = Wild('c')
    # Relationals patterns should be in alphabetical order
    # (pattern1, pattern2, simplified)
    # Do not use Ge, Gt
    _matchers_xor = (#(Tuple(Le(b, a), Lt(a, b)), true),
                     #(Tuple(Lt(b, a), Le(a, b)), true),
                     #(Tuple(Eq(a, b), Le(b, a)), Gt(a, b)),
                     #(Tuple(Eq(a, b), Lt(b, a)), Ge(a, b)),
                     (Tuple(Eq(a, b), Le(a, b)), Lt(a, b)),
                     (Tuple(Eq(a, b), Lt(a, b)), Le(a, b)),
                     (Tuple(Le(a, b), Lt(a, b)), Eq(a, b)),
                     (Tuple(Le(a, b), Le(b, a)), Ne(a, b)),
                     (Tuple(Le(b, a), Ne(a, b)), Le(a, b)),
                     # (Tuple(Lt(b, a), Lt(a, b)), Ne(a, b)),
                     (Tuple(Lt(b, a), Ne(a, b)), Lt(a, b)),
                     # (Tuple(Le(a, b), Lt(a, b)), Eq(a, b)),
                     # (Tuple(Le(a, b), Ne(a, b)), Ge(a, b)),
                     # (Tuple(Lt(a, b), Ne(a, b)), Gt(a, b)),
                     # Min/Max/ITE
                     (Tuple(Le(b, a), Le(c, a)),
                      And(Ge(a, Min(b, c)), Lt(a, Max(b, c)))),
                     (Tuple(Le(b, a), Lt(c, a)),
                      ITE(b > c, And(Gt(a, c), Lt(a, b)),
                          And(Ge(a, b), Le(a, c)))),
                     (Tuple(Lt(b, a), Lt(c, a)),
                      And(Gt(a, Min(b, c)), Le(a, Max(b, c)))),
                     (Tuple(Le(a, b), Le(a, c)),
                      And(Le(a, Max(b, c)), Gt(a, Min(b, c)))),
                     (Tuple(Le(a, b), Lt(a, c)),
                      ITE(b < c, And(Lt(a, c), Gt(a, b)),
                          And(Le(a, b), Ge(a, c)))),
                     (Tuple(Lt(a, b), Lt(a, c)),
                      And(Lt(a, Max(b, c)), Ge(a, Min(b, c)))),
                     )
    return _matchers_xor
def simplify_univariate(expr):
    """return a simplified version of univariate boolean expression, else ``expr``

    Rewrites a boolean expression in a single real variable ``x`` as an
    Or of interval conditions by extracting the intervals on which it
    holds (via Piecewise._intervals).  Expressions containing Eq/Ne,
    more than one free symbol, or non-BooleanFunctions are returned
    unchanged.
    """
    from sympy.functions.elementary.piecewise import Piecewise
    from sympy.core.relational import Eq, Ne
    if not isinstance(expr, BooleanFunction):
        return expr
    if expr.atoms(Eq, Ne):
        return expr
    c = expr
    free = c.free_symbols
    if len(free) != 1:
        return c
    x = free.pop()
    # Let Piecewise compute the intervals on which the condition holds;
    # ok is False when the condition cannot be resolved into intervals.
    ok, i = Piecewise((0, c), evaluate=False
        )._intervals(x, err_on_Eq=True)
    if not ok:
        return c
    if not i:
        return false
    args = []
    for a, b, _, _ in i:
        if a is S.NegativeInfinity:
            if b is S.Infinity:
                c = true
            else:
                # Probe the endpoint to decide whether the bound is
                # inclusive (<=) or strict (<).
                if c.subs(x, b) == True:
                    c = (x <= b)
                else:
                    c = (x < b)
        else:
            incl_a = (c.subs(x, a) == True)
            incl_b = (c.subs(x, b) == True)
            if incl_a and incl_b:
                if b.is_infinite:
                    c = (x >= a)
                else:
                    c = And(a <= x, x <= b)
            elif incl_a:
                c = And(a <= x, x < b)
            elif incl_b:
                if b.is_infinite:
                    c = (x > a)
                else:
                    c = And(a < x, x <= b)
            else:
                c = And(a < x, x < b)
        args.append(c)
    return Or(*args)
# Classes corresponding to standard logic gates, used by gateinputcount
# below.  Note that Nand, Nor and Xnor normally auto-evaluate to
# Not(And(...)) etc. unless constructed with evaluate=False (see the
# gateinputcount docstring).
BooleanGates = (And, Or, Xor, Nand, Nor, Not, Xnor, ITE)
def gateinputcount(expr):
    """
    Return the total number of inputs for the logic gates realizing the
    Boolean expression.
    Returns
    =======
    int
        Number of gate inputs
    Note
    ====
    Not all Boolean functions count as gate here, only those that are
    considered to be standard gates. These are: :py:class:`~.And`,
    :py:class:`~.Or`, :py:class:`~.Xor`, :py:class:`~.Not`, and
    :py:class:`~.ITE` (multiplexer). :py:class:`~.Nand`, :py:class:`~.Nor`,
    and :py:class:`~.Xnor` will be evaluated to ``Not(And())`` etc.
    Examples
    ========
    >>> from sympy.logic import And, Or, Nand, Not, gateinputcount
    >>> from sympy.abc import x, y, z
    >>> expr = And(x, y)
    >>> gateinputcount(expr)
    2
    >>> gateinputcount(Or(expr, z))
    4
    Note that ``Nand`` is automatically evaluated to ``Not(And())`` so
    >>> gateinputcount(Nand(x, y, z))
    4
    >>> gateinputcount(Not(And(x, y, z)))
    4
    Although this can be avoided by using ``evaluate=False``
    >>> gateinputcount(Nand(x, y, z, evaluate=False))
    3
    Also note that a comparison will count as a Boolean variable:
    >>> gateinputcount(And(x > z, y >= 2))
    2
    As will a symbol:
    >>> gateinputcount(x)
    0
    """
    if not isinstance(expr, Boolean):
        raise TypeError("Expression must be Boolean")
    # Anything that is not a standard gate (a symbol, a relational, ...)
    # contributes no gate inputs of its own.
    if not isinstance(expr, BooleanGates):
        return 0
    # Count this gate's direct inputs, then add the inputs of any gates
    # nested inside its arguments.
    total = len(expr.args)
    for sub in expr.args:
        total += gateinputcount(sub)
    return total
| Exclusive |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 52373,
"end": 52779
} | class ____(DelegatingLexer):
"""
Coldfusion markup/script components
.. versionadded:: 2.0
"""
name = 'Coldfusion CFC'
aliases = ['cfc']
filenames = ['*.cfc']
mimetypes = []
def __init__(self, **options):
super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
**options)
| ColdfusionCFCLexer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 4032,
"end": 6480
} | class ____(_LiteralRoundTripFixture, fixtures.TestBase):
__requires__ = ("unicode_data",)
data = (
"Alors vous imaginez ma 🐍 surprise, au lever du jour, "
"quand une drôle de petite 🐍 voix m’a réveillé. Elle "
"disait: « S’il vous plaît… dessine-moi 🐍 un mouton! »"
)
@property
def supports_whereclause(self):
return config.requirements.expressions_against_unbounded_text.enabled
@classmethod
def define_tables(cls, metadata):
Table(
"unicode_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("unicode_data", cls.datatype),
)
def test_round_trip(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(
unicode_table.insert(), {"id": 1, "unicode_data": self.data}
)
row = connection.execute(select(unicode_table.c.unicode_data)).first()
eq_(row, (self.data,))
assert isinstance(row[0], str)
def test_round_trip_executemany(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(
unicode_table.insert(),
[{"id": i, "unicode_data": self.data} for i in range(1, 4)],
)
rows = connection.execute(
select(unicode_table.c.unicode_data)
).fetchall()
eq_(rows, [(self.data,) for i in range(1, 4)])
for row in rows:
assert isinstance(row[0], str)
def _test_null_strings(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(
unicode_table.insert(), {"id": 1, "unicode_data": None}
)
row = connection.execute(select(unicode_table.c.unicode_data)).first()
eq_(row, (None,))
def _test_empty_strings(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(
unicode_table.insert(), {"id": 1, "unicode_data": ""}
)
row = connection.execute(select(unicode_table.c.unicode_data)).first()
eq_(row, ("",))
def test_literal(self, literal_round_trip):
literal_round_trip(self.datatype, [self.data], [self.data])
def test_literal_non_ascii(self, literal_round_trip):
literal_round_trip(self.datatype, ["réve🐍 illé"], ["réve🐍 illé"])
| _UnicodeFixture |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.