language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tox-dev__tox | src/tox/tox_env/python/virtual_env/api.py | {
"start": 986,
"end": 7684
} | class ____(Python, ABC):
"""A python executor that uses the virtualenv project with pip."""
def __init__(self, create_args: ToxEnvCreateArgs) -> None:
self._virtualenv_session: Session | None = None
self._executor: Execute | None = None
self._installer: Pip | None = None
super().__init__(create_args)
def register_config(self) -> None:
super().register_config()
self.conf.add_config(
keys=["system_site_packages", "sitepackages"],
of_type=bool,
default=lambda conf, name: StrConvert().to_bool( # noqa: ARG005
self.environment_variables.get("VIRTUALENV_SYSTEM_SITE_PACKAGES", "False"),
),
desc="create virtual environments that also have access to globally installed packages.",
)
self.conf.add_config(
keys=["always_copy", "alwayscopy"],
of_type=bool,
default=lambda conf, name: StrConvert().to_bool( # noqa: ARG005
self.environment_variables.get(
"VIRTUALENV_COPIES",
self.environment_variables.get("VIRTUALENV_ALWAYS_COPY", "False"),
),
),
desc="force virtualenv to always copy rather than symlink",
)
self.conf.add_config(
keys=["download"],
of_type=bool,
default=lambda conf, name: StrConvert().to_bool( # noqa: ARG005
self.environment_variables.get("VIRTUALENV_DOWNLOAD", "False"),
),
desc="true if you want virtualenv to upgrade pip/wheel/setuptools to the latest version",
)
@property
def executor(self) -> Execute:
if self._executor is None:
self._executor = LocalSubProcessExecutor(self.options.is_colored)
return self._executor
@property
def installer(self) -> Pip:
if self._installer is None:
self._installer = Pip(self)
return self._installer
def python_cache(self) -> dict[str, Any]:
base = super().python_cache()
base.update(
{
"executable": str(self.base_python.extra["executable"]),
"virtualenv version": virtualenv_version,
},
)
return base
def _get_env_journal_python(self) -> dict[str, Any]:
base = super()._get_env_journal_python()
base["executable"] = str(self.base_python.extra["executable"])
return base
def _default_pass_env(self) -> list[str]:
env = super()._default_pass_env()
env.append("PIP_*") # we use pip as installer
env.append("VIRTUALENV_*") # we use virtualenv as isolation creator
return env
def _default_set_env(self) -> dict[str, str]:
env = super()._default_set_env()
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
return env
@property
def session(self) -> Session:
if self._virtualenv_session is None:
env_dir = [str(self.env_dir)]
env = self.virtualenv_env_vars()
self._virtualenv_session = session_via_cli(env_dir, options=None, setup_logging=False, env=env)
return self._virtualenv_session
def virtualenv_env_vars(self) -> dict[str, str]:
env = self.environment_variables.copy()
base_python: list[str] = self.conf["base_python"]
if "VIRTUALENV_NO_PERIODIC_UPDATE" not in env:
env["VIRTUALENV_NO_PERIODIC_UPDATE"] = "True"
env["VIRTUALENV_CLEAR"] = "False"
env["VIRTUALENV_SYSTEM_SITE_PACKAGES"] = str(self.conf["system_site_packages"])
env["VIRTUALENV_COPIES"] = str(self.conf["always_copy"])
env["VIRTUALENV_DOWNLOAD"] = str(self.conf["download"])
env["VIRTUALENV_PYTHON"] = "\n".join(base_python)
if hasattr(self.options, "discover"):
env["VIRTUALENV_TRY_FIRST_WITH"] = os.pathsep.join(self.options.discover)
return env
@property
def creator(self) -> Creator:
return self.session.creator
def create_python_env(self) -> None:
self.session.run()
def _get_python(self, base_python: list[str]) -> PythonInfo | None: # noqa: ARG002
# the base pythons are injected into the virtualenv_env_vars, so we don't need to use it here
try:
interpreter = self.creator.interpreter
except (FileNotFoundError, RuntimeError): # Unable to find the interpreter
return None
return PythonInfo(
implementation=interpreter.implementation,
version_info=interpreter.version_info,
version=interpreter.version,
is_64=(interpreter.architecture == 64), # noqa: PLR2004
platform=interpreter.platform,
extra={"executable": Path(interpreter.system_executable).resolve()},
free_threaded=interpreter.free_threaded,
)
def prepend_env_var_path(self) -> list[Path]:
"""Paths to add to the executable."""
# we use the original executable as shims may be somewhere else
return list(dict.fromkeys((self.creator.bin_dir, self.creator.script_dir)))
def env_site_package_dir(self) -> Path:
return cast("Path", self.creator.purelib)
def env_python(self) -> Path:
return cast("Path", self.creator.exe)
def env_bin_dir(self) -> Path:
return cast("Path", self.creator.script_dir)
@property
def runs_on_platform(self) -> str:
return sys.platform
@property
def environment_variables(self) -> dict[str, str]:
environment_variables = super().environment_variables
environment_variables["VIRTUAL_ENV"] = str(self.conf["env_dir"])
return environment_variables
@classmethod
def python_spec_for_path(cls, path: Path) -> PythonSpec:
"""
Get the spec for an absolute path to a Python executable.
:param path: the path investigated
:return: the found spec
"""
info = cls.get_virtualenv_py_info(path)
return PythonSpec.from_string_spec(
f"{info.implementation}{info.version_info.major}{info.version_info.minor}-{info.architecture}"
)
@staticmethod
def get_virtualenv_py_info(path: Path) -> VirtualenvPythonInfo:
"""
Get the version info for an absolute path to a Python executable.
:param path: the path investigated
:return: the found information (cached)
"""
return cached_py_info.from_exe(
cached_py_info.PythonInfo,
app_data.make_app_data(None, read_only=False, env=os.environ),
str(path),
)
| VirtualEnv |
python | pypa__pip | src/pip/_vendor/idna/core.py | {
"start": 366,
"end": 480
} | class ____(IDNAError):
"""Exception when bidirectional requirements are not satisfied"""
pass
| IDNABidiError |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py | {
"start": 1149,
"end": 1705
} | class ____(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
self.model = mnist_model()
# CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<1x28x28x1xf32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]}
# CHECK: attributes {{.*}} tf_saved_model.exported_names = ["my_predict"]
@tf.function(input_signature=[
tf.TensorSpec([1, 28, 28, 1], tf.float32),
])
def my_predict(self, x):
return self.model(x)
if __name__ == '__main__':
common.do_test(TestModule, exported_names=['my_predict'])
| TestModule |
python | gevent__gevent | src/gevent/_threading.py | {
"start": 1160,
"end": 4261
} | class ____(object):
# We could use libuv's ``uv_cond_wait`` to implement this whole
# class and get native timeouts and native performance everywhere.
# pylint:disable=method-hidden
__slots__ = (
'_lock',
'_waiters',
)
def __init__(self, lock):
# This lock is used to protect our own data structures;
# calls to ``wait`` and ``notify_one`` *must* be holding this
# lock.
self._lock = lock
self._waiters = []
# No need to special case for _release_save and
# _acquire_restore; those are only used for RLock, and
# we don't use those.
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, t, v, tb):
return self._lock.__exit__(t, v, tb)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def wait(self, wait_lock, timeout=-1, _wait_for_notify=acquire_with_timeout):
# This variable is for the monitoring utils to know that
# this is an idle frame and shouldn't be counted.
gevent_threadpool_worker_idle = True # pylint:disable=unused-variable
# The _lock must be held.
# The ``wait_lock`` must be *un*owned, so the timeout doesn't apply there.
# Take that lock now.
wait_lock.acquire()
self._waiters.append(wait_lock)
self._lock.release()
try:
# We're already holding this native lock, so when we try to acquire it again,
# that won't work and we'll block until someone calls notify_one() (which might
# have already happened).
notified = _wait_for_notify(wait_lock, timeout)
finally:
self._lock.acquire()
# Now that we've acquired _lock again, no one can call notify_one(), or this
# method.
if not notified:
# We need to come out of the waiters list. IF we're still there; it's
# possible that between the call to _acquire() returning False,
# and the time that we acquired _lock, someone did a ``notify_one``
# and released the lock. For that reason, do a non-blocking acquire()
notified = wait_lock.acquire(False)
if not notified:
# Well narf. No go. We must stil be in the waiters list, so take us out
self._waiters.remove(wait_lock)
# We didn't get notified, but we're still holding a lock that we
# need to release.
wait_lock.release()
else:
# We got notified, so we need to reset.
wait_lock.release()
return notified
def notify_one(self):
# The lock SHOULD be owned, but we don't check that.
try:
waiter = self._waiters.pop()
except IndexError:
# Nobody around
pass
else:
# The owner of the ``waiter`` is blocked on
# acquiring it again, so when we ``release`` it, it
# is free to be scheduled and resume.
waiter.release()
| _Condition |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar13.py | {
"start": 345,
"end": 3816
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.use_data_bars_2010 = True
worksheet.conditional_format("A1", {"type": "data_bar"})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_comm.py | {
"start": 76928,
"end": 78545
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return 1
def test_size1_reduceop(self):
from torch.distributed.distributed_c10d import ReduceOp
model = nn.Linear(1024, 1025)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters())
fully_shard(
model,
mesh=init_device_mesh(device_type.type, (1,)),
reshard_after_forward=False,
)
optim = torch.optim.Adam(model.parameters())
inp = torch.randn(1025, 1024, device=device_type.type)
for _ in range(3):
ref_optim.zero_grad()
ref_loss = ref_model(inp).sum()
ref_loss.backward()
for param in ref_model.parameters():
dist.all_reduce(param.grad, op=dist.ReduceOp.SUM)
ref_optim.step()
optim.zero_grad()
loss = model(inp).sum()
loss.backward()
optim.step()
self.assertEqual(loss, ref_loss)
self.assertEqual(
model.bias.grad._local_tensor,
ref_model.bias.grad,
)
state = model._get_fsdp_state()
fsdp_param_group = state._fsdp_param_group
group = fsdp_param_group.mesh_info.shard_process_group
(
_,
_,
_,
all_reduce_op,
) = _get_gradient_divide_factors(group, None, torch.float32)
self.assertEqual(all_reduce_op, ReduceOp.SUM)
if __name__ == "__main__":
run_tests()
| TestFullyShardReduceOpWorldSize1 |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/colorbar/_tickfont.py | {
"start": 233,
"end": 9974
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour.colorbar"
_path_str = "histogram2dcontour.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | doocs__leetcode | solution/0300-0399/0326.Power of Three/Solution2.py | {
"start": 0,
"end": 107
} | class ____:
def isPowerOfThree(self, n: int) -> bool:
return n > 0 and 1162261467 % n == 0
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 2809,
"end": 2907
} | class ____(SQLRole):
__slots__ = ()
_role_name = "LIMIT / OFFSET expression"
| LimitOffsetRole |
python | networkx__networkx | networkx/classes/tests/test_function.py | {
"start": 14681,
"end": 35481
} | class ____:
@classmethod
def setup_class(cls):
cls.func = staticmethod(nx.common_neighbors)
def test_func(G, u, v, expected):
result = sorted(cls.func(G, u, v))
assert result == expected
cls.test = staticmethod(test_func)
def test_K5(self):
G = nx.complete_graph(5)
self.test(G, 0, 1, [2, 3, 4])
def test_P3(self):
G = nx.path_graph(3)
self.test(G, 0, 2, [1])
def test_S4(self):
G = nx.star_graph(4)
self.test(G, 1, 2, [0])
def test_digraph(self):
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2)])
self.func(G, 0, 2)
def test_nonexistent_nodes(self):
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5)
pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6)
def test_custom1(self):
"""Case of no common neighbors."""
G = nx.Graph()
G.add_nodes_from([0, 1])
self.test(G, 0, 1, [])
def test_custom2(self):
"""Case of equal nodes."""
G = nx.complete_graph(4)
self.test(G, 0, 0, [1, 2, 3])
@pytest.mark.parametrize(
"graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
)
def test_set_node_attributes(graph_type):
# Test single value
G = nx.path_graph(3, create_using=graph_type)
vals = 100
attr = "hello"
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == vals
assert G.nodes[1][attr] == vals
assert G.nodes[2][attr] == vals
# Test dictionary
G = nx.path_graph(3, create_using=graph_type)
vals = dict(zip(sorted(G.nodes()), range(len(G))))
attr = "hi"
nx.set_node_attributes(G, vals, attr)
assert G.nodes[0][attr] == 0
assert G.nodes[1][attr] == 1
assert G.nodes[2][attr] == 2
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=graph_type)
d = {"hi": 0, "hello": 200}
vals = dict.fromkeys(G.nodes(), d)
vals.pop(0)
nx.set_node_attributes(G, vals)
assert G.nodes[0] == {}
assert G.nodes[1]["hi"] == 0
assert G.nodes[2]["hello"] == 200
@pytest.mark.parametrize(
("values", "name"),
(
({0: "red", 1: "blue"}, "color"), # values dictionary
({0: {"color": "red"}, 1: {"color": "blue"}}, None), # dict-of-dict
),
)
def test_set_node_attributes_ignores_extra_nodes(values, name):
"""
When `values` is a dict or dict-of-dict keyed by nodes, ensure that keys
that correspond to nodes not in G are ignored.
"""
G = nx.Graph()
G.add_node(0)
nx.set_node_attributes(G, values, name)
assert G.nodes[0]["color"] == "red"
assert 1 not in G.nodes
@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph))
def test_set_edge_attributes(graph_type):
# Test single value
G = nx.path_graph(3, create_using=graph_type)
attr = "hello"
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == vals
assert G[1][2][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=graph_type)
attr = "hi"
edges = [(0, 1), (1, 2)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][attr] == 0
assert G[1][2][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=graph_type)
d = {"hi": 0, "hello": 200}
edges = [(0, 1)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1]["hi"] == 0
assert G[0][1]["hello"] == 200
assert G[1][2] == {}
@pytest.mark.parametrize(
("values", "name"),
(
({(0, 1): 1.0, (0, 2): 2.0}, "weight"), # values dict
({(0, 1): {"weight": 1.0}, (0, 2): {"weight": 2.0}}, None), # values dod
),
)
def test_set_edge_attributes_ignores_extra_edges(values, name):
"""If `values` is a dict or dict-of-dicts containing edges that are not in
G, data associate with these edges should be ignored.
"""
G = nx.Graph([(0, 1)])
nx.set_edge_attributes(G, values, name)
assert G[0][1]["weight"] == 1.0
assert (0, 2) not in G.edges
@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph))
def test_set_edge_attributes_multi(graph_type):
# Test single value
G = nx.path_graph(3, create_using=graph_type)
attr = "hello"
vals = 3
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == vals
assert G[1][2][0][attr] == vals
# Test multiple values
G = nx.path_graph(3, create_using=graph_type)
attr = "hi"
edges = [(0, 1, 0), (1, 2, 0)]
vals = dict(zip(edges, range(len(edges))))
nx.set_edge_attributes(G, vals, attr)
assert G[0][1][0][attr] == 0
assert G[1][2][0][attr] == 1
# Test dictionary of dictionaries
G = nx.path_graph(3, create_using=graph_type)
d = {"hi": 0, "hello": 200}
edges = [(0, 1, 0)]
vals = dict.fromkeys(edges, d)
nx.set_edge_attributes(G, vals)
assert G[0][1][0]["hi"] == 0
assert G[0][1][0]["hello"] == 200
assert G[1][2][0] == {}
@pytest.mark.parametrize(
("values", "name"),
(
({(0, 1, 0): 1.0, (0, 2, 0): 2.0}, "weight"), # values dict
({(0, 1, 0): {"weight": 1.0}, (0, 2, 0): {"weight": 2.0}}, None), # values dod
),
)
def test_set_edge_attributes_multi_ignores_extra_edges(values, name):
"""If `values` is a dict or dict-of-dicts containing edges that are not in
G, data associate with these edges should be ignored.
"""
G = nx.MultiGraph([(0, 1, 0), (0, 1, 1)])
nx.set_edge_attributes(G, values, name)
assert G[0][1][0]["weight"] == 1.0
assert G[0][1][1] == {}
assert (0, 2) not in G.edges()
def test_get_node_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = "hello"
vals = 100
nx.set_node_attributes(G, vals, attr)
attrs = nx.get_node_attributes(G, attr)
assert attrs[0] == vals
assert attrs[1] == vals
assert attrs[2] == vals
default_val = 1
G.add_node(4)
attrs = nx.get_node_attributes(G, attr, default=default_val)
assert attrs[4] == default_val
def test_get_edge_attributes():
graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]
for G in graphs:
G = nx.path_graph(3, create_using=G)
attr = "hello"
vals = 100
nx.set_edge_attributes(G, vals, attr)
attrs = nx.get_edge_attributes(G, attr)
assert len(attrs) == 2
for edge in G.edges:
assert attrs[edge] == vals
default_val = vals
G.add_edge(4, 5)
deafult_attrs = nx.get_edge_attributes(G, attr, default=default_val)
assert len(deafult_attrs) == 3
for edge in G.edges:
assert deafult_attrs[edge] == vals
@pytest.mark.parametrize(
"graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
)
def test_remove_node_attributes(graph_type):
# Test removing single attribute
G = nx.path_graph(3, create_using=graph_type)
vals = 100
attr = "hello"
nx.set_node_attributes(G, vals, attr)
nx.remove_node_attributes(G, attr)
assert attr not in G.nodes[0]
assert attr not in G.nodes[1]
assert attr not in G.nodes[2]
# Test removing single attribute when multiple present
G = nx.path_graph(3, create_using=graph_type)
other_vals = 200
other_attr = "other"
nx.set_node_attributes(G, vals, attr)
nx.set_node_attributes(G, other_vals, other_attr)
nx.remove_node_attributes(G, attr)
assert attr not in G.nodes[0]
assert G.nodes[0][other_attr] == other_vals
assert attr not in G.nodes[1]
assert G.nodes[1][other_attr] == other_vals
assert attr not in G.nodes[2]
assert G.nodes[2][other_attr] == other_vals
# Test removing multiple attributes
G = nx.path_graph(3, create_using=graph_type)
nx.set_node_attributes(G, vals, attr)
nx.set_node_attributes(G, other_vals, other_attr)
nx.remove_node_attributes(G, attr, other_attr)
assert attr not in G.nodes[0] and other_attr not in G.nodes[0]
assert attr not in G.nodes[1] and other_attr not in G.nodes[1]
assert attr not in G.nodes[2] and other_attr not in G.nodes[2]
# Test removing multiple (but not all) attributes
G = nx.path_graph(3, create_using=graph_type)
third_vals = 300
third_attr = "three"
nx.set_node_attributes(
G,
{
n: {attr: vals, other_attr: other_vals, third_attr: third_vals}
for n in G.nodes()
},
)
nx.remove_node_attributes(G, other_attr, third_attr)
assert other_attr not in G.nodes[0] and third_attr not in G.nodes[0]
assert other_attr not in G.nodes[1] and third_attr not in G.nodes[1]
assert other_attr not in G.nodes[2] and third_attr not in G.nodes[2]
assert G.nodes[0][attr] == vals
assert G.nodes[1][attr] == vals
assert G.nodes[2][attr] == vals
# Test incomplete node attributes
G = nx.path_graph(3, create_using=graph_type)
nx.set_node_attributes(
G,
{
1: {attr: vals, other_attr: other_vals},
2: {attr: vals, other_attr: other_vals},
},
)
nx.remove_node_attributes(G, attr)
assert attr not in G.nodes[0]
assert attr not in G.nodes[1]
assert attr not in G.nodes[2]
assert G.nodes[1][other_attr] == other_vals
assert G.nodes[2][other_attr] == other_vals
# Test removing on a subset of nodes
G = nx.path_graph(3, create_using=graph_type)
nx.set_node_attributes(
G,
{
n: {attr: vals, other_attr: other_vals, third_attr: third_vals}
for n in G.nodes()
},
)
nx.remove_node_attributes(G, attr, other_attr, nbunch=[0, 1])
assert attr not in G.nodes[0] and other_attr not in G.nodes[0]
assert attr not in G.nodes[1] and other_attr not in G.nodes[1]
assert attr in G.nodes[2] and other_attr in G.nodes[2]
assert third_attr in G.nodes[0] and G.nodes[0][third_attr] == third_vals
assert third_attr in G.nodes[1] and G.nodes[1][third_attr] == third_vals
@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph))
def test_remove_edge_attributes(graph_type):
# Test removing single attribute
G = nx.path_graph(3, create_using=graph_type)
attr = "hello"
vals = 100
nx.set_edge_attributes(G, vals, attr)
nx.remove_edge_attributes(G, attr)
assert len(nx.get_edge_attributes(G, attr)) == 0
# Test removing only some attributes
G = nx.path_graph(3, create_using=graph_type)
other_attr = "other"
other_vals = 200
nx.set_edge_attributes(G, vals, attr)
nx.set_edge_attributes(G, other_vals, other_attr)
nx.remove_edge_attributes(G, attr)
assert attr not in G[0][1]
assert attr not in G[1][2]
assert G[0][1][other_attr] == 200
assert G[1][2][other_attr] == 200
# Test removing multiple attributes
G = nx.path_graph(3, create_using=graph_type)
nx.set_edge_attributes(G, vals, attr)
nx.set_edge_attributes(G, other_vals, other_attr)
nx.remove_edge_attributes(G, attr, other_attr)
assert attr not in G[0][1] and other_attr not in G[0][1]
assert attr not in G[1][2] and other_attr not in G[1][2]
# Test removing multiple (not all) attributes
G = nx.path_graph(3, create_using=graph_type)
third_attr = "third"
third_vals = 300
nx.set_edge_attributes(
G,
{
(u, v): {attr: vals, other_attr: other_vals, third_attr: third_vals}
for u, v in G.edges()
},
)
nx.remove_edge_attributes(G, other_attr, third_attr)
assert other_attr not in G[0][1] and third_attr not in G[0][1]
assert other_attr not in G[1][2] and third_attr not in G[1][2]
assert G[0][1][attr] == vals
assert G[1][2][attr] == vals
# Test removing incomplete edge attributes
G = nx.path_graph(3, create_using=graph_type)
nx.set_edge_attributes(G, {(0, 1): {attr: vals, other_attr: other_vals}})
nx.remove_edge_attributes(G, other_attr)
assert other_attr not in G[0][1] and G[0][1][attr] == vals
assert other_attr not in G[1][2]
# Test removing subset of edge attributes
G = nx.path_graph(3, create_using=graph_type)
nx.set_edge_attributes(
G,
{
(u, v): {attr: vals, other_attr: other_vals, third_attr: third_vals}
for u, v in G.edges()
},
)
nx.remove_edge_attributes(G, other_attr, third_attr, ebunch=[(0, 1)])
assert other_attr not in G[0][1] and third_attr not in G[0][1]
assert other_attr in G[1][2] and third_attr in G[1][2]
@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph))
def test_remove_multi_edge_attributes(graph_type):
    """Exercise ``nx.remove_edge_attributes`` on multigraphs.

    Covers removing a single attribute, several attributes at once, a
    proper subset of the attributes present, attributes present on only
    some edges, and removal restricted to an ``ebunch`` of
    ``(u, v, key)`` triples.
    """
    # Test removing single attribute
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)  # adds a parallel edge with key 1 on (1, 2)
    attr = "hello"
    vals = 100
    nx.set_edge_attributes(G, vals, attr)
    nx.remove_edge_attributes(G, attr)
    assert attr not in G[0][1][0]
    assert attr not in G[1][2][0]
    assert attr not in G[1][2][1]
    # Test removing only some attributes
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)
    other_attr = "other"
    other_vals = 200
    nx.set_edge_attributes(G, vals, attr)
    nx.set_edge_attributes(G, other_vals, other_attr)
    nx.remove_edge_attributes(G, attr)
    assert attr not in G[0][1][0]
    assert attr not in G[1][2][0]
    assert attr not in G[1][2][1]
    assert G[0][1][0][other_attr] == other_vals
    assert G[1][2][0][other_attr] == other_vals
    assert G[1][2][1][other_attr] == other_vals
    # Test removing multiple attributes
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)
    nx.set_edge_attributes(G, vals, attr)
    nx.set_edge_attributes(G, other_vals, other_attr)
    nx.remove_edge_attributes(G, attr, other_attr)
    assert attr not in G[0][1][0] and other_attr not in G[0][1][0]
    assert attr not in G[1][2][0] and other_attr not in G[1][2][0]
    assert attr not in G[1][2][1] and other_attr not in G[1][2][1]
    # Test removing multiple (not all) attributes
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)
    third_attr = "third"
    third_vals = 300
    nx.set_edge_attributes(
        G,
        {
            (u, v, k): {attr: vals, other_attr: other_vals, third_attr: third_vals}
            for u, v, k in G.edges(keys=True)
        },
    )
    nx.remove_edge_attributes(G, other_attr, third_attr)
    assert other_attr not in G[0][1][0] and third_attr not in G[0][1][0]
    # BUGFIX: the second conjunct on the (1, 2) edges previously re-checked
    # ``other_attr`` instead of ``third_attr``, so third_attr removal on the
    # parallel edges was never actually asserted.
    assert other_attr not in G[1][2][0] and third_attr not in G[1][2][0]
    assert other_attr not in G[1][2][1] and third_attr not in G[1][2][1]
    assert G[0][1][0][attr] == vals
    assert G[1][2][0][attr] == vals
    assert G[1][2][1][attr] == vals
    # Test removing incomplete edge attributes
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)
    nx.set_edge_attributes(
        G,
        {
            (0, 1, 0): {attr: vals, other_attr: other_vals},
            (1, 2, 1): {attr: vals, other_attr: other_vals},
        },
    )
    nx.remove_edge_attributes(G, other_attr)
    assert other_attr not in G[0][1][0] and G[0][1][0][attr] == vals
    assert other_attr not in G[1][2][0]
    assert other_attr not in G[1][2][1]
    # Test removing subset of edge attributes
    G = nx.path_graph(3, create_using=graph_type)
    G.add_edge(1, 2)
    nx.set_edge_attributes(
        G,
        {
            (0, 1, 0): {attr: vals, other_attr: other_vals},
            (1, 2, 0): {attr: vals, other_attr: other_vals},
            (1, 2, 1): {attr: vals, other_attr: other_vals},
        },
    )
    nx.remove_edge_attributes(G, attr, ebunch=[(0, 1, 0), (1, 2, 0)])
    assert attr not in G[0][1][0] and other_attr in G[0][1][0]
    assert attr not in G[1][2][0] and other_attr in G[1][2][0]
    assert attr in G[1][2][1] and other_attr in G[1][2][1]
def test_is_empty():
    """A graph is "empty" when it has no edges, regardless of node count."""
    for graph in (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()):
        # Freshly constructed: no nodes, no edges.
        assert nx.is_empty(graph)
        # Adding isolated nodes does not make the graph non-empty.
        graph.add_nodes_from(range(5))
        assert nx.is_empty(graph)
        # Any edge does.
        graph.add_edges_from([(1, 2), (3, 4)])
        assert not nx.is_empty(graph)
@pytest.mark.parametrize(
    "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
)
def test_selfloops(graph_type):
    """Self-loop helpers all report the single loop added to K3."""
    G = nx.complete_graph(3, create_using=graph_type)
    G.add_edge(0, 0)
    # Only node 0 carries a loop, and only one loop edge exists.
    assert nodes_equal(nx.nodes_with_selfloops(G), [0])
    assert edges_equal(nx.selfloop_edges(G), [(0, 0)])
    # With data=True the (empty) attribute dict is included.
    assert edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})])
    assert nx.number_of_selfloops(G) == 1
@pytest.mark.parametrize(
    "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
)
def test_selfloop_edges_attr(graph_type):
    """``selfloop_edges`` honors the ``data`` argument for attribute output."""
    G = nx.complete_graph(3, create_using=graph_type)
    G.add_edge(0, 0)
    G.add_edge(1, 1, weight=2)
    # data=True yields the full attribute dict for each loop.
    expected_dicts = [(0, 0, {}), (1, 1, {"weight": 2})]
    assert edges_equal(nx.selfloop_edges(G, data=True), expected_dicts)
    # data="weight" yields just that attribute, None where absent.
    expected_weights = [(0, 0, None), (1, 1, 2)]
    assert edges_equal(nx.selfloop_edges(G, data="weight"), expected_weights)
def test_selfloop_edges_multi_with_data_and_keys():
    """Multigraph self-loops can be reported with keys and edge data together."""
    G = nx.complete_graph(3, create_using=nx.MultiGraph)
    G.add_edge(0, 0, weight=10)
    G.add_edge(0, 0, weight=100)
    # Parallel loops get distinct keys (0 and 1) and keep their own weights.
    expected = [(0, 0, 0, 10), (0, 0, 1, 100)]
    assert edges_equal(nx.selfloop_edges(G, data="weight", keys=True), expected)
@pytest.mark.parametrize("graph_type", [nx.Graph, nx.DiGraph])
def test_selfloops_removal(graph_type):
    """Removing self-loops works for every keys/data flag combination."""
    G = nx.complete_graph(3, create_using=graph_type)
    # Re-add the loop before each removal so every flag combo is exercised.
    for flags in ({"keys": True}, {"data": True}, {"keys": True, "data": True}):
        G.add_edge(0, 0)
        G.remove_edges_from(nx.selfloop_edges(G, **flags))
@pytest.mark.parametrize("graph_type", [nx.MultiGraph, nx.MultiDiGraph])
def test_selfloops_removal_multi(graph_type):
    """test removing selfloops behavior vis-a-vis altering a dict while iterating.

    cf. gh-4068"""
    G = nx.complete_graph(3, create_using=graph_type)

    def add_two_loops():
        # Two parallel self-loops on node 0.
        G.add_edge(0, 0)
        G.add_edge(0, 0)

    # Defaults - see gh-4080
    add_two_loops()
    G.remove_edges_from(nx.selfloop_edges(G))
    assert (0, 0) not in G.edges()
    # With keys
    add_two_loops()
    with pytest.raises(RuntimeError):
        G.remove_edges_from(nx.selfloop_edges(G, keys=True))
    # With data
    add_two_loops()
    with pytest.raises(TypeError):
        G.remove_edges_from(nx.selfloop_edges(G, data=True))
    # With keys and data
    add_two_loops()
    with pytest.raises(RuntimeError):
        G.remove_edges_from(nx.selfloop_edges(G, data=True, keys=True))
def test_pathweight():
    """``path_weight`` sums the named attribute along a path.

    On multigraphs the cheapest parallel edge is chosen; a node sequence
    that is not a path raises ``NetworkXNoPath``.
    """
    valid_path = [1, 2, 3]
    invalid_path = [1, 3, 2]
    edges = [
        (1, 2, {"cost": 5, "dist": 6}),
        (2, 3, {"cost": 3, "dist": 4}),
        (1, 2, {"cost": 1, "dist": 2}),  # duplicate (1, 2): overwrite vs parallel
    ]
    for graph in (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()):
        graph.add_edges_from(edges)
        assert nx.path_weight(graph, valid_path, "cost") == 4
        assert nx.path_weight(graph, valid_path, "dist") == 6
        pytest.raises(nx.NetworkXNoPath, nx.path_weight, graph, invalid_path, "cost")
@pytest.mark.parametrize(
    "G", (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph())
)
def test_ispath(G):
    """``is_path`` accepts only node sequences joined edge-by-edge in G."""
    G.add_edges_from([(1, 2), (2, 3), (1, 2), (3, 4)])
    assert nx.is_path(G, [1, 2, 3, 4])
    # Consecutive nodes 2 and 4 are not adjacent -> not a path.
    assert not nx.is_path(G, [1, 2, 4, 3])
    # Node 5 is absent from G entirely -> not a path.
    assert not nx.is_path(G, [1, 2, 3, 4, 5])
@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
def test_restricted_view(G):
    """``restricted_view`` hides the listed nodes and edges from the view."""
    G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)])
    G.add_node(4)
    hidden_nodes = [0, 2, 5]  # 5 is not in G
    hidden_edges = [(1, 2), (3, 4)]  # (3, 4) is not in G
    H = nx.restricted_view(G, hidden_nodes, hidden_edges)
    # Only the self-loop on 1 survives; isolated 3 and 4 remain as nodes.
    assert set(H.nodes()) == {1, 3, 4}
    assert set(H.edges()) == {(1, 1)}
@pytest.mark.parametrize("G", (nx.MultiGraph(), nx.MultiDiGraph()))
def test_restricted_view_multi(G):
    """Multigraph variant of ``restricted_view`` using keyed edge triples."""
    G.add_edges_from(
        [(0, 1, 0), (0, 2, 0), (0, 3, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 2, 0)]
    )
    G.add_node(4)
    hidden_nodes = [0, 2, 5]  # 5 is not in G
    hidden_edges = [(1, 2, 0), (3, 4, 0)]  # (3, 4, 0) is not in G
    H = nx.restricted_view(G, hidden_nodes, hidden_edges)
    # Only the self-loop on 1 survives; isolated 3 and 4 remain as nodes.
    assert set(H.nodes()) == {1, 3, 4}
    assert set(H.edges()) == {(1, 1)}
| TestCommonNeighbors |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 210671,
"end": 212870
} | class ____(SpecfileReaderBase):
SPEC_VERSION = 2
@classmethod
def load(cls, data):
result = cls._load(data)
reconstruct_virtuals_on_edges(result)
return result
@classmethod
def name_and_data(cls, node):
return node["name"], node
@classmethod
def dependencies_from_node_dict(cls, node):
return cls.read_specfile_dep_specs(node.get("dependencies", []))
@classmethod
def read_specfile_dep_specs(cls, deps, hash_type=ht.dag_hash.name):
"""Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.
"""
if not isinstance(deps, list):
raise spack.error.SpecError("Spec dictionary contains malformed dependencies")
result = []
for dep in deps:
elt = dep
dep_name = dep["name"]
if isinstance(elt, dict):
# new format: elements of dependency spec are keyed.
for h in ht.HASHES:
if h.name in elt:
dep_hash, deptypes, hash_type, virtuals, direct = (
cls.extract_info_from_dep(elt, h)
)
break
else: # We never determined a hash type...
raise spack.error.SpecError("Couldn't parse dependency spec.")
else:
raise spack.error.SpecError("Couldn't parse dependency types in spec.")
result.append((dep_name, dep_hash, list(deptypes), hash_type, list(virtuals), direct))
return result
@classmethod
def extract_info_from_dep(cls, elt, hash):
dep_hash, deptypes = elt[hash.name], elt["type"]
hash_type = hash.name
virtuals = []
direct = True
return dep_hash, deptypes, hash_type, virtuals, direct
@classmethod
def extract_build_spec_info_from_node_dict(cls, node, hash_type=ht.dag_hash.name):
build_spec_dict = node["build_spec"]
return build_spec_dict["name"], build_spec_dict[hash_type], hash_type
| SpecfileV2 |
python | gevent__gevent | src/greentest/3.9/test_ssl.py | {
"start": 202560,
"end": 208833
} | class ____(unittest.TestCase):
def keylog_lines(self, fname=support.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(support.unlink, support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(support.TESTFN))
ctx.keylog_filename = support.TESTFN
self.assertEqual(ctx.keylog_filename, support.TESTFN)
self.assertTrue(os.path.isfile(support.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(support.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(support.unlink, support.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = support.TESTFN
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(support.unlink, support.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = support.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
def set_socket_so_linger_on_with_zero_timeout(sock):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
| TestSSLDebug |
python | gwtw__py-sorting | test/merge_sort_bottom_up_test.py | {
"start": 415,
"end": 810
} | class ____(unittest.TestCase,
BaseCustomComparisonSortTest,
BasePositiveIntegerSortTest,
BaseNegativeIntegerSortTest,
BaseStringSortTest):
def setUp(self):
self.sort = merge_sort_bottom_up.sort
if __name__ == '__main__':
unittest.main()
| MergeSortBottomUpSortTest |
python | dagster-io__dagster | examples/experimental/assets_yaml_dsl/assets_yaml_dsl/domain_specific_dsl/stocks_dsl.py | {
"start": 1114,
"end": 1161
} | class ____(NamedTuple):
ticker: str
| StockInfo |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_embed_image07.py | {
"start": 315,
"end": 977
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("embed_image07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.embed_image(0, 0, self.image_dir + "red.png")
worksheet.embed_image(2, 0, self.image_dir + "blue.png")
worksheet.insert_image(8, 4, self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytransitions__transitions | tests/test_async.py | {
"start": 33380,
"end": 33728
} | class ____(TestHierarchicalAsync):
def setUp(self):
super(TestHierarchicalAsync, self).setUp()
self.machine_cls = HierarchicalAsyncGraphMachine # type: Type[HierarchicalAsyncGraphMachine]
self.machine = self.machine_cls(states=['A', 'B', 'C'], transitions=[['go', 'A', 'B']], initial='A')
| TestAsyncHierarchicalGraphMachine |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 652988,
"end": 653406
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("EnterpriseServerUserAccount", graphql_name="node")
"""The item at the end of the edge."""
| EnterpriseServerUserAccountEdge |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/utils.py | {
"start": 183,
"end": 1184
} | class ____(metaclass=abc.ABCMeta):
content_type: str = Field(
description="The MIME type of the input data in the request body.",
)
accept: str = Field(
description="The desired MIME type of the inference response from the model container.",
)
@classmethod
def __subclasshook__(cls, subclass: type) -> bool:
return (
hasattr(subclass, "content_type")
and hasattr(subclass, "accept")
and hasattr(subclass, "serialize_input")
and callable(subclass.serialize_input)
and hasattr(subclass, "deserialize_output")
and callable(subclass.deserialize_output)
or NotImplemented
)
@abc.abstractmethod
def serialize_input(self, request: List[str], model_kwargs: dict) -> bytes:
raise NotImplementedError
@abc.abstractmethod
def deserialize_output(self, response: "StreamingBody") -> List[List[float]]:
raise NotImplementedError
| BaseIOHandler |
python | PyCQA__pylint | doc/data/messages/i/invalid-field-call/good.py | {
"start": 207,
"end": 338
} | class ____:
a: float
b: float
c: float = field(init=False)
def __post_init__(self):
self.c = self.a + self.b
| C |
python | pypa__setuptools | setuptools/_distutils/compilers/C/base.py | {
"start": 1156,
"end": 54876
} | class ____:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type: ClassVar[str] = None # type: ignore[assignment]
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
# * can't completely override the include or library searchg
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
executables: ClassVar[dict]
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions: ClassVar[list[str] | None] = None
obj_extension: ClassVar[str | None] = None
static_lib_extension: ClassVar[str | None] = None
shared_lib_extension: ClassVar[str | None] = None
static_lib_format: ClassVar[str | None] = None # format string
shared_lib_format: ClassVar[str | None] = None # prob. same as static_lib_format
exe_extension: ClassVar[str | None] = None
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map: ClassVar[dict[str, str]] = {
".c": "c",
".cc": "c++",
".cpp": "c++",
".cxx": "c++",
".m": "objc",
}
language_order: ClassVar[list[str]] = ["c++", "objc", "c"]
include_dirs: list[str] = []
"""
include dirs specific to this compiler class
"""
library_dirs: list[str] = []
"""
library dirs specific to this compiler class
"""
def __init__(
self, verbose: bool = False, dry_run: bool = False, force: bool = False
) -> None:
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir: str | None = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros: list[_Macro] = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries: list[str] = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs: list[str] = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects: list[str] = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **kwargs: str) -> None:
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in kwargs:
if key not in self.executables:
raise ValueError(
f"unknown executable '{key}' for class {self.__class__.__name__}"
)
self.set_executable(key, kwargs[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i += 1
return None
def _check_macro_definitions(self, definitions):
"""Ensure that every element of 'definitions' is valid."""
for defn in definitions:
self._check_macro_definition(*defn)
def _check_macro_definition(self, defn):
"""
Raise a TypeError if defn is not valid.
A valid definition is either a (name, value) 2-tuple or a (name,) tuple.
"""
if not isinstance(defn, tuple) or not self._is_valid_macro(*defn):
raise TypeError(
f"invalid macro definition '{defn}': "
"must be tuple (string,), (string, string), or (string, None)"
)
@staticmethod
def _is_valid_macro(name, value=None):
"""
A valid macro is a ``name : str`` and a ``value : str | None``.
>>> Compiler._is_valid_macro('foo', None)
True
"""
return isinstance(name, str) and isinstance(value, (str, type(None)))
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name: str, value: str | None = None) -> None:
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro(name)
if i is not None:
del self.macros[i]
self.macros.append((name, value))
def undefine_macro(self, name: str) -> None:
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro(name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn)
def add_include_dir(self, dir: str) -> None:
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append(dir)
def set_include_dirs(self, dirs: list[str]) -> None:
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
'add_include_dir()'; subsequence calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname: str) -> None:
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append(libname)
def set_libraries(self, libnames: list[str]) -> None:
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir: str) -> None:
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs: list[str]) -> None:
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir: str) -> None:
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs: list[str]) -> None:
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object: str) -> None:
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects: list[str]) -> None:
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(
self,
outdir: str | None,
macros: list[_Macro] | None,
incdirs: list[str] | tuple[str, ...] | None,
sources,
depends,
extra,
):
"""Process arguments and decide which source files to compile."""
outdir, macros, incdirs = self._fix_compile_args(outdir, macros, incdirs)
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=False, output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args(
self,
output_dir: str | None,
macros: list[_Macro] | None,
include_dirs: list[str] | tuple[str, ...] | None,
) -> tuple[str, list[_Macro], list[str]]:
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = list(self.macros)
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if include_dirs is None:
include_dirs = list(self.include_dirs)
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list(include_dirs) + (self.include_dirs or [])
else:
raise TypeError("'include_dirs' (if supplied) must be a list of strings")
# add include dirs for class
include_dirs += self.__class__.include_dirs
return output_dir, macros, include_dirs
    def _prep_compile(self, sources, output_dir, depends=None):
        """Decide which source files must be recompiled.
        Determine the list of object files corresponding to 'sources',
        and figure out which ones really need to be recompiled.
        Return a list of all object files and a dictionary telling
        which source files can be skipped.
        """
        # Get the list of expected output (object) files
        objects = self.object_filenames(sources, output_dir=output_dir)
        assert len(objects) == len(sources)
        # Return an empty dict for the "which source files can be skipped"
        # return value to preserve API compatibility.
        # (This implementation never skips anything; the 'depends'
        # argument is accepted but unused here.)
        return objects, {}
def _fix_object_args(
self, objects: list[str] | tuple[str, ...], output_dir: str | None
) -> tuple[list[str], str]:
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError("'objects' must be a list or tuple of strings")
objects = list(objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
return (objects, output_dir)
    def _fix_lib_args(
        self,
        libraries: list[str] | tuple[str, ...] | None,
        library_dirs: list[str] | tuple[str, ...] | None,
        runtime_library_dirs: list[str] | tuple[str, ...] | None,
    ) -> tuple[list[str], list[str], list[str]]:
        """Typecheck and fix up some of the arguments supplied to the
        'link_*' methods. Specifically: ensure that all arguments are
        lists, and augment them with their permanent versions
        (eg. 'self.libraries' augments 'libraries'). Return a tuple with
        fixed versions of all arguments.
        """
        if libraries is None:
            libraries = list(self.libraries)  # fresh copy of the instance-wide list
        elif isinstance(libraries, (list, tuple)):
            # caller-supplied entries first, then the instance-wide ones
            libraries = list(libraries) + (self.libraries or [])
        else:
            raise TypeError("'libraries' (if supplied) must be a list of strings")
        if library_dirs is None:
            library_dirs = list(self.library_dirs)
        elif isinstance(library_dirs, (list, tuple)):
            library_dirs = list(library_dirs) + (self.library_dirs or [])
        else:
            raise TypeError("'library_dirs' (if supplied) must be a list of strings")
        # add library dirs for class
        library_dirs += self.__class__.library_dirs
        if runtime_library_dirs is None:
            runtime_library_dirs = list(self.runtime_library_dirs)
        elif isinstance(runtime_library_dirs, (list, tuple)):
            runtime_library_dirs = list(runtime_library_dirs) + (
                self.runtime_library_dirs or []
            )
        else:
            raise TypeError(
                "'runtime_library_dirs' (if supplied) must be a list of strings"
            )
        return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return True
else:
if self.dry_run:
newer = newer_group(objects, output_file, missing='newer')
else:
newer = newer_group(objects, output_file)
return newer
def detect_language(self, sources: str | list[str]) -> str | None:
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
    def preprocess(
        self,
        source: str | os.PathLike[str],
        output_file: str | os.PathLike[str] | None = None,
        macros: list[_Macro] | None = None,
        include_dirs: list[str] | tuple[str, ...] | None = None,
        extra_preargs: list[str] | None = None,
        extra_postargs: Iterable[str] | None = None,
    ):
        """Preprocess a single C/C++ source file, named in 'source'.
        Output will be written to file named 'output_file', or stdout if
        'output_file' not supplied. 'macros' is a list of macro
        definitions as for 'compile()', which will augment the macros set
        with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
        list of directory names that will be added to the default list.
        Raises PreprocessError on failure.
        """
        # Deliberate no-op in the base class; concrete compiler classes
        # override this with a real preprocessor invocation.
        pass
    def compile(
        self,
        sources: Sequence[str | os.PathLike[str]],
        output_dir: str | None = None,
        macros: list[_Macro] | None = None,
        include_dirs: list[str] | tuple[str, ...] | None = None,
        debug: bool = False,
        extra_preargs: list[str] | None = None,
        extra_postargs: list[str] | None = None,
        depends: list[str] | tuple[str, ...] | None = None,
    ) -> list[str]:
        """Compile one or more source files.
        'sources' must be a list of filenames, most likely C/C++
        files, but in reality anything that can be handled by a
        particular compiler and compiler class (eg. MSVCCompiler can
        handle resource files in 'sources'). Return a list of object
        filenames, one per source filename in 'sources'. Depending on
        the implementation, not all source files will necessarily be
        compiled, but all corresponding object filenames will be
        returned.
        If 'output_dir' is given, object files will be put under it, while
        retaining their original path component. That is, "foo/bar.c"
        normally compiles to "foo/bar.o" (for a Unix implementation); if
        'output_dir' is "build", then it would compile to
        "build/foo/bar.o".
        'macros', if given, must be a list of macro definitions. A macro
        definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
        The former defines a macro; if the value is None, the macro is
        defined without an explicit value. The 1-tuple case undefines a
        macro. Later definitions/redefinitions/ undefinitions take
        precedence.
        'include_dirs', if given, must be a list of strings, the
        directories to add to the default include file search path for this
        compilation only.
        'debug' is a boolean; if true, the compiler will be instructed to
        output debug symbols in (or alongside) the object file(s).
        'extra_preargs' and 'extra_postargs' are implementation- dependent.
        On platforms that have the notion of a command-line (e.g. Unix,
        DOS/Windows), they are most likely lists of strings: extra
        command-line arguments to prepend/append to the compiler command
        line. On other platforms, consult the implementation class
        documentation. In any event, they are intended as an escape hatch
        for those occasions when the abstract compiler framework doesn't
        cut the mustard.
        'depends', if given, is a list of filenames that all targets
        depend on. If a source file is older than any file in
        depends, then the source file will be recompiled. This
        supports dependency tracking, but only at a coarse
        granularity.
        Raises CompileError on failure.
        """
        # A concrete compiler class can either override this method
        # entirely or implement _compile().
        macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
            output_dir, macros, include_dirs, sources, depends, extra_postargs
        )
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                # not in 'build' => nothing to do for this object
                # (a subclass's _setup_compile may prune up-to-date sources)
                continue
            self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
        # Return *all* object filenames, not just the ones we just built.
        return objects
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile 'src' to product 'obj'."""
        # A concrete compiler class that does not override compile()
        # should implement _compile().
        # No-op placeholder here; compile() ignores the return value.
        pass
    def create_static_lib(
        self,
        objects: list[str] | tuple[str, ...],
        output_libname: str,
        output_dir: str | None = None,
        debug: bool = False,
        target_lang: str | None = None,
    ) -> None:
        """Link a bunch of stuff together to create a static library file.
        The "bunch of stuff" consists of the list of object files supplied
        as 'objects', the extra object files supplied to
        'add_link_object()' and/or 'set_link_objects()', the libraries
        supplied to 'add_library()' and/or 'set_libraries()', and the
        libraries supplied as 'libraries' (if any).
        'output_libname' should be a library name, not a filename; the
        filename will be inferred from the library name. 'output_dir' is
        the directory where the library file will be put.
        'debug' is a boolean; if true, debugging information will be
        included in the library (note that on most platforms, it is the
        compile step where this matters: the 'debug' flag is included here
        just for consistency).
        'target_lang' is the target language for which the given objects
        are being compiled. This allows specific linkage time treatment of
        certain languages.
        Raises LibError on failure.
        """
        # No-op placeholder; concrete compiler classes implement the
        # actual archiving step.
        pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
    def link(
        self,
        target_desc: str,
        objects: list[str] | tuple[str, ...],
        output_filename: str,
        output_dir: str | None = None,
        libraries: list[str] | tuple[str, ...] | None = None,
        library_dirs: list[str] | tuple[str, ...] | None = None,
        runtime_library_dirs: list[str] | tuple[str, ...] | None = None,
        export_symbols: Iterable[str] | None = None,
        debug: bool = False,
        extra_preargs: list[str] | None = None,
        extra_postargs: list[str] | None = None,
        build_temp: str | os.PathLike[str] | None = None,
        target_lang: str | None = None,
    ):
        """Link a bunch of stuff together to create an executable or
        shared library file.
        The "bunch of stuff" consists of the list of object files supplied
        as 'objects'. 'output_filename' should be a filename. If
        'output_dir' is supplied, 'output_filename' is relative to it
        (i.e. 'output_filename' can provide directory components if
        needed).
        'libraries' is a list of libraries to link against. These are
        library names, not filenames, since they're translated into
        filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
        on Unix and "foo.lib" on DOS/Windows). However, they can include a
        directory component, which means the linker will look in that
        specific directory rather than searching all the normal locations.
        'library_dirs', if supplied, should be a list of directories to
        search for libraries that were specified as bare library names
        (ie. no directory component). These are on top of the system
        default and those supplied to 'add_library_dir()' and/or
        'set_library_dirs()'. 'runtime_library_dirs' is a list of
        directories that will be embedded into the shared library and used
        to search for other shared libraries that *it* depends on at
        run-time. (This may only be relevant on Unix.)
        'export_symbols' is a list of symbols that the shared library will
        export. (This appears to be relevant only on Windows.)
        'debug' is as for 'compile()' and 'create_static_lib()', with the
        slight distinction that it actually matters on most platforms (as
        opposed to 'create_static_lib()', which includes a 'debug' flag
        mostly for form's sake).
        'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
        of course that they supply command-line arguments for the
        particular linker being used).
        'target_lang' is the target language for which the given objects
        are being compiled. This allows specific linkage time treatment of
        certain languages.
        Raises LinkError on failure.
        """
        # Abstract: every concrete compiler class must provide link();
        # 'target_desc' is one of SHARED_OBJECT/SHARED_LIBRARY/EXECUTABLE.
        raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(
self,
objects: list[str] | tuple[str, ...],
output_libname: str,
output_dir: str | None = None,
libraries: list[str] | tuple[str, ...] | None = None,
library_dirs: list[str] | tuple[str, ...] | None = None,
runtime_library_dirs: list[str] | tuple[str, ...] | None = None,
export_symbols: Iterable[str] | None = None,
debug: bool = False,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
build_temp: str | os.PathLike[str] | None = None,
target_lang: str | None = None,
):
self.link(
Compiler.SHARED_LIBRARY,
objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols,
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang,
)
def link_shared_object(
self,
objects: list[str] | tuple[str, ...],
output_filename: str,
output_dir: str | None = None,
libraries: list[str] | tuple[str, ...] | None = None,
library_dirs: list[str] | tuple[str, ...] | None = None,
runtime_library_dirs: list[str] | tuple[str, ...] | None = None,
export_symbols: Iterable[str] | None = None,
debug: bool = False,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
build_temp: str | os.PathLike[str] | None = None,
target_lang: str | None = None,
):
self.link(
Compiler.SHARED_OBJECT,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols,
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang,
)
def link_executable(
self,
objects: list[str] | tuple[str, ...],
output_progname: str,
output_dir: str | None = None,
libraries: list[str] | tuple[str, ...] | None = None,
library_dirs: list[str] | tuple[str, ...] | None = None,
runtime_library_dirs: list[str] | tuple[str, ...] | None = None,
debug: bool = False,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
target_lang: str | None = None,
):
self.link(
Compiler.EXECUTABLE,
objects,
self.executable_filename(output_progname),
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None,
debug,
extra_preargs,
extra_postargs,
None,
target_lang,
)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
    def library_dir_option(self, dir: str) -> str:
        """Return the compiler option to add 'dir' to the list of
        directories searched for libraries.
        """
        # Abstract; consumed by gen_lib_options() below.
        raise NotImplementedError
    def runtime_library_dir_option(self, dir: str) -> str:
        """Return the compiler option to add 'dir' to the list of
        directories searched for runtime libraries.
        """
        # Abstract; gen_lib_options() accepts either a single option
        # string or an iterable of options from implementations.
        raise NotImplementedError
    def library_option(self, lib: str) -> str:
        """Return the compiler option to add 'lib' to the list of libraries
        linked into the shared library or executable.
        """
        # Abstract; consumed by gen_lib_options() below.
        raise NotImplementedError
    def has_function(  # noqa: C901
        self,
        funcname: str,
        includes: Iterable[str] | None = None,
        include_dirs: list[str] | tuple[str, ...] | None = None,
        libraries: list[str] | None = None,
        library_dirs: list[str] | tuple[str, ...] | None = None,
    ) -> bool:
        """Return a boolean indicating whether funcname is provided as
        a symbol on the current platform. The optional arguments can
        be used to augment the compilation environment.
        The libraries argument is a list of flags to be passed to the
        linker to make additional symbol definitions available for
        linking.
        The includes and include_dirs arguments are deprecated.
        Usually, supplying include files with function declarations
        will cause function detection to fail even in cases where the
        symbol is available for linking.
        """
        # this can't be included at module scope because it tries to
        # import math which might not be available at that point - maybe
        # the necessary logic should just be inlined?
        import tempfile
        if includes is None:
            includes = []
        else:
            warnings.warn("includes is deprecated", DeprecationWarning)
        if include_dirs is None:
            include_dirs = []
        else:
            warnings.warn("include_dirs is deprecated", DeprecationWarning)
        if libraries is None:
            libraries = []
        if library_dirs is None:
            library_dirs = []
        # Write a minimal C program that calls 'funcname'; detection is a
        # compile+link probe of that program.
        fd, fname = tempfile.mkstemp(".c", funcname, text=True)
        with os.fdopen(fd, "w", encoding='utf-8') as f:
            for incl in includes:
                f.write(f"""#include "{incl}"\n""")
            if not includes:
                # Use "char func(void);" as the prototype to follow
                # what autoconf does. This prototype does not match
                # any well-known function the compiler might recognize
                # as a builtin, so this ends up as a true link test.
                # Without a fake prototype, the test would need to
                # know the exact argument types, and the has_function
                # interface does not provide that level of information.
                f.write(
                    f"""\
            #ifdef __cplusplus
            extern "C"
            #endif
            char {funcname}(void);
            """
                )
            f.write(
                f"""\
            int main (int argc, char **argv) {{
                {funcname}();
                return 0;
            }}
            """
            )
        try:
            # Compilation failure means the probe is inconclusive: report
            # the symbol as unavailable.
            objects = self.compile([fname], include_dirs=include_dirs)
        except CompileError:
            return False
        finally:
            os.remove(fname)
        try:
            self.link_executable(
                objects, "a.out", libraries=libraries, library_dirs=library_dirs
            )
        except (LinkError, TypeError):
            # NOTE(review): TypeError presumably guards link_executable
            # overrides with incompatible signatures — confirm.
            return False
        else:
            os.remove(
                self.executable_filename("a.out", output_dir=self.output_dir or '')
            )
        finally:
            # Always clean up object files, whether or not the link worked.
            for fn in objects:
                os.remove(fn)
        return True
    def find_library_file(
        self, dirs: Iterable[str], lib: str, debug: bool = False
    ) -> str | None:
        """Search the specified list of directories for a static or shared
        library file 'lib' and return the full path to that file. If
        'debug' true, look for a debugging version (if that makes sense on
        the current platform). Return None if 'lib' wasn't found in any of
        the specified directories.
        """
        # Abstract; see gen_lib_options() for how the result is used.
        raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(
self,
source_filenames: Iterable[str | os.PathLike[str]],
strip_dir: bool = False,
output_dir: str | os.PathLike[str] | None = '',
) -> list[str]:
if output_dir is None:
output_dir = ''
return list(
self._make_out_path(output_dir, strip_dir, src_name)
for src_name in source_filenames
)
@property
def out_extensions(self):
return dict.fromkeys(self.src_extensions, self.obj_extension)
    def _make_out_path(self, output_dir, strip_dir, src_name):
        """Map one source path to its object-file path using out_extensions."""
        return self._make_out_path_exts(
            output_dir, strip_dir, src_name, self.out_extensions
        )
    @classmethod
    def _make_out_path_exts(cls, output_dir, strip_dir, src_name, extensions):
        r"""
        Map 'src_name' under 'output_dir', swapping its extension per the
        'extensions' mapping; with 'strip_dir', drop the directory part.

        >>> exts = {'.c': '.o'}
        >>> Compiler._make_out_path_exts('.', False, '/foo/bar.c', exts).replace('\\', '/')
        './foo/bar.o'
        >>> Compiler._make_out_path_exts('.', True, '/foo/bar.c', exts).replace('\\', '/')
        './bar.o'
        """
        src = pathlib.PurePath(src_name)
        # Ensure base is relative to honor output_dir (python/cpython#37775).
        base = cls._make_relative(src)
        try:
            new_ext = extensions[src.suffix]
        except LookupError:
            # suffix absent from the mapping -> not a recognized source type
            raise UnknownFileType(f"unknown file type '{src.suffix}' (from '{src}')")
        if strip_dir:
            base = pathlib.PurePath(base.name)
        return os.path.join(output_dir, base.with_suffix(new_ext))
    @staticmethod
    def _make_relative(base: pathlib.Path):
        """Strip the drive/root anchor so the path can be re-rooted elsewhere."""
        return base.relative_to(base.anchor)
@overload
def shared_object_filename(
self,
basename: str,
strip_dir: Literal[False] = False,
output_dir: str | os.PathLike[str] = "",
) -> str: ...
@overload
def shared_object_filename(
self,
basename: str | os.PathLike[str],
strip_dir: Literal[True],
output_dir: str | os.PathLike[str] = "",
) -> str: ...
def shared_object_filename(
self,
basename: str | os.PathLike[str],
strip_dir: bool = False,
output_dir: str | os.PathLike[str] = '',
) -> str:
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
@overload
def executable_filename(
self,
basename: str,
strip_dir: Literal[False] = False,
output_dir: str | os.PathLike[str] = "",
) -> str: ...
@overload
def executable_filename(
self,
basename: str | os.PathLike[str],
strip_dir: Literal[True],
output_dir: str | os.PathLike[str] = "",
) -> str: ...
def executable_filename(
self,
basename: str | os.PathLike[str],
strip_dir: bool = False,
output_dir: str | os.PathLike[str] = '',
) -> str:
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(
self,
libname: str,
lib_type: str = "static",
strip_dir: bool = False,
output_dir: str | os.PathLike[str] = "", # or 'shared'
):
assert output_dir is not None
expected = '"static", "shared", "dylib", "xcode_stub"'
if lib_type not in eval(expected):
raise ValueError(f"'lib_type' must be {expected}")
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split(libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
# -- Utility methods -----------------------------------------------
    def announce(self, msg: object, level: int = 1) -> None:
        """Log 'msg' at debug level; 'level' is accepted for backward
        compatibility but ignored here."""
        log.debug(msg)
    def debug_print(self, msg: object) -> None:
        """Print 'msg' to stdout only when the distutils DEBUG flag is set."""
        # local import: keeps the DEBUG lookup lazy
        from distutils.debug import DEBUG
        if DEBUG:
            print(msg)
def warn(self, msg: object) -> None:
sys.stderr.write(f"warning: {msg}\n")
    def execute(
        self,
        func: Callable[[Unpack[_Ts]], object],
        args: tuple[Unpack[_Ts]],
        msg: object = None,
        level: int = 1,
    ) -> None:
        """Run func(*args) via the module-level execute() helper, which
        honours this compiler's dry-run setting; 'level' is accepted for
        backward compatibility but not forwarded."""
        execute(func, args, msg, self.dry_run)
    def spawn(
        self, cmd: MutableSequence[bytes | str | os.PathLike[str]], **kwargs
    ) -> None:
        """Run the external command 'cmd', honouring the dry-run setting."""
        spawn(cmd, dry_run=self.dry_run, **kwargs)
    @overload
    def move_file(
        self, src: str | os.PathLike[str], dst: _StrPathT
    ) -> _StrPathT | str: ...
    @overload
    def move_file(
        self, src: bytes | os.PathLike[bytes], dst: _BytesPathT
    ) -> _BytesPathT | bytes: ...
    def move_file(
        self,
        src: str | os.PathLike[str] | bytes | os.PathLike[bytes],
        dst: str | os.PathLike[str] | bytes | os.PathLike[bytes],
    ) -> str | os.PathLike[str] | bytes | os.PathLike[bytes]:
        """Move 'src' to 'dst', honouring the dry-run setting."""
        return move_file(src, dst, dry_run=self.dry_run)
    def mkpath(self, name, mode=0o777):
        """Create directory 'name' (and missing ancestors), honouring dry-run."""
        mkpath(name, mode, dry_run=self.dry_run)
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
    # (pattern, compiler-type) pairs, tried in order with re.match()
    # by get_default_compiler() below.
    # Platform string mappings
    # on a cygwin built python we can use gcc like an ordinary UNIXish
    # compiler
    ('cygwin.*', 'unix'),
    ('zos', 'zos'),
    # OS name mappings
    ('posix', 'unix'),
    ('nt', 'msvc'),
)
def get_default_compiler(osname: str | None = None, platform: str | None = None) -> str:
    """Determine the default compiler to use for the given platform.
    osname should be one of the standard Python OS names (i.e. the
    ones returned by os.name) and platform the common value
    returned by sys.platform for the platform in question.
    The default values are os.name and sys.platform in case the
    parameters are not given.
    """
    osname = os.name if osname is None else osname
    platform = sys.platform if platform is None else platform
    # Mingw is a special case where sys.platform is 'win32' but we
    # want to use the 'mingw32' compiler, so check it first
    if is_mingw():
        return 'mingw32'
    matches = (
        compiler_type
        for pattern, compiler_type in _default_compilers
        if re.match(pattern, platform) or re.match(pattern, osname)
    )
    # First matching pattern wins; fall back to the Unix compiler.
    return next(matches, 'unix')
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = {
    # the third element is the human-readable description shown by
    # show_compilers(); only 'unix', 'msvc', 'mingw32' and 'zos' are ever
    # chosen automatically (see _default_compilers / get_default_compiler),
    # the rest must be requested explicitly via new_compiler(compiler=...).
    'unix': ('unixccompiler', 'UnixCCompiler', "standard UNIX-style compiler"),
    'msvc': ('_msvccompiler', 'MSVCCompiler', "Microsoft Visual C++"),
    'cygwin': (
        'cygwinccompiler',
        'CygwinCCompiler',
        "Cygwin port of GNU C Compiler for Win32",
    ),
    'mingw32': (
        'cygwinccompiler',
        'Mingw32CCompiler',
        "Mingw32 port of GNU C Compiler for Win32",
    ),
    'bcpp': ('bcppcompiler', 'BCPPCompiler', "Borland C++ Compiler"),
    'zos': ('zosccompiler', 'zOSCCompiler', 'IBM XL C/C++ Compilers'),
}
def show_compilers() -> None:
    """Print list of available compilers (used by the "--help-compiler"
    options to "build", "build_ext", "build_clib").
    """
    # XXX this "knows" that the compiler option it's describing is
    # "--compiler", which just happens to be the case for the three
    # commands that use it.
    from distutils.fancy_getopt import FancyGetopt
    # sort so the help output is stable, ordered by option string
    compilers = sorted(
        ("compiler=" + compiler, None, compiler_class[compiler][2])
        for compiler in compiler_class.keys()
    )
    pretty_printer = FancyGetopt(compilers)
    pretty_printer.print_help("List of available compilers:")
def new_compiler(
    plat: str | None = None,
    compiler: str | None = None,
    verbose: bool = False,
    dry_run: bool = False,
    force: bool = False,
) -> Compiler:
    """Generate an instance of some CCompiler subclass for the supplied
    platform/compiler combination. 'plat' defaults to 'os.name'
    (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
    for that platform. Currently only 'posix' and 'nt' are supported, and
    the default compilers are "traditional Unix interface" (UnixCCompiler
    class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
    possible to ask for a Unix compiler object under Windows, and a
    Microsoft compiler object under Unix -- if you supply a value for
    'compiler', 'plat' is ignored.

    'verbose' is accepted for backward compatibility but unused.
    """
    if plat is None:
        plat = os.name
    try:
        if compiler is None:
            compiler = get_default_compiler(plat)
        (module_name, class_name, long_description) = compiler_class[compiler]
    except KeyError as exc:
        msg = f"don't know how to compile C/C++ code on platform '{plat}'"
        if compiler is not None:
            msg = msg + f" with '{compiler}' compiler"
        # chain the original lookup failure so tracebacks show the cause
        raise DistutilsPlatformError(msg) from exc
    try:
        module_name = "distutils." + module_name
        __import__(module_name)
        module = sys.modules[module_name]
        klass = vars(module)[class_name]
    except ImportError as exc:
        raise DistutilsModuleError(
            f"can't compile C/C++ code: unable to load module '{module_name}'"
        ) from exc
    except KeyError as exc:
        raise DistutilsModuleError(
            f"can't compile C/C++ code: unable to find class '{class_name}' "
            f"in module '{module_name}'"
        ) from exc
    # XXX The None is necessary to preserve backwards compatibility
    # with classes that expect verbose to be the first positional
    # argument.
    return klass(None, dry_run, force)
def gen_preprocess_options(macros, include_dirs):
    """Generate C pre-processor options (-D, -U, -I) as used by at least
    two types of compilers: the typical Unix compiler and Visual C++.
    'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
    means undefine (-U) macro 'name', and (name,value) means define (-D)
    macro 'name' to 'value'. 'include_dirs' is just a list of directory
    names to be added to the header file search path (-I). Returns a list
    of command-line options suitable for either Unix compilers or Visual
    C++.
    """
    # XXX it would be nice (mainly aesthetic, and so we don't generate
    # stupid-looking command lines) to go over 'macros' and eliminate
    # redundant definitions/undefinitions (ie. ensure that only the
    # latest mention of a particular macro winds up on the command
    # line). I don't think it's essential, though, since most (all?)
    # Unix C compilers only pay attention to the latest -D or -U
    # mention of a macro on their command line. Similar situation for
    # 'include_dirs'. I'm punting on both for now. Anyways, weeding out
    # redundancies like this should probably be the province of
    # CCompiler, since the data structures used are inherited from it
    # and therefore common to all CCompiler classes.
    opts = []
    for macro in macros:
        if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
            raise TypeError(
                f"bad macro definition '{macro}': "
                "each element of 'macros' list must be a 1- or 2-tuple"
            )
        if len(macro) == 1:
            # 1-tuple: undefine this macro
            opts.append(f"-U{macro[0]}")
        elif macro[1] is None:
            # 2-tuple with None value: define with no explicit value
            opts.append(f"-D{macro[0]}")
        else:
            # XXX *don't* need to be clever about quoting the
            # macro value here, because we're going to avoid the
            # shell at all costs when we spawn the command!
            name, value = macro
            opts.append(f"-D{name}={value}")
    opts.extend(f"-I{dir}" for dir in include_dirs)
    return opts
def gen_lib_options(
    compiler: Compiler,
    library_dirs: Iterable[str],
    runtime_library_dirs: Iterable[str],
    libraries: Iterable[str],
) -> list[str]:
    """Generate linker options for searching library directories and
    linking with specific libraries. 'libraries' and 'library_dirs' are,
    respectively, lists of library names (not filenames!) and search
    directories. Returns a list of command-line options suitable for use
    with some compiler (depending on the two format strings passed in).
    """
    lib_opts = [compiler.library_dir_option(dir) for dir in library_dirs]
    for dir in runtime_library_dirs:
        # runtime_library_dir_option may yield one flag or several;
        # always_iterable normalizes both shapes.
        lib_opts.extend(always_iterable(compiler.runtime_library_dir_option(dir)))
    # XXX it's important that we *not* remove redundant library mentions!
    # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
    # resolve all symbols. I just hope we never have to say "-lfoo obj.o
    # -lbar" to get things to work -- that's certainly a possibility, but a
    # pretty nasty way to arrange your C code.
    for lib in libraries:
        (lib_dir, lib_name) = os.path.split(lib)
        if lib_dir:
            # library given with an explicit directory: pass the file path
            # itself, or warn and skip when no matching file is found
            lib_file = compiler.find_library_file([lib_dir], lib_name)
            if lib_file:
                lib_opts.append(lib_file)
            else:
                compiler.warn(
                    f"no library file corresponding to '{lib}' found (skipping)"
                )
        else:
            lib_opts.append(compiler.library_option(lib))
    return lib_opts
| Compiler |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 10361,
"end": 14668
} | class ____(ResolverBase):
def test_domain_resolver(self):
url = self.resolver.get_domain_without_protocol(project=self.pip)
self.assertEqual(url, "pip.readthedocs.org")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="readthedocs.io",
)
def test_domain_resolver_with_domain_object(self):
self.domain = fixture.get(
Domain,
domain="docs.foobar.com",
project=self.pip,
canonical=True,
https=False,
)
url = Resolver().get_domain_without_protocol(project=self.pip)
self.assertEqual(url, "docs.foobar.com")
url = Resolver().get_domain_without_protocol(
project=self.pip, use_canonical_domain=False
)
self.assertEqual(url, "pip.readthedocs.io")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="readthedocs.io",
)
def test_domain_resolver_subproject(self):
url = self.resolver.get_domain_without_protocol(project=self.subproject)
self.assertEqual(url, "pip.readthedocs.io")
url = self.resolver.get_domain_without_protocol(
project=self.subproject, use_canonical_domain=False
)
self.assertEqual(url, "pip.readthedocs.io")
def test_domain_resolver_subproject_itself(self):
"""
Test inconsistent project/subproject relationship.
If a project is subproject of itself (inconsistent relationship) we
still resolves the proper domain.
"""
# remove all possible subproject relationships
self.pip.subprojects.all().delete()
# add the project as subproject of itself
self.pip.add_subproject(self.pip)
url = self.resolver.get_domain_without_protocol(project=self.pip)
self.assertEqual(url, "pip.readthedocs.org")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="readthedocs.io",
)
def test_domain_resolver_translation(self):
url = self.resolver.get_domain_without_protocol(project=self.translation)
self.assertEqual(url, "pip.readthedocs.io")
url = self.resolver.get_domain_without_protocol(
project=self.translation, use_canonical_domain=False
)
self.assertEqual(url, "pip.readthedocs.io")
def test_domain_resolver_translation_itself(self):
"""
Test inconsistent project/translation relationship.
If a project is a translation of itself (inconsistent relationship) we
still resolves the proper domain.
"""
# remove all possible translations relationships
self.pip.translations.all().delete()
# add the project as subproject of itself
self.pip.translations.add(self.pip)
url = self.resolver.get_domain_without_protocol(project=self.pip)
self.assertEqual(url, "pip.readthedocs.org")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="public.readthedocs.org",
)
def test_domain_public(self):
url = self.resolver.get_domain_without_protocol(project=self.translation)
self.assertEqual(url, "pip.public.readthedocs.org")
url = self.resolver.get_domain_without_protocol(
project=self.translation, use_canonical_domain=False
)
self.assertEqual(url, "pip.public.readthedocs.org")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="public.readthedocs.org",
RTD_EXTERNAL_VERSION_DOMAIN="dev.readthedocs.build",
PUBLIC_DOMAIN_USES_HTTPS=True,
)
def test_domain_external(self):
latest = self.pip.versions.first()
latest.type = EXTERNAL
latest.save()
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "https://pip--latest.dev.readthedocs.build/en/latest/")
url = self.resolver.resolve(project=self.pip, version_slug=latest.slug)
self.assertEqual(url, "https://pip--latest.dev.readthedocs.build/en/latest/")
url = self.resolver.resolve(project=self.pip, version_slug="non-external")
self.assertEqual(url, "https://pip.public.readthedocs.org/en/non-external/")
| ResolverDomainTests |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 24358,
"end": 25658
} | class ____(nn.Module):
def __init__(self, config: EfficientLoFTRConfig, hidden_size: int, intermediate_size: int):
super().__init__()
self.out_conv1 = nn.Conv2d(hidden_size, intermediate_size, kernel_size=1, stride=1, padding=0, bias=False)
self.out_conv2 = nn.Conv2d(
intermediate_size, intermediate_size, kernel_size=3, stride=1, padding=1, bias=False
)
self.batch_norm = nn.BatchNorm2d(intermediate_size)
self.activation = ACT2CLS[config.mlp_activation_function]()
self.out_conv3 = nn.Conv2d(intermediate_size, hidden_size, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, hidden_states: torch.Tensor, residual_states: torch.Tensor) -> torch.Tensor:
residual_states = self.out_conv1(residual_states)
residual_states = residual_states + hidden_states
residual_states = self.out_conv2(residual_states)
residual_states = self.batch_norm(residual_states)
residual_states = self.activation(residual_states)
residual_states = self.out_conv3(residual_states)
residual_states = nn.functional.interpolate(
residual_states, scale_factor=2.0, mode="bilinear", align_corners=False
)
return residual_states
| EfficientLoFTROutConvBlock |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 9964,
"end": 10269
} | class ____(EnumSerializer):
def unpack(self, value: str):
try:
RunFailureReason(value)
except ValueError:
return RunFailureReason.UNKNOWN
return super().unpack(value)
@whitelist_for_serdes(serializer=RunFailureReasonSerializer)
| RunFailureReasonSerializer |
python | redis__redis-py | tests/test_asyncio/test_search.py | {
"start": 45999,
"end": 52604
} | class ____(AsyncSearchTestsBase):
@pytest.mark.redismod
@pytest.mark.onlynoncluster
# NOTE(imalinovskyi): This test contains hardcoded scores valid only for RediSearch 2.8+
@skip_ifmodversion_lt("2.8.0", "search")
@skip_if_server_version_gte("7.9.0")
async def test_scorer(self, decoded_r: redis.Redis):
await decoded_r.ft().create_index((TextField("description"),))
await decoded_r.hset(
"doc1",
mapping={"description": "The quick brown fox jumps over the lazy dog"},
)
await decoded_r.hset(
"doc2",
mapping={
"description": "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do." # noqa
},
)
if is_resp2_connection(decoded_r):
# default scorer is TFIDF
res = await decoded_r.ft().search(Query("quick").with_scores())
assert 1.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF").with_scores()
)
assert 1.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF.DOCNORM").with_scores()
)
assert 0.14285714285714285 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("BM25").with_scores()
)
assert 0.22471909420069797 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("DISMAX").with_scores()
)
assert 2.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res.docs[0].score
else:
res = await decoded_r.ft().search(Query("quick").with_scores())
assert 1.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF").with_scores()
)
assert 1.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF.DOCNORM").with_scores()
)
assert 0.14285714285714285 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("BM25").with_scores()
)
assert 0.22471909420069797 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("DISMAX").with_scores()
)
assert 2.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res["results"][0]["score"]
@pytest.mark.redismod
@pytest.mark.onlynoncluster
# NOTE(imalinovskyi): This test contains hardcoded scores valid only for RediSearch 2.8+
@skip_ifmodversion_lt("2.8.0", "search")
@skip_if_server_version_lt("7.9.0")
async def test_scorer_with_new_default_scorer(self, decoded_r: redis.Redis):
await decoded_r.ft().create_index((TextField("description"),))
await decoded_r.hset(
"doc1",
mapping={"description": "The quick brown fox jumps over the lazy dog"},
)
await decoded_r.hset(
"doc2",
mapping={
"description": "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do." # noqa
},
)
if is_resp2_connection(decoded_r):
# default scorer is BM25STD
res = await decoded_r.ft().search(Query("quick").with_scores())
assert 0.23 == pytest.approx(res.docs[0].score, 0.05)
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF").with_scores()
)
assert 1.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF.DOCNORM").with_scores()
)
assert 0.14285714285714285 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("BM25").with_scores()
)
assert 0.22471909420069797 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("DISMAX").with_scores()
)
assert 2.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res.docs[0].score
res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res.docs[0].score
else:
res = await decoded_r.ft().search(Query("quick").with_scores())
assert 0.23 == pytest.approx(res["results"][0]["score"], 0.05)
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF").with_scores()
)
assert 1.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("TFIDF.DOCNORM").with_scores()
)
assert 0.14285714285714285 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("BM25").with_scores()
)
assert 0.22471909420069797 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("DISMAX").with_scores()
)
assert 2.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("DOCSCORE").with_scores()
)
assert 1.0 == res["results"][0]["score"]
res = await decoded_r.ft().search(
Query("quick").scorer("HAMMING").with_scores()
)
assert 0.0 == res["results"][0]["score"]
| TestScorers |
python | dask__distributed | distributed/comm/registry.py | {
"start": 165,
"end": 375
} | class ____(Protocol):
def __call__(self, **kwargs: str) -> Iterable[importlib.metadata.EntryPoint]: ...
_entry_points: _EntryPoints = importlib.metadata.entry_points # type: ignore[assignment]
| _EntryPoints |
python | mamba-org__mamba | micromamba/tests/test_activation.py | {
"start": 1697,
"end": 33130
} | class ____:
def __getitem__(self, shell: str) -> str:
if shell == "powershell":
# find powershell profile path dynamically
args = [
powershell_cmd,
"-NoProfile",
"-Command",
"$PROFILE.CurrentUserAllHosts",
]
res = subprocess.run(args, capture_output=True, check=True)
return res.stdout.decode("utf-8").strip()
elif shell == "cmd.exe":
return None
raise KeyError(f"Invalid shell: {shell}")
paths = {
"win": WindowsProfiles(),
"osx": {
"zsh": "~/.zshrc",
"bash": "~/.bash_profile",
"xonsh": "~/.xonshrc",
"tcsh": "~/.tcshrc",
"fish": "~/.config/fish/config.fish",
"nu": "~/.config/nushell/config.nu",
},
"linux": {
"zsh": "~/.zshrc",
"bash": "~/.bashrc",
"xonsh": "~/.xonshrc",
"tcsh": "~/.tcshrc",
"fish": "~/.config/fish/config.fish",
"nu": "~/.config/nushell/config.nu",
},
}
def xonsh_shell_args(interpreter):
# In macOS, the parent process name is "Python" and not "xonsh" like in Linux.
# Thus, we need to specify the shell explicitly.
return "-s xonsh" if interpreter == "xonsh" and plat == "osx" else ""
def extract_vars(vxs, interpreter):
return [f"echo {v}={shvar(v, interpreter)}" for v in vxs]
def write_script(interpreter, lines, path):
fname = os.path.join(path, "script" + suffixes[interpreter])
if plat == "win":
if interpreter == "powershell":
with open(fname, "w", encoding="utf-8-sig") as fo:
fo.write("\n".join(lines) + "\n")
else:
with open(fname, "w", encoding="utf-8") as fo:
fo.write("\n".join(lines) + "\n")
else:
with open(fname, "w") as fo:
fo.write("\n".join(lines) + "\n")
return fname
possible_interpreters = {
"win": {"powershell", "cmd.exe", "bash", "nu"},
"unix": {"bash", "zsh", "fish", "xonsh", "tcsh", "nu"},
}
regkey = "HKEY_CURRENT_USER\\Software\\Microsoft\\Command Processor\\AutoRun"
@pytest.fixture
def winreg_value():
if plat == "win":
try:
saved_winreg_value = helpers.read_windows_registry(regkey)
except Exception:
print("Could not read registry value")
saved_winreg_value = ("", 1)
new_winreg_value = ("", saved_winreg_value[1])
print("setting registry to ", new_winreg_value)
helpers.write_windows_registry(regkey, *new_winreg_value)
yield new_winreg_value
print("setting registry to ", saved_winreg_value)
helpers.write_windows_registry(regkey, *saved_winreg_value)
else:
yield None
def find_path_in_str(p, s):
if isinstance(p, Path):
p = str(p)
if p in s:
return True
if p.replace("\\", "\\\\") in s:
return True
return False
def format_path(p, interpreter):
if plat == "win" and interpreter == "bash":
return str(PurePosixPath(PureWindowsPath(p)))
else:
return str(p)
def call_interpreter(s, tmp_path, interpreter, interactive=False, env=None):
if interactive and interpreter == "powershell":
# "Get-Content -Path $PROFILE.CurrentUserAllHosts | Invoke-Expression"
s = [". $PROFILE.CurrentUserAllHosts"] + s
if interactive and interpreter == "bash" and plat == "linux":
s = ["source ~/.bashrc"] + s
if interpreter == "cmd.exe":
mods = ["@chcp 65001>nul"]
umamba = helpers.get_umamba()
mamba_name = Path(umamba).stem
for x in s:
if x.startswith(f"{mamba_name} activate") or x.startswith(f"{mamba_name} deactivate"):
mods.append("call " + x)
else:
mods.append(x)
s = mods
f = write_script(interpreter, s, tmp_path)
if interpreter not in possible_interpreters[running_os]:
return None, None
if interpreter == "cmd.exe":
args = ["cmd.exe", "/Q", "/C", f]
elif interpreter == "powershell":
args = [powershell_cmd, "-NoProfile", "-ExecutionPolicy", "Bypass", "-File", f]
elif interpreter == "bash" and plat == "win":
args = [os.path.join(os.environ["PROGRAMFILES"], "Git", "bin", "bash.exe"), f]
else:
args = [interpreter, f]
if interactive:
args.insert(1, "-i")
if interactive and interpreter == "bash":
args.insert(1, "-l")
try:
res = subprocess.run(
args,
capture_output=True,
check=True,
env=env,
encoding="utf-8",
)
except subprocess.CalledProcessError as e:
stdout = e.stdout.strip()
stderr = e.stderr.strip()
try:
print(stdout)
print(stderr)
except Exception:
pass
if interpreter == "cmd.exe":
if stdout.startswith("'") and stdout.endswith("'"):
stdout = stdout[1:-1]
e.stdout = stdout
e.stderr = stderr
raise e
except Exception as e:
raise e
stdout = res.stdout.strip()
stderr = res.stderr.strip()
try:
print(stdout)
print(stderr)
except Exception:
pass
if interpreter == "cmd.exe":
if stdout.startswith("'") and stdout.endswith("'"):
stdout = stdout[1:-1]
return stdout, stderr
def get_interpreters(exclude=None):
if exclude is None:
exclude = []
return [x for x in possible_interpreters[running_os] if x not in exclude]
def get_valid_interpreters():
valid_interpreters = []
s = ["echo 'hello world'"]
with tempfile.TemporaryDirectory() as tmpdirname:
for interpreter in possible_interpreters[running_os]:
try:
stdout, _ = call_interpreter(s, tmpdirname, interpreter)
assert stdout == "hello world"
valid_interpreters.append(interpreter)
except Exception:
pass
return valid_interpreters
valid_interpreters = get_valid_interpreters()
def get_self_update_interpreters():
if plat == "win":
return ["cmd.exe", "powershell", "bash"]
if plat == "osx":
return ["zsh", "bash"]
else:
return ["bash"]
def shvar(v, interpreter):
if interpreter in ["bash", "zsh", "xonsh", "fish", "tcsh", "dash"]:
return f"${v}"
elif interpreter == "powershell":
return f"$Env:{v}"
elif interpreter == "cmd.exe":
return f"%{v}%"
elif interpreter == "nu":
return f"$env.{v}"
def env_to_dict(out, interpreter="bash"):
if interpreter == "cmd.exe":
with open(out) as f:
out = f.read()
if interpreter == "fish":
return {
v.split(" ", maxsplit=1)[0]: v.split(" ", maxsplit=1)[1]
for _, _, v in [x.partition("set -gx ") for x in out.splitlines()]
}
elif interpreter in ["csh", "tcsh"]:
res = {}
for line in out.splitlines():
line = line.removesuffix(";")
if line.startswith("set "):
k, v = line.split(" ")[1].split("=")
elif line.startswith("setenv "):
_, k, v = line.strip().split(maxsplit=2)
res[k] = v
return res
else:
return {k: v for k, _, v in [x.partition("=") for x in out.splitlines()]}
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_shell_init(
tmp_home,
winreg_value,
tmp_root_prefix,
tmp_path,
interpreter,
):
# TODO enable these tests also on win + bash!
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
run_dir = tmp_path / "rundir"
run_dir.mkdir()
def call(s):
return call_interpreter(s, run_dir, interpreter)
rpv = shvar("MAMBA_ROOT_PREFIX", interpreter)
s = [f"echo {rpv}"]
stdout, stderr = call(s)
assert stdout == str(tmp_root_prefix)
s = [f"{umamba} shell init -r {rpv} {xonsh_shell_args(interpreter)}"]
stdout, stderr = call(s)
if interpreter == "cmd.exe":
value = helpers.read_windows_registry(regkey)
assert "mamba_hook.bat" in value[0]
assert find_path_in_str(tmp_root_prefix, value[0])
prev_text = value[0]
else:
path = Path(paths[plat][interpreter]).expanduser()
with open(path) as fi:
x = fi.read()
assert "mamba" in x
assert find_path_in_str(tmp_root_prefix, x)
prev_text = x
s = [f"{umamba} shell init -r {rpv} {xonsh_shell_args(interpreter)}"]
stdout, stderr = call(s)
if interpreter == "cmd.exe":
value = helpers.read_windows_registry(regkey)
assert "mamba_hook.bat" in value[0]
assert find_path_in_str(tmp_root_prefix, value[0])
assert prev_text == value[0]
assert "&" not in value[0]
else:
with open(path) as fi:
x = fi.read()
assert "mamba" in x
assert prev_text == x
if interpreter == "cmd.exe":
helpers.write_windows_registry(regkey, "echo 'test'", winreg_value[1])
s = [f"{umamba} shell init -r {rpv}"]
stdout, stderr = call(s)
value = helpers.read_windows_registry(regkey)
assert "mamba_hook.bat" in value[0]
assert find_path_in_str(tmp_root_prefix, value[0])
assert value[0].startswith("echo 'test' & ")
assert "&" in value[0]
if interpreter != "cmd.exe":
with open(path) as fi:
prevlines = fi.readlines()
with open(path, "w") as fo:
text = "\n".join(
["", "", "echo 'hihi'", ""]
+ [x.rstrip("\n\r") for x in prevlines]
+ ["", "", "echo 'hehe'"]
)
fo.write(text)
s = [f"{umamba} shell init -r {rpv}"]
stdout, stderr = call(s)
with open(path) as fi:
x = fi.read()
assert "mamba" in x
assert text == x
other_root_prefix = tmp_path / "prefix"
other_root_prefix.mkdir()
s = [f"{umamba} shell init -r {other_root_prefix} {xonsh_shell_args(interpreter)}"]
stdout, stderr = call(s)
if interpreter == "cmd.exe":
x = helpers.read_windows_registry(regkey)[0]
assert "mamba" in x
assert find_path_in_str(other_root_prefix, x)
assert not find_path_in_str(tmp_root_prefix, x)
else:
with open(path) as fi:
x = fi.read()
assert "mamba" in x
assert find_path_in_str(other_root_prefix, x)
assert not find_path_in_str(tmp_root_prefix, x)
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_shell_init_deinit_root_prefix_files(
tmp_home,
tmp_root_prefix,
tmp_path,
interpreter,
):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
if interpreter == "bash" or interpreter == "zsh":
files = [tmp_root_prefix / "etc" / "profile.d" / "mamba.sh"]
elif interpreter == "cmd.exe":
files = [
tmp_root_prefix / "condabin" / "mamba_hook.bat",
tmp_root_prefix / "condabin" / "mamba.bat",
tmp_root_prefix / "condabin" / "_mamba_activate.bat",
tmp_root_prefix / "condabin" / "activate.bat",
]
elif interpreter == "powershell":
files = [
tmp_root_prefix / "condabin" / "mamba_hook.ps1",
tmp_root_prefix / "condabin" / "Mamba.psm1",
]
elif interpreter == "fish":
files = [tmp_root_prefix / "etc" / "fish" / "conf.d" / "mamba.fish"]
elif interpreter == "xonsh":
files = [tmp_root_prefix / "etc" / "profile.d" / "mamba.xsh"]
elif interpreter in ["csh", "tcsh"]:
files = [tmp_root_prefix / "etc" / "profile.d" / "mamba.csh"]
elif interpreter == "nu":
files = [] # moved to ~/.config/nushell.nu controlled by mamba activation
else:
raise ValueError(f"Unknown shell {interpreter}")
def call(command):
return call_interpreter(command, tmp_path, interpreter)
s = [f"{umamba} shell init -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
for file in files:
assert file.exists()
s = [f"{umamba} shell deinit -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
for file in files:
assert not file.exists()
def test_shell_init_deinit_contents_cmdexe(
tmp_home,
winreg_value,
tmp_root_prefix,
tmp_path,
):
interpreter = "cmd.exe"
if interpreter not in valid_interpreters:
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
def call(command):
return call_interpreter(command, tmp_path, interpreter)
prev_value = helpers.read_windows_registry(regkey)
assert "mamba_hook.bat" not in prev_value[0]
assert not find_path_in_str(tmp_root_prefix, prev_value[0])
s = [f"{umamba} shell init -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
value_after_init = helpers.read_windows_registry(regkey)
assert "mamba_hook.bat" in value_after_init[0]
assert find_path_in_str(tmp_root_prefix, value_after_init[0])
s = [f"{umamba} shell deinit -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
value_after_deinit = helpers.read_windows_registry(regkey)
assert value_after_deinit == prev_value
@pytest.mark.parametrize("interpreter", get_interpreters(exclude=["cmd.exe"]))
def test_shell_init_deinit_contents(
tmp_home,
tmp_root_prefix,
tmp_path,
interpreter,
):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
def call(command):
return call_interpreter(command, tmp_path, interpreter)
path = Path(paths[plat][interpreter]).expanduser()
if os.path.exists(path):
with open(path) as fi:
prev_rc_contents = fi.read()
else:
prev_rc_contents = ""
if interpreter == "powershell":
assert "#region mamba initialize" not in prev_rc_contents
else:
assert "# >>> mamba initialize >>>" not in prev_rc_contents
assert not find_path_in_str(tmp_root_prefix, prev_rc_contents)
s = [f"{umamba} shell init -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
with open(path) as fi:
rc_contents_after_init = fi.read()
if interpreter == "powershell":
assert "#region mamba initialize" in rc_contents_after_init
else:
assert "# >>> mamba initialize >>>" in rc_contents_after_init
assert find_path_in_str(tmp_root_prefix, rc_contents_after_init)
s = [f"{umamba} shell deinit -r {tmp_root_prefix} {xonsh_shell_args(interpreter)}"]
call(s)
if os.path.exists(path):
with open(path) as fi:
rc_contents_after_deinit = fi.read()
else:
rc_contents_after_deinit = ""
assert rc_contents_after_deinit == prev_rc_contents
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_env_activation(tmp_home, winreg_value, tmp_root_prefix, tmp_path, interpreter):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
mamba_name = Path(umamba).stem
s = [f"{umamba} shell init -r {tmp_root_prefix}"]
stdout, stderr = call_interpreter(s, tmp_path, interpreter)
def call(s):
return call_interpreter(s, tmp_path, interpreter, interactive=True)
evars = extract_vars(["CONDA_PREFIX", "CONDA_SHLVL", "PATH"], interpreter)
if interpreter == "cmd.exe":
x = helpers.read_windows_registry(regkey)
fp = Path(x[0][1:-1])
assert fp.exists()
if interpreter in ["bash", "zsh", "powershell", "cmd.exe"]:
stdout, stderr = call(evars)
s = [f"{umamba} --help"]
stdout, stderr = call(s)
s = [f"{mamba_name} activate"] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert "condabin" in res["PATH"]
assert str(tmp_root_prefix) in res["PATH"]
assert f"CONDA_PREFIX={tmp_root_prefix}" in stdout.splitlines()
assert "CONDA_SHLVL=1" in stdout.splitlines()
# throw with non-existent
if plat != "win":
with pytest.raises(subprocess.CalledProcessError):
stdout, stderr = call([f"{mamba_name} activate nonexistent"])
call([f"{mamba_name} create -n abc -y"])
call([f"{mamba_name} create -n xyz -y"])
s = [
f"{mamba_name} activate",
f"{mamba_name} activate abc",
f"{mamba_name} activate xyz",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(tmp_root_prefix / "condabin", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "bin", res["PATH"])
assert find_path_in_str(tmp_root_prefix / "envs" / "xyz", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "envs" / "abc", res["PATH"])
s = [
f"{mamba_name} activate",
f"{mamba_name} activate abc",
f"{mamba_name} activate --stack xyz",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(tmp_root_prefix / "condabin", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "bin", res["PATH"])
assert find_path_in_str(tmp_root_prefix / "envs" / "xyz", res["PATH"])
assert find_path_in_str(tmp_root_prefix / "envs" / "abc", res["PATH"])
s = [
f"{mamba_name} activate",
f"{mamba_name} activate abc",
f"{mamba_name} activate xyz --stack",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(tmp_root_prefix / "condabin", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "bin", res["PATH"])
assert find_path_in_str(tmp_root_prefix / "envs" / "xyz", res["PATH"])
assert find_path_in_str(tmp_root_prefix / "envs" / "abc", res["PATH"])
stdout, stderr = call(evars)
res = env_to_dict(stdout)
assert find_path_in_str(tmp_root_prefix / "condabin", res["PATH"])
stdout, stderr = call([f"{mamba_name} deactivate"] + evars)
res = env_to_dict(stdout)
assert find_path_in_str(tmp_root_prefix / "condabin", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "bin", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "envs" / "xyz", res["PATH"])
assert not find_path_in_str(tmp_root_prefix / "envs" / "abc", res["PATH"])
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_activation_envvars(
tmp_home,
tmp_clean_env,
winreg_value,
tmp_root_prefix,
tmp_path,
interpreter,
):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
mamba_name = Path(umamba).stem
s = [f"{umamba} shell init -r {tmp_root_prefix}"]
stdout, stderr = call_interpreter(s, tmp_path, interpreter)
def call(s):
return call_interpreter(s, tmp_path, interpreter, interactive=True)
evars = extract_vars(["CONDA_PREFIX", "CONDA_SHLVL", "PATH"], interpreter)
if interpreter == "cmd.exe":
x = helpers.read_windows_registry(regkey)
fp = Path(x[0][1:-1])
assert fp.exists()
if interpreter in ["bash", "zsh", "powershell", "cmd.exe"]:
call([f"{mamba_name} create -n def -y"])
stdout, stderr = call([f"{mamba_name} activate def"] + evars)
res = env_to_dict(stdout)
abc_prefix = pathlib.Path(res["CONDA_PREFIX"])
state_file = abc_prefix / "conda-meta" / "state"
# Python dicts are guaranteed to keep insertion order since 3.7,
# so the following works fine!
state_file.write_text(
helpers.json.dumps(
{
"env_vars": {
"test": "Test",
"HELLO": "world",
"WORKING": "/FINE/PATH/YAY",
"AAA": "last",
}
}
)
)
stdout, stderr = call(
[f"{mamba_name} activate def"]
+ evars
+ extract_vars(["TEST", "HELLO", "WORKING", "AAA"], interpreter)
)
# assert that env vars are in the same order
activation_script, stderr = call([f"{mamba_name} shell activate -s bash -n def"])
idxs = []
for el in ["TEST", "HELLO", "WORKING", "AAA"]:
for idx, line in enumerate(activation_script.splitlines()):
if line.startswith(f"export {el}="):
idxs.append(idx)
continue
assert len(idxs) == 4
# make sure that the order is correct
assert idxs == sorted(idxs)
res = env_to_dict(stdout)
assert res["TEST"] == "Test"
assert res["HELLO"] == "world"
assert res["WORKING"] == "/FINE/PATH/YAY"
assert res["AAA"] == "last"
pkg_env_vars_d = abc_prefix / "etc" / "conda" / "env_vars.d"
pkg_env_vars_d.mkdir(exist_ok=True, parents=True)
j1 = {"PKG_ONE": "FANCY_ENV_VAR", "OVERLAP": "LOSE_AGAINST_PKG_TWO"}
j2 = {
"PKG_TWO": "SUPER_FANCY_ENV_VAR",
"OVERLAP": "WINNER",
"TEST": "LOSE_AGAINST_META_STATE",
}
(pkg_env_vars_d / "001-pkg-one.json").write_text(helpers.json.dumps(j1))
(pkg_env_vars_d / "002-pkg-two.json").write_text(helpers.json.dumps(j2))
activation_script, stderr = call([f"{mamba_name} shell activate -s bash -n def"])
stdout, stderr = call(
[f"{mamba_name} activate def"]
+ evars
+ extract_vars(
[
"TEST",
"HELLO",
"WORKING",
"AAA",
"PKG_ONE",
"PKG_TWO",
"OVERLAP",
],
interpreter,
)
)
res = env_to_dict(stdout)
assert res["HELLO"] == "world"
assert res["WORKING"] == "/FINE/PATH/YAY"
assert res["AAA"] == "last"
assert res["PKG_ONE"] == "FANCY_ENV_VAR"
assert res["PKG_TWO"] == "SUPER_FANCY_ENV_VAR"
assert res["OVERLAP"] == "WINNER"
assert res["TEST"] == "Test"
@pytest.mark.parametrize("interpreter", get_interpreters())
@pytest.mark.parametrize("shared_pkgs_dirs", [True], indirect=True)
def test_unicode_activation(
tmp_home,
winreg_value,
tmp_root_prefix,
tmp_path,
interpreter,
):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
umamba = helpers.get_umamba()
mamba_name = Path(umamba).stem
s = [f"{umamba} shell init -r {tmp_root_prefix}"]
stdout, stderr = call_interpreter(s, tmp_path, interpreter)
def call(s):
return call_interpreter(s, tmp_path, interpreter, interactive=True)
evars = extract_vars(["CONDA_PREFIX", "CONDA_SHLVL", "PATH"], interpreter)
if interpreter == "cmd.exe":
x = helpers.read_windows_registry(regkey)
fp = Path(x[0][1:-1])
assert fp.exists()
if interpreter in ["bash", "zsh", "powershell", "cmd.exe"]:
stdout, stderr = call(evars)
s = [f"{umamba} --help"]
stdout, stderr = call(s)
s = [f"{mamba_name} activate"] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert "condabin" in res["PATH"]
assert str(tmp_root_prefix) in res["PATH"]
assert f"CONDA_PREFIX={tmp_root_prefix}" in stdout.splitlines()
assert "CONDA_SHLVL=1" in stdout.splitlines()
# throw with non-existent
s = [f"{mamba_name} activate nonexistent"]
if plat != "win":
with pytest.raises(subprocess.CalledProcessError):
stdout, stderr = call(s)
u1 = "μυρτιὲς"
u2 = "终过鬼门关"
u3 = "some ™∞¢3 spaces §∞©ƒ√≈ç"
s1 = [f"{mamba_name} create -n {u1} xtensor -y -c conda-forge"]
s2 = [f"{mamba_name} create -n {u2} xtensor -y -c conda-forge"]
if interpreter == "cmd.exe":
s3 = [f'{mamba_name} create -n "{u3}" xtensor -y -c conda-forge']
else:
s3 = [f"{mamba_name} create -n '{u3}' xtensor -y -c conda-forge"]
call(s1)
call(s2)
call(s3)
for u in [u1, u2, u3]:
install_prefix_root_dir = tmp_root_prefix / f"envs/{u}"
assert (install_prefix_root_dir / "conda-meta").is_dir()
assert (install_prefix_root_dir / "conda-meta/history").exists()
if plat == "win":
include_dir = install_prefix_root_dir / "Library/include"
else:
include_dir = install_prefix_root_dir / "include"
assert include_dir.is_dir()
helpers.PackageChecker("xtensor", install_prefix_root_dir).check_install_integrity()
# unicode activation on win: todo
if plat == "win":
return
s = [
f"{mamba_name} activate",
f"{mamba_name} activate {u1}",
f"{mamba_name} activate {u2}",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(str(tmp_root_prefix / "condabin"), res["PATH"])
assert not find_path_in_str(str(tmp_root_prefix / "bin"), res["PATH"])
assert find_path_in_str(str(tmp_root_prefix / "envs" / u2), res["PATH"])
assert not find_path_in_str(str(tmp_root_prefix / "envs" / u1), res["PATH"])
s = [
f"{mamba_name} activate",
f"{mamba_name} activate {u1}",
f"{mamba_name} activate {u2} --stack",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(str(tmp_root_prefix / "condabin"), res["PATH"])
assert not find_path_in_str(str(tmp_root_prefix / "bin"), res["PATH"])
assert find_path_in_str(str(tmp_root_prefix / "envs" / u1), res["PATH"])
assert find_path_in_str(str(tmp_root_prefix / "envs" / u2), res["PATH"])
s = [
f"{mamba_name} activate",
f"{mamba_name} activate '{u3}'",
] + evars
stdout, stderr = call(s)
res = env_to_dict(stdout)
assert find_path_in_str(str(tmp_root_prefix / "condabin"), res["PATH"])
assert not find_path_in_str(str(tmp_root_prefix / "bin"), res["PATH"])
assert find_path_in_str(str(tmp_root_prefix / "envs" / u3), res["PATH"])
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_activate_path(tmp_empty_env, tmp_env_name, interpreter, tmp_path):
if interpreter not in valid_interpreters or (plat == "win" and interpreter == "bash"):
pytest.skip(f"{interpreter} not available")
# Activate env name
res = helpers.shell("activate", tmp_env_name, "-s", interpreter)
dict_res = env_to_dict(res, interpreter)
assert any([str(tmp_empty_env) in p for p in dict_res.values()])
# Activate path
res = helpers.shell("activate", str(tmp_empty_env), "-s", interpreter)
dict_res = env_to_dict(res, interpreter)
assert any([str(tmp_empty_env) in p for p in dict_res.values()])
# Activate path with home
prefix_short = str(tmp_empty_env).replace(os.path.expanduser("~"), "~")
res = helpers.shell("activate", prefix_short, "-s", interpreter)
dict_res = env_to_dict(res, interpreter)
assert any([str(tmp_empty_env) in p for p in dict_res.values()])
@pytest.mark.parametrize("conda_envs_x", ["CONDA_ENVS_DIRS", "CONDA_ENVS_PATH"])
@pytest.mark.parametrize("interpreter", get_interpreters())
def test_activate_envs_dirs(
tmp_root_prefix: Path, interpreter, tmp_path: Path, conda_envs_x, monkeypatch
):
"""Activate an environment as the non leading entry in ``envs_dirs``."""
env_name = "myenv"
helpers.create("-p", tmp_path / env_name, "--offline", "--no-rc", no_dry_run=True)
monkeypatch.setenv(conda_envs_x, f"{Path('/noperm')}{os.pathsep}{tmp_path}")
res = helpers.shell("activate", env_name, "-s", interpreter)
dict_res = env_to_dict(res, interpreter)
assert any([env_name in p for p in dict_res.values()])
@pytest.fixture
def tmp_umamba():
mamba_exe = helpers.get_umamba()
shutil.copyfile(mamba_exe, mamba_exe + ".orig")
yield mamba_exe
shutil.move(mamba_exe + ".orig", mamba_exe)
os.chmod(mamba_exe, 0o755)
@pytest.mark.skipif(
"micromamba" not in Path(helpers.get_umamba()).stem,
reason="micromamba-only test",
)
@pytest.mark.parametrize("shared_pkgs_dirs", [True], indirect=True)
@pytest.mark.parametrize("interpreter", get_self_update_interpreters())
def test_self_update(
tmp_umamba,
tmp_home,
tmp_path,
tmp_root_prefix,
winreg_value,
interpreter,
):
mamba_exe = tmp_umamba
mamba_name = Path(mamba_exe).stem
shell_init = [
f"{format_path(mamba_exe, interpreter)} shell init -s {interpreter} -r {format_path(tmp_root_prefix, interpreter)}"
]
call_interpreter(shell_init, tmp_path, interpreter)
if interpreter == "bash":
assert (Path(tmp_root_prefix) / "etc" / "profile.d" / "mamba.sh").exists()
extra_start_code = []
if interpreter == "powershell":
extra_start_code = [
f'$Env:MAMBA_EXE="{mamba_exe}"',
"$MambaModuleArgs = @{ChangePs1 = $True}",
f'Import-Module "{tmp_root_prefix}\\condabin\\Mamba.psm1" -ArgumentList $MambaModuleArgs',
"Remove-Variable MambaModuleArgs",
]
elif interpreter == "bash":
if plat == "linux":
extra_start_code = ["source ~/.bashrc"]
else:
print(mamba_exe)
extra_start_code = [
f"source {PurePosixPath(tmp_home)}/.bash_profile", # HOME from os.environ not acknowledged
f"{mamba_name} info",
"echo $MAMBA_ROOT_PREFIX",
"echo $HOME",
"ls ~",
"echo $MAMBA_EXE",
]
elif interpreter == "zsh":
extra_start_code = ["source ~/.zshrc"]
call_interpreter(
extra_start_code + [f"{mamba_name} self-update --version 0.25.1 -c conda-forge"],
tmp_path,
interpreter,
interactive=False,
)
assert Path(mamba_exe).exists()
version = subprocess.check_output([mamba_exe, "--version"])
assert version.decode("utf8").strip() == "0.25.1"
assert not Path(mamba_exe + ".bkup").exists()
shutil.copyfile(mamba_exe + ".orig", mamba_exe)
os.chmod(mamba_exe, 0o755)
| WindowsProfiles |
python | pandas-dev__pandas | pandas/plotting/_matplotlib/hist.py | {
"start": 7289,
"end": 16789
} | class ____(HistPlot):
@property
def _kind(self) -> Literal["kde"]:
return "kde"
@property
def orientation(self) -> Literal["vertical"]:
return "vertical"
def __init__(
self, data, bw_method=None, ind=None, *, weights=None, **kwargs
) -> None:
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
self.weights = weights
@staticmethod
def _get_ind(y: np.ndarray, ind):
if ind is None:
# np.nanmax() and np.nanmin() ignores the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
1000,
)
elif is_integer(ind):
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
ind,
)
return ind
@classmethod
# error: Signature of "_plot" incompatible with supertype "MPLPlot"
def _plot( # type: ignore[override]
cls,
ax: Axes,
y: np.ndarray,
style=None,
bw_method=None,
weights=None,
ind=None,
column_num=None,
stacking_id: int | None = None,
**kwds,
):
from scipy.stats import gaussian_kde
y = remove_na_arraylike(y)
gkde = gaussian_kde(y, bw_method=bw_method, weights=weights)
# gaussian_kde.evaluate(None) raises TypeError, so pyright requires this check
assert ind is not None
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:
kwds["bw_method"] = self.bw_method
kwds["ind"] = type(self)._get_ind(y, ind=self.ind)
def _post_plot_logic(self, ax: Axes, data) -> None:
ax.set_ylabel("Density")
def _grouped_plot(
plotf,
data: Series | DataFrame,
column=None,
by=None,
numeric_only: bool = True,
figsize: tuple[float, float] | None = None,
sharex: bool = True,
sharey: bool = True,
layout=None,
rot: float = 0,
ax=None,
**kwargs,
):
# error: Non-overlapping equality check (left operand type: "Optional[Tuple[float,
# float]]", right operand type: "Literal['default']")
if figsize == "default": # type: ignore[comparison-overlap]
# allowed to specify mpl default with 'default'
raise ValueError(
"figsize='default' is no longer supported. "
"Specify figure size by tuple instead"
)
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = create_subplots(
naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout
)
for ax, (key, group) in zip(flatten_axes(axes), grouped, strict=False):
if numeric_only and isinstance(group, ABCDataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_hist(
data: Series | DataFrame,
column=None,
by=None,
ax=None,
bins: int = 50,
figsize: tuple[float, float] | None = None,
layout=None,
sharex: bool = False,
sharey: bool = False,
rot: float = 90,
grid: bool = True,
xlabelsize: int | None = None,
xrot=None,
ylabelsize: int | None = None,
yrot=None,
legend: bool = False,
**kwargs,
):
"""
Grouped histogram
Parameters
----------
data : Series/DataFrame
column : object, optional
by : object, optional
ax : axes, optional
bins : int, default 50
figsize : tuple, optional
layout : optional
sharex : bool, default False
sharey : bool, default False
rot : float, default 90
grid : bool, default True
legend: : bool, default False
kwargs : dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
collection of Matplotlib Axes
"""
if legend:
assert "label" not in kwargs
if data.ndim == 1:
kwargs["label"] = data.name
elif column is None:
kwargs["label"] = data.columns
else:
kwargs["label"] = column
def plot_group(group, ax) -> None:
ax.hist(group.dropna().values, bins=bins, **kwargs)
if legend:
ax.legend()
if xrot is None:
xrot = rot
fig, axes = _grouped_plot(
plot_group,
data,
column=column,
by=by,
sharex=sharex,
sharey=sharey,
ax=ax,
figsize=figsize,
layout=layout,
rot=rot,
)
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
maybe_adjust_figure(
fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3
)
return axes
def hist_series(
self: Series,
by=None,
ax=None,
grid: bool = True,
xlabelsize: int | None = None,
xrot=None,
ylabelsize: int | None = None,
yrot=None,
figsize: tuple[float, float] | None = None,
bins: int = 10,
legend: bool = False,
**kwds,
):
import matplotlib.pyplot as plt
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is None:
if kwds.get("layout", None) is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop(
"figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)
)
if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError("passed axis not bound to passed figure")
values = self.dropna().values
if legend:
kwds["label"] = self.name
ax.hist(values, bins=bins, **kwds)
if legend:
ax.legend()
ax.grid(grid)
axes = np.array([ax])
set_ticks_props(
axes,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
)
else:
if "figure" in kwds:
raise ValueError(
"Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance will be created"
)
axes = _grouped_hist(
self,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
if hasattr(axes, "ndim"):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def hist_frame(
data: DataFrame,
column=None,
by=None,
grid: bool = True,
xlabelsize: int | None = None,
xrot=None,
ylabelsize: int | None = None,
yrot=None,
ax=None,
sharex: bool = False,
sharey: bool = False,
figsize: tuple[float, float] | None = None,
layout=None,
bins: int = 10,
legend: bool = False,
**kwds,
):
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is not None:
axes = _grouped_hist(
data,
column=column,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
sharex=sharex,
sharey=sharey,
layout=layout,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndex)):
column = [column]
data = data[column]
# GH32590
data = data.select_dtypes(
include=(np.number, "datetime64", "datetimetz"), exclude="timedelta"
)
naxes = len(data.columns)
if naxes == 0:
raise ValueError(
"hist method requires numerical or datetime columns, nothing to plot."
)
fig, axes = create_subplots(
naxes=naxes,
ax=ax,
squeeze=False,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
)
can_set_label = "label" not in kwds
for ax, col in zip(flatten_axes(axes), data.columns, strict=False):
if legend and can_set_label:
kwds["label"] = col
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
if legend:
ax.legend()
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
maybe_adjust_figure(fig, wspace=0.3, hspace=0.3)
return axes
| KdePlot |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/modules/environ.py | {
"start": 548,
"end": 2895
} | class ____(SpyderBaseJupyterAPI):
"""
API for managing the environment variables on the remote server.
Raises
------
SpyderRemoteAPIError
If the API call fails.
aiohttp.ClientResponseError
If the API call fails with a client error.
"""
base_url = SPYDER_PLUGIN_NAME + "/environ"
async def _raise_for_status(self, response: aiohttp.ClientResponse):
return response.raise_for_status()
async def get(self, name: str, default: str | None = None) -> str | None:
"""
Get the environment variable value for the given name.
Parameters
----------
name : str
The name of the environment variable.
Returns
-------
str
The value of the environment variable.
Raises
------
SpyderRemoteAPIError
If the API call fails.
"""
try:
async with self.session.get(self.api_url / name) as response:
return await response.text()
except aiohttp.ClientResponseError as e:
if e.status == HTTPStatus.NOT_FOUND:
return default
msg = f"Failed to get environment variable '{name}': {e}"
raise SpyderRemoteAPIError(msg) from e
async def set(self, name: str, value: str) -> None:
"""
Set the environment variable value for the given name.
Parameters
----------
name : str
The name of the environment variable.
value : str
The value of the environment variable.
"""
await self.session.post(self.api_url / name, data={"value": value})
async def delete(self, name: str) -> None:
"""
Delete the environment variable for the given name.
Parameters
----------
name : str
The name of the environment variable.
"""
await self.session.delete(self.api_url / name)
async def to_dict(self) -> dict[str, str]:
"""
Get the environment variables as a dictionary.
Returns
-------
dict[str, str]
The environment variables as a dictionary.
"""
async with self.session.get(self.api_url) as response:
return await response.json()
| SpyderRemoteEnvironAPI |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/hitl.py | {
"start": 1624,
"end": 1907
} | class ____(BaseModel):
"""Schema for writing the response part of a Human-in-the-loop detail for a specific task instance."""
ti_id: UUID
chosen_options: list[str] = Field(min_length=1)
params_input: dict[str, Any] = Field(default_factory=dict)
| UpdateHITLDetailPayload |
python | apache__airflow | providers/common/compat/tests/unit/common/compat/test__compat_utils.py | {
"start": 919,
"end": 9054
} | class ____:
"""Unit tests for the create_module_getattr utility function."""
@pytest.mark.parametrize(
("name", "import_map", "is_module"),
[
("BaseHook", {"BaseHook": "airflow.hooks.base"}, False),
("timezone", {}, True), # Will be tested with module_map
("utcnow", {"utcnow": "airflow.utils.timezone"}, False),
],
)
def test_single_path_import(self, name, import_map, is_module):
"""Test basic single-path imports work correctly."""
if name == "timezone":
getattr_fn = create_module_getattr(import_map={}, module_map={name: "airflow.utils.timezone"})
else:
getattr_fn = create_module_getattr(import_map=import_map)
result = getattr_fn(name)
if is_module:
# Check if it's a module
import types
assert isinstance(result, types.ModuleType)
else:
# Check if it's a class or callable
assert isinstance(result, type) or callable(result)
@pytest.mark.parametrize(
("name", "paths", "should_succeed"),
[
("BaseHook", ("airflow.sdk", "airflow.hooks.base"), True),
("NonExistent", ("fake.module1", "fake.module2"), False),
("timezone", ("airflow.sdk.timezone", "airflow.utils.timezone"), True),
],
)
def test_fallback_import_mechanism(self, name, paths, should_succeed):
"""Test that fallback paths are tried in order."""
if name == "timezone":
getattr_fn = create_module_getattr(import_map={}, module_map={name: paths})
else:
getattr_fn = create_module_getattr(import_map={name: paths})
if should_succeed:
result = getattr_fn(name)
assert result is not None
else:
with pytest.raises(ImportError, match=f"Could not import {name!r}"):
getattr_fn(name)
def test_rename_map_tries_new_then_old(self):
"""Test that renamed classes try new name first, then fall back to old."""
rename_map = {
"Asset": ("airflow.sdk", "airflow.datasets", "Dataset"),
}
getattr_fn = create_module_getattr(import_map={}, rename_map=rename_map)
# Should successfully import (either Asset from airflow.sdk or Dataset from airflow.datasets)
result = getattr_fn("Asset")
assert result is not None
# In Airflow 3, it's Asset; in Airflow 2, it would be Dataset
assert result.__name__ in ("Asset", "Dataset")
def test_module_map_imports_whole_module(self):
"""Test that module_map imports entire modules, not just attributes."""
module_map = {"timezone": "airflow.utils.timezone"}
getattr_fn = create_module_getattr(import_map={}, module_map=module_map)
result = getattr_fn("timezone")
assert hasattr(result, "utc") # Module should have attributes
assert hasattr(result, "utcnow")
def test_exception_chaining_preserves_context(self):
"""Test that exception chaining with 'from' preserves original error context."""
import_map = {"NonExistent": ("fake.module1", "fake.module2")}
getattr_fn = create_module_getattr(import_map=import_map)
with pytest.raises(ImportError) as exc_info:
getattr_fn("NonExistent")
# Verify exception has __cause__ (exception chaining)
assert exc_info.value.__cause__ is not None
@pytest.mark.parametrize(
("error_scenario", "map_config", "expected_match"),
[
(
"import_error",
{"import_map": {"Fake": ("nonexistent.mod1", "nonexistent.mod2")}},
"Could not import 'Fake' from any of:",
),
(
"module_error",
{"module_map": {"fake_mod": ("nonexistent.module1", "nonexistent.module2")}},
"Could not import module 'fake_mod' from any of:",
),
(
"rename_error",
{"rename_map": {"NewName": ("fake.new", "fake.old", "OldName")}},
"Could not import 'NewName' from 'fake.new' or 'OldName' from 'fake.old'",
),
],
)
def test_error_messages_include_all_paths(self, error_scenario, map_config, expected_match):
"""Test that error messages include all attempted paths for debugging."""
getattr_fn = create_module_getattr(
import_map=map_config.get("import_map", {}),
module_map=map_config.get("module_map"),
rename_map=map_config.get("rename_map"),
)
keys = (
map_config.get("import_map", {}).keys()
or map_config.get("module_map", {}).keys()
or map_config.get("rename_map", {}).keys()
)
name = next(iter(keys))
with pytest.raises(ImportError, match=expected_match):
getattr_fn(name)
def test_attribute_error_for_unknown_name(self):
"""Test that accessing unknown attributes raises AttributeError with correct message."""
getattr_fn = create_module_getattr(import_map={"BaseHook": "airflow.hooks.base"})
with pytest.raises(AttributeError, match="module has no attribute 'UnknownClass'"):
getattr_fn("UnknownClass")
def test_optional_params_default_to_empty(self):
"""Test that module_map and rename_map default to empty dicts when not provided."""
getattr_fn = create_module_getattr(import_map={"BaseHook": "airflow.hooks.base"})
# Should work fine without module_map and rename_map
result = getattr_fn("BaseHook")
assert result is not None
# Should raise AttributeError for names not in any map
with pytest.raises(AttributeError):
getattr_fn("NonExistent")
def test_priority_order_rename_then_module_then_import(self):
"""Test that rename_map has priority over module_map, which has priority over import_map."""
# If a name exists in multiple maps, rename_map should be checked first
import_map = {"test": "airflow.hooks.base"}
module_map = {"test": "airflow.utils.timezone"}
rename_map = {"test": ("airflow.sdk", "airflow.datasets", "Dataset")}
getattr_fn = create_module_getattr(
import_map=import_map,
module_map=module_map,
rename_map=rename_map,
)
# Should use rename_map (which tries to import Asset/Dataset)
result = getattr_fn("test")
# Verify it came from rename_map (Asset or Dataset class, depending on Airflow version)
assert hasattr(result, "__name__")
assert result.__name__ in ("Asset", "Dataset")
def test_module_not_found_error_is_caught(self):
"""Test that ModuleNotFoundError (Python 3.6+) is properly caught."""
import_map = {"Fake": "completely.nonexistent.module.that.does.not.exist"}
getattr_fn = create_module_getattr(import_map=import_map)
# Should catch ModuleNotFoundError and raise ImportError
with pytest.raises(ImportError, match="Could not import 'Fake'"):
getattr_fn("Fake")
@pytest.mark.parametrize(
("map_type", "config"),
[
("import_map", {"BaseHook": "airflow.hooks.base"}),
("module_map", {"timezone": "airflow.utils.timezone"}),
("rename_map", {"Asset": ("airflow.sdk", "airflow.datasets", "Dataset")}),
],
)
def test_each_map_type_works_independently(self, map_type, config):
"""Test that each map type (import, module, rename) works correctly on its own."""
kwargs = {"import_map": {}}
if map_type == "import_map":
kwargs["import_map"] = config
elif map_type == "module_map":
kwargs["module_map"] = config
elif map_type == "rename_map":
kwargs["rename_map"] = config
getattr_fn = create_module_getattr(**kwargs)
name = next(iter(config.keys()))
result = getattr_fn(name)
assert result is not None
| TestCreateModuleGetattr |
python | encode__django-rest-framework | rest_framework/relations.py | {
"start": 7880,
"end": 8205
} | class ____(RelatedField):
"""
A read only field that represents its targets using their
plain string representation.
"""
def __init__(self, **kwargs):
kwargs['read_only'] = True
super().__init__(**kwargs)
def to_representation(self, value):
return str(value)
| StringRelatedField |
python | openai__openai-python | src/openai/types/beta/assistant_create_params.py | {
"start": 988,
"end": 5348
} | class ____(TypedDict, total=False):
model: Required[Union[str, ChatModel]]
"""ID of the model to use.
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
"""
description: Optional[str]
"""The description of the assistant. The maximum length is 512 characters."""
instructions: Optional[str]
"""The system instructions that the assistant uses.
The maximum length is 256,000 characters.
"""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
name: Optional[str]
"""The name of the assistant. The maximum length is 256 characters."""
reasoning_effort: Optional[ReasoningEffort]
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
"""
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
"""
temperature: Optional[float]
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic.
"""
tool_resources: Optional[ToolResources]
"""A set of resources that are used by the assistant's tools.
The resources are specific to the type of tool. For example, the
`code_interpreter` tool requires a list of file IDs, while the `file_search`
tool requires a list of vector store IDs.
"""
tools: Iterable[AssistantToolParam]
"""A list of tool enabled on the assistant.
There can be a maximum of 128 tools per assistant. Tools can be of types
`code_interpreter`, `file_search`, or `function`.
"""
top_p: Optional[float]
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
"""
| AssistantCreateParams |
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-to-make-string-k-special.py | {
"start": 2529,
"end": 2859
} | class ____(object):
def minimumDeletions(self, word, k):
"""
:type word: str
:type k: int
:rtype: int
"""
cnt = [0]*26
for x in word:
cnt[ord(x)-ord('a')] += 1
return min(sum(y if y < x else max(y-(x+k), 0) for y in cnt if y) for x in cnt if x)
| Solution3 |
python | pytest-dev__pytest-django | tests/test_db_setup.py | {
"start": 9021,
"end": 10704
} | class ____:
db_settings: ClassVar = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "/tmp/should-not-be-used",
},
"db2": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "db_name",
"TEST": {"NAME": "test_custom_db_name"},
},
}
def test_sqlite_database_renamed(self, django_pytester: DjangoPytester) -> None:
pytest.importorskip("xdist")
django_pytester.create_test_module(
"""
import pytest
from django.db import connections
@pytest.mark.django_db
def test_a():
(conn_db2, conn_default) = sorted(
connections.all(),
key=lambda conn: conn.alias,
)
assert conn_default.vendor == 'sqlite'
db_name = conn_default.creation._get_test_db_name()
# can_share_in_memory_db was removed in Django 2.1, and
# used in _get_test_db_name before.
if getattr(conn_default.features, "can_share_in_memory_db", True):
assert 'file:memorydb' in db_name
else:
assert db_name == ":memory:"
assert conn_db2.vendor == 'sqlite'
db_name = conn_db2.creation._get_test_db_name()
assert db_name.startswith('test_custom_db_name_gw')
"""
)
result = django_pytester.runpytest_subprocess("--tb=short", "-vv", "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
| TestSqliteWithMultipleDbsAndXdist |
python | Farama-Foundation__Gymnasium | gymnasium/envs/registration.py | {
"start": 8873,
"end": 42407
} | class ____(Enum):
"""All possible vectorization modes used in `make_vec`."""
ASYNC = "async"
SYNC = "sync"
VECTOR_ENTRY_POINT = "vector_entry_point"
# Global registry of environments. Meant to be accessed through `register` and `make`
registry: dict[str, EnvSpec] = {}
current_namespace: str | None = None
def parse_env_id(env_id: str) -> tuple[str | None, str, int | None]:
"""Parse environment ID string format - ``[namespace/](env-name)[-v(version)]`` where the namespace and version are optional.
Args:
env_id: The environment id to parse
Returns:
A tuple of environment namespace, environment name and version number
Raises:
Error: If the environment id is not valid environment regex
"""
match = ENV_ID_RE.fullmatch(env_id)
if not match:
raise error.Error(
f"Malformed environment ID: {env_id}. (Currently all IDs must be of the form [namespace/](env-name)-v(version). (namespace is optional))"
)
ns, name, version = match.group("namespace", "name", "version")
if version is not None:
version = int(version)
return ns, name, version
def get_env_id(ns: str | None, name: str, version: int | None) -> str:
"""Get the full env ID given a name and (optional) version and namespace. Inverse of :meth:`parse_env_id`.
Args:
ns: The environment namespace
name: The environment name
version: The environment version
Returns:
The environment id
"""
full_name = name
if ns is not None:
full_name = f"{ns}/{name}"
if version is not None:
full_name = f"{full_name}-v{version}"
return full_name
def find_highest_version(ns: str | None, name: str) -> int | None:
"""Finds the highest registered version of the environment given the namespace and name in the registry.
Args:
ns: The environment namespace
name: The environment name (id)
Returns:
The highest version of an environment with matching namespace and name, otherwise ``None`` is returned.
"""
version: list[int] = [
env_spec.version
for env_spec in registry.values()
if env_spec.namespace == ns
and env_spec.name == name
and env_spec.version is not None
]
return max(version, default=None)
def _check_namespace_exists(ns: str | None):
"""Check if a namespace exists. If it doesn't, print a helpful error message."""
# If the namespace is none, then the namespace does exist
if ns is None:
return
# Check if the namespace exists in one of the registry's specs
namespaces: set[str] = {
env_spec.namespace
for env_spec in registry.values()
if env_spec.namespace is not None
}
if ns in namespaces:
return
# Otherwise, the namespace doesn't exist and raise a helpful message
suggestion = (
difflib.get_close_matches(ns, namespaces, n=1) if len(namespaces) > 0 else None
)
if suggestion:
suggestion_msg = f"Did you mean: `{suggestion[0]}`?"
else:
suggestion_msg = f"Have you installed the proper package for {ns}?"
raise error.NamespaceNotFound(f"Namespace {ns} not found. {suggestion_msg}")
def _check_name_exists(ns: str | None, name: str):
"""Check if an env exists in a namespace. If it doesn't, print a helpful error message."""
# First check if the namespace exists
_check_namespace_exists(ns)
# Then check if the name exists
names: set[str] = {
env_spec.name for env_spec in registry.values() if env_spec.namespace == ns
}
if name in names:
return
# Otherwise, raise a helpful error to the user
suggestion = difflib.get_close_matches(name, names, n=1)
namespace_msg = f" in namespace {ns}" if ns else ""
suggestion_msg = f" Did you mean: `{suggestion[0]}`?" if suggestion else ""
raise error.NameNotFound(
f"Environment `{name}` doesn't exist{namespace_msg}.{suggestion_msg}"
)
def _check_version_exists(ns: str | None, name: str, version: int | None):
"""Check if an env version exists in a namespace. If it doesn't, print a helpful error message.
This is a complete test whether an environment identifier is valid, and will provide the best available hints.
Args:
ns: The environment namespace
name: The environment space
version: The environment version
Raises:
DeprecatedEnv: The environment doesn't exist but a default version does
VersionNotFound: The ``version`` used doesn't exist
DeprecatedEnv: Environment version is deprecated
"""
if get_env_id(ns, name, version) in registry:
return
_check_name_exists(ns, name)
if version is None:
return
message = f"Environment version `v{version}` for environment `{get_env_id(ns, name, None)}` doesn't exist."
env_specs = [
env_spec
for env_spec in registry.values()
if env_spec.namespace == ns and env_spec.name == name
]
env_specs = sorted(env_specs, key=lambda env_spec: int(env_spec.version or -1))
default_spec = [env_spec for env_spec in env_specs if env_spec.version is None]
if default_spec:
message += f" It provides the default version `{default_spec[0].id}`."
if len(env_specs) == 1:
raise error.DeprecatedEnv(message)
# Process possible versioned environments
versioned_specs = [
env_spec for env_spec in env_specs if env_spec.version is not None
]
latest_spec = max(versioned_specs, key=lambda env_spec: env_spec.version, default=None) # type: ignore
if latest_spec is not None and version > latest_spec.version:
version_list_msg = ", ".join(f"`v{env_spec.version}`" for env_spec in env_specs)
message += f" It provides versioned environments: [ {version_list_msg} ]."
raise error.VersionNotFound(message)
if latest_spec is not None and version < latest_spec.version:
raise error.DeprecatedEnv(
f"Environment version v{version} for `{get_env_id(ns, name, None)}` is deprecated. "
f"Please use `{latest_spec.id}` instead."
)
def _check_spec_register(testing_spec: EnvSpec):
"""Checks whether the spec is valid to be registered. Helper function for `register`."""
latest_versioned_spec = max(
(
env_spec
for env_spec in registry.values()
if env_spec.namespace == testing_spec.namespace
and env_spec.name == testing_spec.name
and env_spec.version is not None
),
key=lambda spec_: int(spec_.version), # type: ignore
default=None,
)
unversioned_spec = next(
(
env_spec
for env_spec in registry.values()
if env_spec.namespace == testing_spec.namespace
and env_spec.name == testing_spec.name
and env_spec.version is None
),
None,
)
if unversioned_spec is not None and testing_spec.version is not None:
raise error.RegistrationError(
"Can't register the versioned environment "
f"`{testing_spec.id}` when the unversioned environment "
f"`{unversioned_spec.id}` of the same name already exists."
)
elif latest_versioned_spec is not None and testing_spec.version is None:
raise error.RegistrationError(
f"Can't register the unversioned environment `{testing_spec.id}` when the versioned environment "
f"`{latest_versioned_spec.id}` of the same name already exists. Note: the default behavior is "
"that `gym.make` with the unversioned environment will return the latest versioned environment"
)
def _check_metadata(testing_metadata: dict[str, Any]):
"""Check the metadata of an environment."""
if not isinstance(testing_metadata, dict):
raise error.InvalidMetadata(
f"Expect the environment metadata to be dict, actual type: {type(metadata)}"
)
render_modes = testing_metadata.get("render_modes")
if render_modes is None:
logger.warn(
f"The environment creator metadata doesn't include `render_modes`, contains: {list(testing_metadata.keys())}"
)
elif not isinstance(render_modes, Iterable):
logger.warn(
f"Expects the environment metadata render_modes to be a Iterable, actual type: {type(render_modes)}"
)
def _find_spec(env_id: str) -> EnvSpec:
# For string id's, load the environment spec from the registry then make the environment spec
assert isinstance(env_id, str)
# The environment name can include an unloaded module in "module:env_name" style
module, env_name = (None, env_id) if ":" not in env_id else env_id.split(":")
if module is not None:
try:
importlib.import_module(module)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"{e}. Environment registration via importing a module failed. "
f"Check whether '{module}' contains env registration and can be imported."
) from e
# load the env spec from the registry
env_spec = registry.get(env_name)
# update env spec is not version provided, raise warning if out of date
ns, name, version = parse_env_id(env_name)
latest_version = find_highest_version(ns, name)
if version is not None and latest_version is not None and latest_version > version:
logger.deprecation(
f"The environment {env_name} is out of date. You should consider "
f"upgrading to version `v{latest_version}`."
)
if version is None and latest_version is not None:
version = latest_version
new_env_id = get_env_id(ns, name, version)
env_spec = registry.get(new_env_id)
logger.warn(
f"Using the latest versioned environment `{new_env_id}` "
f"instead of the unversioned environment `{env_name}`."
)
if env_spec is None:
_check_version_exists(ns, name, version)
raise error.Error(
f"No registered env with id: {env_name}. Did you register it, or import the package that registers it? Use `gymnasium.pprint_registry()` to see all of the registered environments."
)
return env_spec
def load_env_creator(name: str) -> EnvCreator | VectorEnvCreator:
"""Loads an environment with name of style ``"(import path):(environment name)"`` and returns the environment creation function, normally the environment class type.
Args:
name: The environment name
Returns:
The environment constructor for the given environment name.
"""
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
def register_envs(env_module: ModuleType):
"""A No-op function such that it can appear to IDEs that a module is used."""
pass
@contextlib.contextmanager
def namespace(ns: str):
"""Context manager for modifying the current namespace."""
global current_namespace
old_namespace = current_namespace
current_namespace = ns
yield
current_namespace = old_namespace
def register(
id: str,
entry_point: EnvCreator | str | None = None,
reward_threshold: float | None = None,
nondeterministic: bool = False,
max_episode_steps: int | None = None,
order_enforce: bool = True,
disable_env_checker: bool = False,
additional_wrappers: tuple[WrapperSpec, ...] = (),
vector_entry_point: VectorEnvCreator | str | None = None,
kwargs: dict | None = None,
):
"""Registers an environment in gymnasium with an ``id`` to use with :meth:`gymnasium.make` with the ``entry_point`` being a string or callable for creating the environment.
The ``id`` parameter corresponds to the name of the environment, with the syntax as follows:
``[namespace/](env_name)[-v(version)]`` where ``namespace`` and ``-v(version)`` is optional.
It takes arbitrary keyword arguments, which are passed to the :class:`EnvSpec` ``kwargs`` parameter.
Args:
id: The environment id
entry_point: The entry point for creating the environment
reward_threshold: The reward threshold considered for an agent to have learnt the environment
nondeterministic: If the environment is nondeterministic (even with knowledge of the initial seed and all actions, the same state cannot be reached)
max_episode_steps: The maximum number of episodes steps before truncation. Used by the :class:`gymnasium.wrappers.TimeLimit` wrapper if not ``None``.
order_enforce: If to enable the order enforcer wrapper to ensure users run functions in the correct order.
If ``True``, then the :class:`gymnasium.wrappers.OrderEnforcing` is applied to the environment.
disable_env_checker: If to disable the :class:`gymnasium.wrappers.PassiveEnvChecker` to the environment.
additional_wrappers: Additional wrappers to apply the environment.
vector_entry_point: The entry point for creating the vector environment
kwargs: arbitrary keyword arguments which are passed to the environment constructor on initialisation.
Changelogs:
v1.0.0 - `autoreset` and `apply_api_compatibility` parameter was removed
"""
assert (
entry_point is not None or vector_entry_point is not None
), "Either `entry_point` or `vector_entry_point` (or both) must be provided"
ns, name, version = parse_env_id(id)
if kwargs is None:
kwargs = dict()
if current_namespace is not None:
if (
kwargs.get("namespace") is not None
and kwargs.get("namespace") != current_namespace
):
logger.warn(
f"Custom namespace `{kwargs.get('namespace')}` is being overridden by namespace `{current_namespace}`. "
f"If you are developing a plugin you shouldn't specify a namespace in `register` calls. "
"The namespace is specified through the entry point package metadata."
)
ns_id = current_namespace
else:
ns_id = ns
full_env_id = get_env_id(ns_id, name, version)
new_spec = EnvSpec(
id=full_env_id,
entry_point=entry_point,
reward_threshold=reward_threshold,
nondeterministic=nondeterministic,
max_episode_steps=max_episode_steps,
order_enforce=order_enforce,
disable_env_checker=disable_env_checker,
kwargs=kwargs,
additional_wrappers=additional_wrappers,
vector_entry_point=vector_entry_point,
)
_check_spec_register(new_spec)
if new_spec.id in registry:
logger.warn(f"Overriding environment {new_spec.id} already in registry.")
registry[new_spec.id] = new_spec
def make(
id: str | EnvSpec,
max_episode_steps: int | None = None,
disable_env_checker: bool | None = None,
**kwargs: Any,
) -> Env:
"""Creates an environment previously registered with :meth:`gymnasium.register` or a :class:`EnvSpec`.
To find all available environments use ``gymnasium.envs.registry.keys()`` for all valid ids.
Args:
id: A string for the environment id or a :class:`EnvSpec`. Optionally if using a string, a module to import can be included, e.g. ``'module:Env-v0'``.
This is equivalent to importing the module first to register the environment followed by making the environment.
max_episode_steps: Maximum length of an episode, can override the registered :class:`EnvSpec` ``max_episode_steps``
with the value being passed to :class:`gymnasium.wrappers.TimeLimit`.
Using ``max_episode_steps=-1`` will not apply the wrapper to the environment.
disable_env_checker: If to add :class:`gymnasium.wrappers.PassiveEnvChecker`, ``None`` will default to the
:class:`EnvSpec` ``disable_env_checker`` value otherwise use this value will be used.
kwargs: Additional arguments to pass to the environment constructor.
Returns:
An instance of the environment with wrappers applied.
Raises:
Error: If the ``id`` doesn't exist in the :attr:`registry`
Changelogs:
v1.0.0 - `autoreset` and `apply_api_compatibility` was removed
"""
if isinstance(id, EnvSpec):
env_spec = id
if not hasattr(env_spec, "additional_wrappers"):
logger.warn(
f"The env spec passed to `make` does not have a `additional_wrappers`, set it to an empty tuple. Env_spec={env_spec}"
)
env_spec.additional_wrappers = ()
else:
# For string id's, load the environment spec from the registry then make the environment spec
assert isinstance(id, str)
# The environment name can include an unloaded module in "module:env_name" style
env_spec = _find_spec(id)
assert isinstance(env_spec, EnvSpec)
# Update the env spec kwargs with the `make` kwargs
env_spec_kwargs = copy.deepcopy(env_spec.kwargs)
env_spec_kwargs.update(kwargs)
# Load the environment creator
if env_spec.entry_point is None:
raise error.Error(f"{env_spec.id} registered but entry_point is not specified")
elif callable(env_spec.entry_point):
env_creator = env_spec.entry_point
else:
# Assume it's a string
env_creator = load_env_creator(env_spec.entry_point)
# Determine if to use the rendering
render_modes: list[str] | None = None
if hasattr(env_creator, "metadata"):
_check_metadata(env_creator.metadata)
render_modes = env_creator.metadata.get("render_modes")
render_mode = env_spec_kwargs.get("render_mode")
apply_human_rendering = False
apply_render_collection = False
# If mode is not valid, try applying HumanRendering/RenderCollection wrappers
if (
render_mode is not None
and render_modes is not None
and render_mode not in render_modes
):
displayable_modes = {"rgb_array", "rgb_array_list"}.intersection(render_modes)
if render_mode == "human" and len(displayable_modes) > 0:
logger.warn(
"You are trying to use 'human' rendering for an environment that doesn't natively support it. "
"The HumanRendering wrapper is being applied to your environment."
)
env_spec_kwargs["render_mode"] = displayable_modes.pop()
apply_human_rendering = True
elif (
render_mode.endswith("_list")
and render_mode[: -len("_list")] in render_modes
):
env_spec_kwargs["render_mode"] = render_mode[: -len("_list")]
apply_render_collection = True
else:
logger.warn(
f"The environment is being initialised with render_mode={render_mode!r} "
f"that is not in the possible render_modes ({render_modes})."
)
try:
env = env_creator(**env_spec_kwargs)
except TypeError as e:
if (
str(e).find("got an unexpected keyword argument 'render_mode'") >= 0
and apply_human_rendering
):
raise error.Error(
f"You passed render_mode='human' although {env_spec.id} doesn't implement human-rendering natively. "
"Gym tried to apply the HumanRendering wrapper but it looks like your environment is using the old "
"rendering API, which is not supported by the HumanRendering wrapper."
) from e
else:
raise type(e)(
f"{e} was raised from the environment creator for {env_spec.id} with kwargs ({env_spec_kwargs})"
)
if not isinstance(env, gym.Env):
if (
str(env.__class__.__base__) == "<class 'gym.core.Env'>"
or str(env.__class__.__base__) == "<class 'gym.core.Wrapper'>"
):
raise TypeError(
"Gym is incompatible with Gymnasium, please update the environment class to `gymnasium.Env`. "
"See https://gymnasium.farama.org/introduction/create_custom_env/ for more info."
)
else:
raise TypeError(
f"The environment must inherit from the gymnasium.Env class, actual class: {type(env)}. "
"See https://gymnasium.farama.org/introduction/create_custom_env/ for more info."
)
# Set the minimal env spec for the environment.
env.unwrapped.spec = EnvSpec(
id=env_spec.id,
entry_point=env_spec.entry_point,
reward_threshold=env_spec.reward_threshold,
nondeterministic=env_spec.nondeterministic,
max_episode_steps=None,
order_enforce=False,
disable_env_checker=True,
kwargs=env_spec_kwargs,
additional_wrappers=(),
vector_entry_point=env_spec.vector_entry_point,
)
# Check if pre-wrapped wrappers
assert env.spec is not None
num_prior_wrappers = len(env.spec.additional_wrappers)
if (
env_spec.additional_wrappers[:num_prior_wrappers]
!= env.spec.additional_wrappers
):
for env_spec_wrapper_spec, recreated_wrapper_spec in zip(
env_spec.additional_wrappers, env.spec.additional_wrappers
):
raise ValueError(
f"The environment's wrapper spec {recreated_wrapper_spec} is different from the saved `EnvSpec` additional wrapper {env_spec_wrapper_spec}"
)
# Run the environment checker as the lowest level wrapper
if disable_env_checker is False or (
disable_env_checker is None and env_spec.disable_env_checker is False
):
env = gym.wrappers.PassiveEnvChecker(env)
# Add the order enforcing wrapper
if env_spec.order_enforce:
env = gym.wrappers.OrderEnforcing(env)
# Add the time limit wrapper
if max_episode_steps != -1:
if max_episode_steps is not None:
env = gym.wrappers.TimeLimit(env, max_episode_steps)
elif env_spec.max_episode_steps is not None:
env = gym.wrappers.TimeLimit(env, env_spec.max_episode_steps)
for wrapper_spec in env_spec.additional_wrappers[num_prior_wrappers:]:
if wrapper_spec.kwargs is None:
raise ValueError(
f"{wrapper_spec.name} wrapper does not inherit from `gymnasium.utils.RecordConstructorArgs`, therefore, the wrapper cannot be recreated."
)
env = load_env_creator(wrapper_spec.entry_point)(env=env, **wrapper_spec.kwargs)
# Add human rendering wrapper
if apply_human_rendering:
env = gym.wrappers.HumanRendering(env)
elif apply_render_collection:
env = gym.wrappers.RenderCollection(env)
return env
def make_vec(
id: str | EnvSpec,
num_envs: int = 1,
vectorization_mode: VectorizeMode | str | None = None,
vector_kwargs: dict[str, Any] | None = None,
wrappers: Sequence[Callable[[Env], Wrapper]] | None = None,
**kwargs,
) -> gym.vector.VectorEnv:
"""Create a vector environment according to the given ID.
To find all available environments use :func:`gymnasium.pprint_registry` or ``gymnasium.registry.keys()`` for all valid ids.
We refer to the Vector environment as the vectorizor while the environment being vectorized is the base or vectorized environment (``vectorizor(vectorized env)``).
Args:
id: Name of the environment. Optionally, a module to import can be included, e.g. 'module:Env-v0'
num_envs: Number of environments to create
vectorization_mode: The vectorization method used, defaults to ``None`` such that if env id' spec has a ``vector_entry_point`` (not ``None``),
this is first used otherwise defaults to ``sync`` to use the :class:`gymnasium.vector.SyncVectorEnv`.
Valid modes are ``"async"``, ``"sync"`` or ``"vector_entry_point"``. Recommended to use the :class:`VectorizeMode` enum rather than strings.
vector_kwargs: Additional arguments to pass to the vectorizor environment constructor, i.e., ``SyncVectorEnv(..., **vector_kwargs)``.
wrappers: A sequence of wrapper functions to apply to the base environment. Can only be used in ``"sync"`` or ``"async"`` mode.
**kwargs: Additional arguments passed to the base environment constructor.
Returns:
An instance of the environment.
Raises:
Error: If the ``id`` doesn't exist then an error is raised
"""
if vector_kwargs is None:
vector_kwargs = {}
if wrappers is None:
wrappers = []
if isinstance(id, EnvSpec):
env_spec = id
elif isinstance(id, str):
env_spec = _find_spec(id)
else:
raise error.Error(f"Invalid id type: {type(id)}. Expected `str` or `EnvSpec`")
env_spec = copy.deepcopy(env_spec)
env_spec_kwargs = env_spec.kwargs
# for sync or async, these parameters should be passed in `make(..., **kwargs)` rather than in the env spec kwargs, therefore, we `reset` the kwargs
env_spec.kwargs = dict()
num_envs = env_spec_kwargs.pop("num_envs", num_envs)
vectorization_mode = env_spec_kwargs.pop("vectorization_mode", vectorization_mode)
vector_kwargs = env_spec_kwargs.pop("vector_kwargs", vector_kwargs)
wrappers = env_spec_kwargs.pop("wrappers", wrappers)
env_spec_kwargs.update(kwargs)
# Specify the vectorization mode if None or update to a `VectorizeMode`
if vectorization_mode is None:
if env_spec.vector_entry_point is not None:
vectorization_mode = VectorizeMode.VECTOR_ENTRY_POINT
else:
vectorization_mode = VectorizeMode.SYNC
else:
try:
vectorization_mode = VectorizeMode(vectorization_mode)
except ValueError:
raise ValueError(
f"Invalid vectorization mode: {vectorization_mode!r}, "
f"valid modes: {[mode.value for mode in VectorizeMode]}"
)
assert isinstance(vectorization_mode, VectorizeMode)
def create_single_env() -> Env:
single_env = make(env_spec, **env_spec_kwargs.copy())
if wrappers is None:
return single_env
for wrapper in wrappers:
single_env = wrapper(single_env)
return single_env
if vectorization_mode == VectorizeMode.SYNC:
if env_spec.entry_point is None:
raise error.Error(
f"Cannot create vectorized environment for {env_spec.id} because it doesn't have an entry point defined."
)
env = gym.vector.SyncVectorEnv(
env_fns=(create_single_env for _ in range(num_envs)),
**vector_kwargs,
)
elif vectorization_mode == VectorizeMode.ASYNC:
if env_spec.entry_point is None:
raise error.Error(
f"Cannot create vectorized environment for {env_spec.id} because it doesn't have an entry point defined."
)
env = gym.vector.AsyncVectorEnv(
env_fns=[create_single_env for _ in range(num_envs)],
**vector_kwargs,
)
elif vectorization_mode == VectorizeMode.VECTOR_ENTRY_POINT:
if len(vector_kwargs) > 0:
raise error.Error(
f"Custom vector environment can be passed arguments only through kwargs and `vector_kwargs` is not empty ({vector_kwargs})"
)
elif len(wrappers) > 0:
raise error.Error(
f"Cannot use `vector_entry_point` vectorization mode with the wrappers argument ({wrappers})."
)
elif len(env_spec.additional_wrappers) > 0:
raise error.Error(
f"Cannot use `vector_entry_point` vectorization mode with the additional_wrappers parameter in spec being not empty ({env_spec.additional_wrappers})."
)
entry_point = env_spec.vector_entry_point
if entry_point is None:
raise error.Error(
f"Cannot create vectorized environment for {id} because it doesn't have a vector entry point defined."
)
elif callable(entry_point):
env_creator = entry_point
else: # Assume it's a string
env_creator = load_env_creator(entry_point)
if (
env_spec.max_episode_steps is not None
and "max_episode_steps" not in env_spec_kwargs
):
env_spec_kwargs["max_episode_steps"] = env_spec.max_episode_steps
env = env_creator(num_envs=num_envs, **env_spec_kwargs)
else:
raise error.Error(f"Unknown vectorization mode: {vectorization_mode}")
# Copies the environment creation specification and kwargs to add to the environment specification details
copied_id_spec = copy.deepcopy(env_spec)
copied_id_spec.kwargs = env_spec_kwargs.copy()
if num_envs != 1:
copied_id_spec.kwargs["num_envs"] = num_envs
copied_id_spec.kwargs["vectorization_mode"] = vectorization_mode.value
if len(vector_kwargs) > 0:
copied_id_spec.kwargs["vector_kwargs"] = vector_kwargs
if len(wrappers) > 0:
copied_id_spec.kwargs["wrappers"] = wrappers
env.unwrapped.spec = copied_id_spec
if "autoreset_mode" not in env.metadata:
warn(
f"The VectorEnv ({env}) is missing AutoresetMode metadata, metadata={env.metadata}"
)
elif not isinstance(env.metadata["autoreset_mode"], AutoresetMode):
warn(
f"The VectorEnv ({env}) metadata['autoreset_mode'] is not an instance of AutoresetMode, {type(env.metadata['autoreset_mode'])}."
)
return env
def spec(env_id: str) -> EnvSpec:
"""Retrieve the :class:`EnvSpec` for the environment id from the :attr:`registry`.
Args:
env_id: The environment id with the expected format of ``[(namespace)/]id[-v(version)]``
Returns:
The environment spec if it exists
Raises:
Error: If the environment id doesn't exist
"""
env_spec = registry.get(env_id)
if env_spec is None:
ns, name, version = parse_env_id(env_id)
_check_version_exists(ns, name, version)
raise error.Error(f"No registered env with id: {env_id}")
else:
assert isinstance(
env_spec, EnvSpec
), f"Expected the registry for {env_id} to be an `EnvSpec`, actual type is {type(env_spec)}"
return env_spec
def pprint_registry(
print_registry: dict[str, EnvSpec] = registry,
*,
num_cols: int = 3,
exclude_namespaces: list[str] | None = None,
disable_print: bool = False,
) -> str | None:
"""Pretty prints all environments in the :attr:`registry`.
Note:
All arguments are keyword only
Args:
print_registry: Environment registry to be printed. By default, :attr:`registry`
num_cols: Number of columns to arrange environments in, for display.
exclude_namespaces: A list of namespaces to be excluded from printing. Helpful if only ALE environments are wanted.
disable_print: Whether to return a string of all the namespaces and environment IDs
or to print the string to console.
"""
# Defaultdict to store environment ids according to namespace.
namespace_envs: dict[str, list[str]] = defaultdict(list)
max_justify = float("-inf")
# Find the namespace associated with each environment spec
for env_spec in print_registry.values():
ns = env_spec.namespace
if ns is None and isinstance(env_spec.entry_point, str):
# Use regex to obtain namespace from entrypoints.
env_entry_point = re.sub(r":\w+", "", env_spec.entry_point)
split_entry_point = env_entry_point.split(".")
if len(split_entry_point) >= 3:
# If namespace is of the format:
# - gymnasium.envs.mujoco.ant_v4:AntEnv
# - gymnasium.envs.mujoco:HumanoidEnv
ns = split_entry_point[2]
elif len(split_entry_point) > 1:
# If namespace is of the format - shimmy.atari_env
ns = split_entry_point[1]
else:
# If namespace cannot be found, default to env name
ns = env_spec.name
namespace_envs[ns].append(env_spec.id)
max_justify = max(max_justify, len(env_spec.name))
# Iterate through each namespace and print environment alphabetically
output: list[str] = []
for ns, env_ids in namespace_envs.items():
# Ignore namespaces to exclude.
if exclude_namespaces is not None and ns in exclude_namespaces:
continue
# Print the namespace
namespace_output = f"{'=' * 5} {ns} {'=' * 5}\n"
# Reference: https://stackoverflow.com/a/33464001
for count, env_id in enumerate(sorted(env_ids), 1):
# Print column with justification.
namespace_output += env_id.ljust(max_justify) + " "
# Once all rows printed, switch to new column.
if count % num_cols == 0:
namespace_output = namespace_output.rstrip(" ")
if count != len(env_ids):
namespace_output += "\n"
output.append(namespace_output.rstrip(" "))
if disable_print:
return "\n".join(output)
else:
print("\n".join(output))
| VectorizeMode |
python | scrapy__scrapy | tests/test_downloadermiddleware_retry.py | {
"start": 4934,
"end": 8511
} | class ____:
invalid_url = "http://www.scrapytest.org/invalid_url"
def get_middleware(self, settings=None):
crawler = get_crawler(DefaultSpider, settings or {})
crawler.spider = crawler._create_spider()
return RetryMiddleware.from_crawler(crawler)
def test_with_settings_zero(self):
max_retry_times = 0
settings = {"RETRY_TIMES": max_retry_times}
middleware = self.get_middleware(settings)
req = Request(self.invalid_url)
self._test_retry(
req,
DNSLookupError("foo"),
max_retry_times,
middleware=middleware,
)
def test_with_metakey_zero(self):
max_retry_times = 0
middleware = self.get_middleware()
meta = {"max_retry_times": max_retry_times}
req = Request(self.invalid_url, meta=meta)
self._test_retry(
req,
DNSLookupError("foo"),
max_retry_times,
middleware=middleware,
)
def test_without_metakey(self):
max_retry_times = 5
settings = {"RETRY_TIMES": max_retry_times}
middleware = self.get_middleware(settings)
req = Request(self.invalid_url)
self._test_retry(
req,
DNSLookupError("foo"),
max_retry_times,
middleware=middleware,
)
def test_with_metakey_greater(self):
meta_max_retry_times = 3
middleware_max_retry_times = 2
req1 = Request(self.invalid_url, meta={"max_retry_times": meta_max_retry_times})
req2 = Request(self.invalid_url)
settings = {"RETRY_TIMES": middleware_max_retry_times}
middleware = self.get_middleware(settings)
self._test_retry(
req1,
DNSLookupError("foo"),
meta_max_retry_times,
middleware=middleware,
)
self._test_retry(
req2,
DNSLookupError("foo"),
middleware_max_retry_times,
middleware=middleware,
)
def test_with_metakey_lesser(self):
meta_max_retry_times = 4
middleware_max_retry_times = 5
req1 = Request(self.invalid_url, meta={"max_retry_times": meta_max_retry_times})
req2 = Request(self.invalid_url)
settings = {"RETRY_TIMES": middleware_max_retry_times}
middleware = self.get_middleware(settings)
self._test_retry(
req1,
DNSLookupError("foo"),
meta_max_retry_times,
middleware=middleware,
)
self._test_retry(
req2,
DNSLookupError("foo"),
middleware_max_retry_times,
middleware=middleware,
)
def test_with_dont_retry(self):
max_retry_times = 4
middleware = self.get_middleware()
meta = {
"max_retry_times": max_retry_times,
"dont_retry": True,
}
req = Request(self.invalid_url, meta=meta)
self._test_retry(
req,
DNSLookupError("foo"),
0,
middleware=middleware,
)
def _test_retry(
self,
req,
exception,
max_retry_times,
middleware=None,
):
middleware = middleware or self.mw
for i in range(max_retry_times):
req = middleware.process_exception(req, exception)
assert isinstance(req, Request)
# discard it
req = middleware.process_exception(req, exception)
assert req is None
| TestMaxRetryTimes |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_util__embed.py | {
"start": 8866,
"end": 13049
} | class ____:
def test_single_model_with_document(self) -> None:
# should use existing doc in with-block
p = SomeModel()
d = Document()
orig_theme = d.theme
d.add_root(p)
with beu.OutputDocumentFor([p], apply_theme=Theme(json={})):
assert p.document is d
assert d.theme is not orig_theme
assert p.document is d
assert d.theme is orig_theme
def test_single_model_with_no_document(self) -> None:
p = SomeModel()
assert p.document is None
with beu.OutputDocumentFor([p], apply_theme=Theme(json={})):
assert p.document is not None
new_theme = p.document.theme
assert p.document is not None
assert p.document.theme is not new_theme
def test_list_of_model_with_no_documents(self) -> None:
# should create new (permanent) doc for inputs
p1 = SomeModel()
p2 = SomeModel()
assert p1.document is None
assert p2.document is None
with beu.OutputDocumentFor([p1, p2], apply_theme=Theme(json={})):
assert p1.document is not None
assert p2.document is not None
assert p1.document is p2.document
new_doc = p1.document
new_theme = p1.document.theme
assert p1.document is new_doc
assert p2.document is new_doc
assert p1.document is p2.document
# should restore to default theme after with-block
assert p1.document.theme is not new_theme
def test_list_of_model_same_as_roots(self) -> None:
# should use existing doc in with-block
p1 = SomeModel()
p2 = SomeModel()
d = Document()
orig_theme = d.theme
d.add_root(p1)
d.add_root(p2)
with beu.OutputDocumentFor([p1, p2], apply_theme=Theme(json={})):
assert p1.document is d
assert p2.document is d
assert d.theme is not orig_theme
assert p1.document is d
assert p2.document is d
assert d.theme is orig_theme
def test_list_of_model_same_as_roots_with_always_new(self) -> None:
# should use new temp doc for everything inside with-block
p1 = SomeModel()
p2 = SomeModel()
d = Document()
orig_theme = d.theme
d.add_root(p1)
d.add_root(p2)
with beu.OutputDocumentFor([p1, p2], always_new=True, apply_theme=Theme(json={})):
assert p1.document is not d
assert p2.document is not d
assert p1.document is p2.document
assert p2.document.theme is not orig_theme
assert p1.document is d
assert p2.document is d
assert d.theme is orig_theme
def test_list_of_model_subset_roots(self) -> None:
# should use new temp doc for subset inside with-block
p1 = SomeModel()
p2 = SomeModel()
d = Document()
orig_theme = d.theme
d.add_root(p1)
d.add_root(p2)
with beu.OutputDocumentFor([p1], apply_theme=Theme(json={})):
assert p1.document is not d
assert p2.document is d
assert p1.document.theme is not orig_theme
assert p2.document.theme is orig_theme
assert p1.document is d
assert p2.document is d
assert d.theme is orig_theme
def test_list_of_models_different_docs(self) -> None:
# should use new temp doc for everything inside with-block
d = Document()
orig_theme = d.theme
p1 = SomeModel()
p2 = SomeModel()
d.add_root(p2)
assert p1.document is None
assert p2.document is not None
with beu.OutputDocumentFor([p1, p2], apply_theme=Theme(json={})):
assert p1.document is not None
assert p2.document is not None
assert p1.document is not d
assert p2.document is not d
assert p1.document == p2.document
assert p1.document.theme is not orig_theme
assert p1.document is None
assert p2.document is not None
assert p2.document.theme is orig_theme
| Test_OutputDocumentFor_custom_apply_theme |
python | scipy__scipy | scipy/integrate/tests/test_quadpack.py | {
"start": 15839,
"end": 25678
} | class ____:
def test_triple_integral(self):
# 9) Triple Integral test
def simpfunc(z, y, x, t): # Note order of arguments.
return (x+y+z)*t
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2*x,
lambda x, y: x - y, lambda x, y: x + y,
(2.,)),
2*8/3.0 * (b**4.0 - a**4.0))
@pytest.mark.xslow
@pytest.mark.parametrize(
"x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
[
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 0] for all n.
(-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for each n (one at a time).
(-np.inf, -1, -np.inf, 0, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(-np.inf, 0, -np.inf, -1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(-np.inf, 0, -np.inf, 0, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for each n (two at a time).
(-np.inf, -1, -np.inf, -1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(-np.inf, -1, -np.inf, 0, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(-np.inf, 0, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for all n.
(-np.inf, -1, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
(-np.inf, -1, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
(-np.inf, -1, -np.inf, -1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
(-np.inf, -1, -np.inf, 1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
(-np.inf, 1, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
(-np.inf, 1, -np.inf, 1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
(-np.inf, 1, -np.inf, -1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for each n (one at a time).
(-np.inf, 1, -np.inf, 0, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(-np.inf, 0, -np.inf, 1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(-np.inf, 0, -np.inf, 0, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for each n (two at a time).
(-np.inf, 1, -np.inf, 1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-np.inf, 1, -np.inf, 0, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-np.inf, 0, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for all n.
(-np.inf, 1, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [0, inf] for all n.
(0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for each n (one at a time).
(1, np.inf, 0, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(0, np.inf, 1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(0, np.inf, 0, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for each n (two at a time).
(1, np.inf, 1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(1, np.inf, 0, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(0, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for all n.
(1, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for each n (one at a time).
(-1, np.inf, 0, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(0, np.inf, -1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(0, np.inf, 0, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for each n (two at a time).
(-1, np.inf, -1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-1, np.inf, 0, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(0, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for all n.
(-1, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
(1, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
(1, np.inf, 1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
(1, np.inf, -1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
(-1, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
(-1, np.inf, -1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
(-1, np.inf, 1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, inf] for all n.
(-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
np.pi ** (3 / 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [0, 0] for each n (one at a time).
(0, 0, 0, np.inf, 0, np.inf, 0),
(0, np.inf, 0, 0, 0, np.inf, 0),
(0, np.inf, 0, np.inf, 0, 0, 0),
],
)
def test_triple_integral_improper(
self,
x_lower,
x_upper,
y_lower,
y_upper,
z_lower,
z_upper,
expected
):
# The Gaussian Integral.
def f(x, y, z):
return np.exp(-x ** 2 - y ** 2 - z ** 2)
assert_quad(
tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
expected,
error_tolerance=6e-8
)
@make_xp_test_case(nquad)
| TestTplquad |
python | ray-project__ray | python/ray/air/tests/test_integration_comet.py | {
"start": 1828,
"end": 2550
} | class ____(unittest.TestCase):
def setUp(self):
self.logger = CometLoggerCallback()
def test_check_key_name(self):
logger = self.logger
# Return True when key == item
self.assertTrue(logger._check_key_name("name", "name"))
# Return True when key.startswith(item + "/")
self.assertTrue(logger._check_key_name("name/", "name"))
# Return False when item.startswith(key + "/")
self.assertFalse(logger._check_key_name("name", "name/"))
# Return False when key != item and not key.startswith(item."/")
self.assertFalse(logger._check_key_name("name", "x"))
@patch("comet_ml.OfflineExperiment")
@patch("comet_ml.Experiment")
| HelperMethodTests |
python | keras-team__keras | keras/src/metrics/accuracy_metrics_test.py | {
"start": 99,
"end": 2855
} | class ____(testing.TestCase):
def test_config(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
self.assertEqual(acc_obj.name, "accuracy")
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj._dtype, "float32")
# Test get_config
acc_obj_config = acc_obj.get_config()
self.assertEqual(acc_obj_config["name"], "accuracy")
self.assertEqual(acc_obj_config["dtype"], "float32")
# Check save and restore config
acc_obj2 = accuracy_metrics.Accuracy.from_config(acc_obj_config)
self.assertEqual(acc_obj2.name, "accuracy")
self.assertEqual(len(acc_obj2.variables), 2)
self.assertEqual(acc_obj2._dtype, "float32")
def test_unweighted(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1], [2], [3], [4]])
y_pred = np.array([[0], [2], [3], [4]])
acc_obj.update_state(y_true, y_pred)
result = acc_obj.result()
self.assertAllClose(result, 0.75, atol=1e-3)
def test_weighted(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1], [2], [3], [4]])
y_pred = np.array([[0], [2], [3], [4]])
sample_weight = np.array([1, 1, 0, 0])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_rank_1(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([1, 2, 3, 4])
y_pred = np.array([0, 2, 3, 4])
sample_weight = np.array([1, 1, 0, 0])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_weights(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1, 2], [3, 4]])
y_pred = np.array([[0, 2], [3, 4]])
sample_weight = np.array([[1, 0], [0, 1]])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
def test_weighted_nd_broadcast_weights(self):
acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32")
y_true = np.array([[1, 2], [3, 4]])
y_pred = np.array([[0, 2], [3, 4]])
sample_weight = np.array([[1, 0]])
acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight)
result = acc_obj.result()
self.assertAllClose(result, 0.5, atol=1e-3)
| AccuracyTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_hyperparameter_tuning_job.py | {
"start": 7740,
"end": 10809
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = HyperparameterTuningJobHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(HYPERPARAMETER_TUNING_JOB_HOOK_STRING.format("get_job_service_client"))
def test_delete_hyperparameter_tuning_job(self, mock_client) -> None:
self.hook.delete_hyperparameter_tuning_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
hyperparameter_tuning_job=TEST_HYPERPARAMETER_TUNING_JOB_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_hyperparameter_tuning_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.hyperparameter_tuning_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.hyperparameter_tuning_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_HYPERPARAMETER_TUNING_JOB_ID,
)
@mock.patch(HYPERPARAMETER_TUNING_JOB_HOOK_STRING.format("get_job_service_client"))
def test_get_hyperparameter_tuning_job(self, mock_client) -> None:
self.hook.get_hyperparameter_tuning_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
hyperparameter_tuning_job=TEST_HYPERPARAMETER_TUNING_JOB_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_hyperparameter_tuning_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.hyperparameter_tuning_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.hyperparameter_tuning_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_HYPERPARAMETER_TUNING_JOB_ID,
)
@mock.patch(HYPERPARAMETER_TUNING_JOB_HOOK_STRING.format("get_job_service_client"))
def test_list_hyperparameter_tuning_jobs(self, mock_client) -> None:
self.hook.list_hyperparameter_tuning_jobs(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_hyperparameter_tuning_jobs.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
filter=None,
page_size=None,
page_token=None,
read_mask=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
| TestHyperparameterTuningJobWithoutDefaultProjectIdHook |
python | huggingface__transformers | src/transformers/models/mvp/modeling_mvp.py | {
"start": 68680,
"end": 69121
} | class ____(MvpPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = MvpDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
| MvpDecoderWrapper |
python | walkccc__LeetCode | solutions/2141. Maximum Running Time of N Computers/2141.py | {
"start": 0,
"end": 467
} | class ____:
def maxRunTime(self, n: int, batteries: list[int]) -> int:
summ = sum(batteries)
batteries.sort()
# The maximum battery is greater than the average, so it can last forever.
# Reduce the problem from size n to size n - 1.
while batteries[-1] > summ // n:
summ -= batteries.pop()
n -= 1
# If the maximum battery <= average running time, it won't be waste, and so
# do smaller batteries.
return summ // n
| Solution |
python | Farama-Foundation__Gymnasium | gymnasium/spaces/sequence.py | {
"start": 275,
"end": 10197
} | class ____(Space[Union[tuple[Any, ...], Any]]):
r"""This space represent sets of finite-length sequences.
This space represents the set of tuples of the form :math:`(a_0, \dots, a_n)` where the :math:`a_i` belong
to some space that is specified during initialization and the integer :math:`n` is not fixed
Example:
>>> from gymnasium.spaces import Sequence, Box
>>> observation_space = Sequence(Box(0, 1), seed=0)
>>> observation_space.sample()
(array([0.6822636], dtype=float32), array([0.18933342], dtype=float32), array([0.19049619], dtype=float32))
>>> observation_space.sample()
(array([0.83506], dtype=float32), array([0.9053838], dtype=float32), array([0.5836242], dtype=float32), array([0.63214064], dtype=float32))
Example with stacked observations
>>> observation_space = Sequence(Box(0, 1), stack=True, seed=0)
>>> observation_space.sample()
array([[0.6822636 ],
[0.18933342],
[0.19049619]], dtype=float32)
"""
def __init__(
self,
space: Space[Any],
seed: int | np.random.Generator | None = None,
stack: bool = False,
):
"""Constructor of the :class:`Sequence` space.
Args:
space: Elements in the sequences this space represent must belong to this space.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space.
stack: If ``True`` then the resulting samples would be stacked.
"""
assert isinstance(
space, Space
), f"Expects the feature space to be instance of a gym Space, actual type: {type(space)}"
self.feature_space = space
self.stack = stack
if self.stack:
self.stacked_feature_space: Space = gym.vector.utils.batch_space(
self.feature_space, 1
)
# None for shape and dtype, since it'll require special handling
super().__init__(None, None, seed)
def seed(self, seed: int | tuple[int, int] | None = None) -> tuple[int, int]:
"""Seed the PRNG of the Sequence space and the feature space.
Depending on the type of seed, the subspaces will be seeded differently
* ``None`` - All the subspaces will use a random initial seed
* ``Int`` - The integer is used to seed the :class:`Sequence` space that is used to generate a seed value for the feature space.
* ``Tuple of ints`` - A tuple for the :class:`Sequence` and feature space.
Args:
seed: An optional int or tuple of ints to seed the PRNG. See above for more details
Returns:
A tuple of the seeding values for the Sequence and feature space
"""
if seed is None:
return super().seed(None), self.feature_space.seed(None)
elif isinstance(seed, int):
super_seed = super().seed(seed)
feature_seed = int(self.np_random.integers(np.iinfo(np.int32).max))
# this is necessary such that after int or list/tuple seeding, the Sequence PRNG are equivalent
super().seed(seed)
return super_seed, self.feature_space.seed(feature_seed)
elif isinstance(seed, (tuple, list)):
if len(seed) != 2:
raise ValueError(
f"Expects the seed to have two elements for the Sequence and feature space, actual length: {len(seed)}"
)
return super().seed(seed[0]), self.feature_space.seed(seed[1])
else:
raise TypeError(
f"Expected None, int, tuple of ints, actual type: {type(seed)}"
)
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return False
def sample(
self,
mask: None | (
tuple[
None | int | NDArray[np.integer],
Any,
]
) = None,
probability: None | (
tuple[
None | int | NDArray[np.integer],
Any,
]
) = None,
) -> tuple[Any] | Any:
"""Generates a single random sample from this space.
Args:
mask: An optional mask for (optionally) the length of the sequence and (optionally) the values in the sequence.
If you specify ``mask``, it is expected to be a tuple of the form ``(length_mask, sample_mask)`` where ``length_mask`` is
* ``None`` - The length will be randomly drawn from a geometric distribution
* ``int`` - Fixed length
* ``np.ndarray`` of integers - Length of the sampled sequence is randomly drawn from this array.
The second element of the tuple ``sample_mask`` specifies how the feature space will be sampled.
Depending on if mask or probability is used will affect what argument is used.
probability: See mask description above, the only difference is on the ``sample_mask`` for the feature space being probability rather than mask.
Returns:
A tuple of random length with random samples of elements from the :attr:`feature_space`.
"""
if mask is not None and probability is not None:
raise ValueError(
f"Only one of `mask` or `probability` can be provided, actual values: mask={mask}, probability={probability}"
)
elif mask is not None:
sample_length = self.generate_sample_length(mask[0], "mask")
sampled_values = tuple(
self.feature_space.sample(mask=mask[1]) for _ in range(sample_length)
)
elif probability is not None:
sample_length = self.generate_sample_length(probability[0], "probability")
sampled_values = tuple(
self.feature_space.sample(probability=probability[1])
for _ in range(sample_length)
)
else:
sample_length = self.np_random.geometric(0.25)
sampled_values = tuple(
self.feature_space.sample() for _ in range(sample_length)
)
if self.stack:
# Concatenate values if stacked.
out = gym.vector.utils.create_empty_array(
self.feature_space, len(sampled_values)
)
return gym.vector.utils.concatenate(self.feature_space, sampled_values, out)
return sampled_values
def generate_sample_length(
self,
length_mask: None | np.integer | NDArray[np.integer],
mask_type: None | str,
) -> int:
"""Generate the sample length for a given length mask and mask type."""
if length_mask is not None:
if np.issubdtype(type(length_mask), np.integer):
assert (
0 <= length_mask
), f"Expects the length mask of `{mask_type}` to be greater than or equal to zero, actual value: {length_mask}"
return length_mask
elif isinstance(length_mask, np.ndarray):
assert (
len(length_mask.shape) == 1
), f"Expects the shape of the length mask of `{mask_type}` to be 1-dimensional, actual shape: {length_mask.shape}"
assert np.all(
0 <= length_mask
), f"Expects all values in the length_mask of `{mask_type}` to be greater than or equal to zero, actual values: {length_mask}"
assert np.issubdtype(
length_mask.dtype, np.integer
), f"Expects the length mask array of `{mask_type}` to have dtype of np.integer, actual type: {length_mask.dtype}"
return self.np_random.choice(length_mask)
else:
raise TypeError(
f"Expects the type of length_mask of `{mask_type}` to be an integer or a np.ndarray, actual type: {type(length_mask)}"
)
else:
# The choice of 0.25 is arbitrary
return self.np_random.geometric(0.25)
def contains(self, x: Any) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
# by definition, any sequence is an iterable
if self.stack:
return all(
item in self.feature_space
for item in gym.vector.utils.iterate(self.stacked_feature_space, x)
)
else:
return isinstance(x, tuple) and all(
self.feature_space.contains(item) for item in x
)
def __repr__(self) -> str:
"""Gives a string representation of this space."""
return f"Sequence({self.feature_space}, stack={self.stack})"
def to_jsonable(
self, sample_n: typing.Sequence[tuple[Any, ...] | Any]
) -> list[list[Any]]:
"""Convert a batch of samples from this space to a JSONable data type."""
if self.stack:
return self.stacked_feature_space.to_jsonable(sample_n)
else:
return [self.feature_space.to_jsonable(sample) for sample in sample_n]
def from_jsonable(self, sample_n: list[list[Any]]) -> list[tuple[Any, ...] | Any]:
"""Convert a JSONable data type to a batch of samples from this space."""
if self.stack:
return self.stacked_feature_space.from_jsonable(sample_n)
else:
return [
tuple(self.feature_space.from_jsonable(sample)) for sample in sample_n
]
def __eq__(self, other: Any) -> bool:
"""Check whether ``other`` is equivalent to this instance."""
return (
isinstance(other, Sequence)
and self.feature_space == other.feature_space
and self.stack == other.stack
)
| Sequence |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 54067,
"end": 54119
} | class ____: pass
Saveable = SaveableType()
| SaveableType |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/sensors/databricks.py | {
"start": 1469,
"end": 6262
} | class ____(DatabricksSQLStatementsMixin, BaseSensorOperator):
"""DatabricksSQLStatementsSensor."""
template_fields: Sequence[str] = (
"databricks_conn_id",
"statement",
"statement_id",
)
template_ext: Sequence[str] = (".json-tpl",)
ui_color = "#1CB1C2"
ui_fgcolor = "#fff"
def __init__(
self,
warehouse_id: str,
*,
statement: str | None = None,
statement_id: str | None = None,
catalog: str | None = None,
schema: str | None = None,
parameters: list[dict[str, Any]] | None = None,
databricks_conn_id: str = "databricks_default",
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
databricks_retry_args: dict[Any, Any] | None = None,
do_xcom_push: bool = True,
wait_for_termination: bool = True,
timeout: float = 3600,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
# Handle the scenario where either both statement and statement_id are set/not set
if statement and statement_id:
raise AirflowException("Cannot provide both statement and statement_id.")
if not statement and not statement_id:
raise AirflowException("One of either statement or statement_id must be provided.")
if not warehouse_id:
raise AirflowException("warehouse_id must be provided.")
super().__init__(**kwargs)
self.statement = statement
self.statement_id = statement_id
self.warehouse_id = warehouse_id
self.catalog = catalog
self.schema = schema
self.parameters = parameters
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.databricks_retry_args = databricks_retry_args
self.wait_for_termination = wait_for_termination
self.deferrable = deferrable
self.timeout = timeout
self.do_xcom_push = do_xcom_push
@cached_property
def _hook(self):
return self._get_hook(caller="DatabricksSQLStatementsSensor")
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=caller,
)
def execute(self, context: Context):
if not self.statement_id:
# Otherwise, we'll go ahead and "submit" the statement
json = {
"statement": self.statement,
"warehouse_id": self.warehouse_id,
"catalog": self.catalog,
"schema": self.schema,
"parameters": self.parameters,
"wait_timeout": "0s",
}
self.statement_id = self._hook.post_sql_statement(json)
self.log.info("SQL Statement submitted with statement_id: %s", self.statement_id)
if self.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_STATEMENT_ID_KEY, value=self.statement_id)
# If we're not waiting for the query to complete execution, then we'll go ahead and return. However, a
# recommendation to use the DatabricksSQLStatementOperator is made in this case
if not self.wait_for_termination:
self.log.info(
"If setting wait_for_termination = False, consider using the DatabricksSQLStatementsOperator instead."
)
return
if self.deferrable:
self._handle_deferrable_execution(defer_method_name=DEFER_METHOD_NAME) # type: ignore[misc]
def poke(self, context: Context):
"""
Handle non-deferrable Sensor execution.
:param context: (Context)
:return: (bool)
"""
# This is going to very closely mirror the execute_complete
statement_state: SQLStatementState = self._hook.get_sql_statement_state(self.statement_id)
if statement_state.is_running:
self.log.info("SQL Statement with ID %s is running", self.statement_id)
return False
if statement_state.is_successful:
self.log.info("SQL Statement with ID %s completed successfully.", self.statement_id)
return True
raise AirflowException(
f"SQL Statement with ID {statement_state} failed with error: {statement_state.error_message}"
)
| DatabricksSQLStatementsSensor |
python | numpy__numpy | numpy/matrixlib/tests/test_matrix_linalg.py | {
"start": 1538,
"end": 1598
} | class ____(CondCases, MatrixTestCase):
pass
| TestCondMatrix |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/waiters.py | {
"start": 2537,
"end": 4787
} | class ____(Waiter[T]):
# Implementation of `Waiter` for use in synchronous contexts
def __init__(self, call: Call[T]) -> None:
super().__init__(call=call)
self._queue: queue.Queue[Optional[Call[T]]] = queue.Queue()
self._done_callbacks: list[Call[Any]] = []
self._done_event = threading.Event()
def submit(self, call: Call[T]) -> Call[T]:
"""
Submit a callback to execute while waiting.
"""
if self.call_is_done():
raise RuntimeError(f"The call {self._call} is already done.")
self._queue.put_nowait(call)
call.set_runner(self)
return call
def _handle_waiting_callbacks(self) -> None:
logger.debug("Waiter %r watching for callbacks", self)
while True:
callback = self._queue.get()
if callback is None:
break
# Ensure that callbacks are cancelled if the parent call is cancelled so
# waiting never runs longer than the call
self._call.future.add_cancel_callback(callback.future.cancel)
callback.run()
del callback
@contextlib.contextmanager
def _handle_done_callbacks(self) -> Generator[None, Any, None]:
try:
yield
finally:
# Call done callbacks
while self._done_callbacks:
callback = self._done_callbacks.pop()
if callback:
callback.run()
def add_done_callback(self, callback: Call[Any]) -> None:
if self._done_event.is_set():
raise RuntimeError("Cannot add done callbacks to done waiters.")
else:
self._done_callbacks.append(callback)
def wait(self) -> Call[T]:
# Stop watching for work once the future is done
self._call.future.add_done_callback(lambda _: self._queue.put_nowait(None))
self._call.future.add_done_callback(lambda _: self._done_event.set())
with self._handle_done_callbacks():
self._handle_waiting_callbacks()
# Wait for the future to be done
self._done_event.wait()
_WAITERS_BY_THREAD[self._owner_thread].remove(self)
return self._call
| SyncWaiter |
python | ray-project__ray | python/ray/data/_internal/datasource/lance_datasink.py | {
"start": 4528,
"end": 7305
} | class ____(_BaseLanceDatasink):
"""Lance Ray Datasink.
Write a Ray dataset to lance.
If we expect to write larger-than-memory files,
we can use `LanceFragmentWriter` and `LanceCommitter`.
Args:
uri : the base URI of the dataset.
schema : pyarrow.Schema, optional.
The schema of the dataset.
mode : str, optional
The write mode. Default is 'append'.
Choices are 'append', 'create', 'overwrite'.
min_rows_per_file : int, optional
The minimum number of rows per file. Default is 1024 * 1024.
max_rows_per_file : int, optional
The maximum number of rows per file. Default is 64 * 1024 * 1024.
data_storage_version: optional, str, default None
The version of the data storage format to use. Newer versions are more
efficient but require newer versions of lance to read. The default is
"legacy" which will use the legacy v1 version. See the user guide
for more details.
storage_options : Dict[str, Any], optional
The storage options for the writer. Default is None.
"""
NAME = "Lance"
def __init__(
self,
uri: str,
schema: Optional[pa.Schema] = None,
mode: Literal["create", "append", "overwrite"] = "create",
min_rows_per_file: int = 1024 * 1024,
max_rows_per_file: int = 64 * 1024 * 1024,
data_storage_version: Optional[str] = None,
storage_options: Optional[Dict[str, Any]] = None,
*args,
**kwargs,
):
super().__init__(
uri,
schema=schema,
mode=mode,
storage_options=storage_options,
*args,
**kwargs,
)
self.min_rows_per_file = min_rows_per_file
self.max_rows_per_file = max_rows_per_file
self.data_storage_version = data_storage_version
# if mode is append, read_version is read from existing dataset.
self.read_version: Optional[int] = None
@property
def min_rows_per_write(self) -> int:
return self.min_rows_per_file
def get_name(self) -> str:
return self.NAME
def write(
self,
blocks: Iterable[Union[pa.Table, "pd.DataFrame"]],
_ctx,
):
fragments_and_schema = _write_fragment(
blocks,
self.uri,
schema=self.schema,
max_rows_per_file=self.max_rows_per_file,
data_storage_version=self.data_storage_version,
storage_options=self.storage_options,
)
return [
(pickle.dumps(fragment), pickle.dumps(schema))
for fragment, schema in fragments_and_schema
]
| LanceDatasink |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0050_build_readthedocs_yaml_path.py | {
"start": 189,
"end": 821
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0049_automation_rule_copy"),
]
operations = [
migrations.AddField(
model_name="build",
name="readthedocs_yaml_path",
field=models.CharField(
blank=True,
default=None,
max_length=1024,
null=True,
validators=[readthedocs.projects.validators.validate_build_config_file],
verbose_name="Custom build configuration file path used in this build",
),
),
]
| Migration |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 13163,
"end": 13652
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self, sparse_container=None):
self.sparse_container = sparse_container
def fit(self, X, y=None):
validate_data(self, X)
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def transform(self, X):
check_is_fitted(self)
X = validate_data(self, X, accept_sparse=True, reset=False)
return self.sparse_container(X)
| SparseTransformer |
python | matplotlib__matplotlib | lib/matplotlib/offsetbox.py | {
"start": 54623,
"end": 55108
} | class ____(DraggableBase):
def __init__(self, annotation, use_blit=False):
super().__init__(annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
self.ox, self.oy = ann.get_transform().transform(ann.xyann)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xyann = ann.get_transform().inverted().transform(
(self.ox + dx, self.oy + dy))
| DraggableAnnotation |
python | getsentry__sentry | tests/sentry/snuba/metrics/fields/test_base.py | {
"start": 25091,
"end": 26699
} | class ____(TestCase):
def test_session_duration_derived_alias(self) -> None:
org_id = self.project.organization_id
use_case_id = UseCaseID.SESSIONS
session_duration_derived_alias = DERIVED_ALIASES[SessionMRI.DURATION.value]
assert session_duration_derived_alias.generate_filter_snql_conditions(
org_id, use_case_id
) == Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
resolve_weak(use_case_id, org_id, SessionMRI.RAW_DURATION.value),
],
),
Function(
"equals",
(
Column(f"tags[{resolve_weak(use_case_id, org_id, 'session.status')}]"),
resolve_tag_value(use_case_id, org_id, "exited"),
),
),
],
)
@pytest.mark.parametrize(
"metric_mri,expected_entity",
[
("c:sessions/session@none", "metrics_counters"),
("s:sessions/user@none", "metrics_sets"),
("d:sessions/duration@second", "metrics_distributions"),
("d:sessions/unknown_metric@second", None),
("e:sessions/all@none", None), # derived metric
("", None),
("foo", None),
("foo:foo:foo", None),
],
)
def test_known_entity_of_metric_mri(metric_mri: str, expected_entity: str | None) -> None:
assert _get_known_entity_of_metric_mri(metric_mri) == expected_entity
| DerivedMetricAliasTestCase |
python | kamyu104__LeetCode-Solutions | Python/number-of-connected-components-in-an-undirected-graph.py | {
"start": 541,
"end": 841
} | class ____(object):
def countComponents(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
union_find = UnionFind(n)
for i, j in edges:
union_find.union_set(i, j)
return union_find.count
| Solution |
python | ApeWorX__ape | src/ape/pytest/runners.py | {
"start": 741,
"end": 14755
} | class ____(ManagerAccessMixin):
def __init__(
self,
config_wrapper: "ConfigWrapper",
isolation_manager: "IsolationManager",
receipt_capture: "ReceiptCapture",
gas_tracker: "GasTracker",
coverage_tracker: "CoverageTracker",
fixture_manager: Optional["FixtureManager"] = None,
):
self.config_wrapper = config_wrapper
self.isolation_manager = isolation_manager
self.receipt_capture = receipt_capture
self._provider_is_connected = False
# Ensure the gas report starts off None for this runner.
gas_tracker.session_gas_report = None
self.gas_tracker = gas_tracker
self.coverage_tracker = coverage_tracker
if fixture_manager is None:
from ape.pytest.fixtures import FixtureManager
self.fixture_manager = FixtureManager(config_wrapper, isolation_manager)
else:
self.fixture_manager = fixture_manager
self._initialized_fixtures: list[str] = []
self._finalized_fixtures: list[str] = []
@property
def _provider_context(self) -> "ProviderContextManager":
return self.network_manager.parse_network_choice(self.config_wrapper.network)
@property
def _coverage_report(self) -> Optional["CoverageReport"]:
return self.coverage_tracker.data.report if self.coverage_tracker.data else None
def pytest_exception_interact(self, report, call):
"""
A ``-I`` option triggers when an exception is raised which can be interactively handled.
Outputs the full ``repr`` of the failed test and opens an interactive shell using the
same console as the ``ape console`` command.
"""
# Find the last traceback frame within the active project
tb_frames: PytestTraceback = call.excinfo.traceback
base = self.local_project.path.as_posix()
if self.config_wrapper.show_internal:
relevant_tb = list(tb_frames)
else:
relevant_tb = [
f
for f in tb_frames
if Path(f.path).as_posix().startswith(base) or Path(f.path).name.startswith("test_")
]
if relevant_tb:
call.excinfo.traceback = PytestTraceback(relevant_tb)
# Only show locals if not digging into the framework's traceback.
# Else, it gets way too noisy.
show_locals = not self.config_wrapper.show_internal
try:
here = Path.cwd()
except FileNotFoundError:
pass # In a temp-folder, most likely.
else:
report.longrepr = call.excinfo.getrepr(
funcargs=True,
abspath=here,
showlocals=show_locals,
style="short",
tbfilter=False,
truncate_locals=True,
chain=False,
)
if self.config_wrapper.interactive and report.failed:
from ape_console._cli import console
traceback = call.excinfo.traceback[-1]
# Suspend capsys to ignore our own output.
capman = self.config_wrapper.get_pytest_plugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
# Show the exception info before launching the interactive session.
click.echo()
rich_print(str(report.longrepr))
click.echo()
# get global namespace
globals_dict = traceback.frame.f_globals
# filter python internals and pytest internals
globals_dict = {
k: v
for k, v in globals_dict.items()
if not k.startswith("__") and not k.startswith("@")
}
# filter fixtures
globals_dict = {
k: v for k, v in globals_dict.items() if not hasattr(v, "_pytestfixturefunction")
}
# get local namespace
locals_dict = traceback.locals
locals_dict = {k: v for k, v in locals_dict.items() if not k.startswith("@")}
click.echo("Starting interactive mode. Type `exit` to halt current test.")
namespace = {"_callinfo": call, **globals_dict, **locals_dict}
console(extra_locals=namespace, project=self.local_project, embed=True)
if capman:
capman.resume_global_capture()
if type(call.excinfo.value) in (SystemExit, KeyboardInterrupt):
# This will show the rest of Ape Test output as if the
# tests had stopped here.
pytest.exit("`ape test` exited.")
def pytest_runtest_setup(self, item):
"""
By default, insert isolation fixtures into each test cases list of fixtures
prior to actually executing the test case.
https://docs.pytest.org/en/6.2.x/reference.html#pytest.hookspec.pytest_runtest_setup
"""
if (
not self.config_wrapper.isolation
# doctests don't have fixturenames
or (hasattr(pytest, "DoctestItem") and isinstance(item, pytest.DoctestItem))
or "_function_isolation" in item.fixturenames # prevent double injection
):
# isolation is disabled via cmdline option or running doc-tests.
return
if self.config_wrapper.isolation:
self._setup_isolation(item)
def _setup_isolation(self, item):
fixtures = self.fixture_manager.get_fixtures(item)
for scope in (Scope.SESSION, Scope.PACKAGE, Scope.MODULE, Scope.CLASS):
if not (
custom_fixtures := [f for f in fixtures[scope] if self.fixture_manager.is_custom(f)]
):
# Intermediate scope isolations aren't filled in, or only using
# built-in Ape fixtures.
continue
snapshot = self.isolation_manager.get_snapshot(scope)
# Gather new fixtures. Also, be mindful of parametrized fixtures
# which strangely have the same name.
new_fixtures = []
for custom_fixture in custom_fixtures:
# Parametrized fixtures must always be considered new
# because of severe complications of using them.
is_custom = custom_fixture in fixtures.parametrized
is_iterating = is_custom and fixtures.is_iterating(custom_fixture)
is_new = custom_fixture not in snapshot.fixtures
# NOTE: Consider ``None`` to be stateful here to be safe.
stateful = self.fixture_manager.is_stateful(custom_fixture) is not False
if (is_new or is_iterating) and stateful:
new_fixtures.append(custom_fixture)
continue
# Rebase if there are new fixtures found of non-function scope.
# And there are stateful fixtures of lower scopes that need resetting.
if self.fixture_manager.needs_rebase(new_fixtures, snapshot):
self.fixture_manager.rebase(scope, fixtures)
# Append these fixtures so we know when new ones arrive
# and need to trigger the invalidation logic above.
snapshot.append_fixtures(new_fixtures)
fixtures.apply_fixturenames()
def pytest_sessionstart(self):
"""
Called after the `Session` object has been created and before performing
collection and entering the run test loop.
Removes `PytestAssertRewriteWarning` warnings from the terminalreporter.
This prevents warnings that "the `ape` library was already imported and
so related assertions cannot be rewritten". The warning is not relevant
for end users who are performing tests with ape.
"""
reporter = self.config_wrapper.get_pytest_plugin("terminalreporter")
if not reporter:
return
warnings = reporter.stats.pop("warnings", [])
warnings = [i for i in warnings if "PytestAssertRewriteWarning" not in i.message]
if warnings and not self.config_wrapper.disable_warnings:
reporter.stats["warnings"] = warnings
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_call(self, item):
if network_marker := item.get_closest_marker("use_network"):
if len(getattr(network_marker, "args", []) or []) != 1:
raise ValueError("`use_network` marker requires single network choice argument.")
with self.network_manager.parse_network_choice(network_marker.args[0]):
yield
else:
yield
def pytest_fixture_setup(self, fixturedef, request):
fixture_name = fixturedef.argname
if fixture_name in self._initialized_fixtures:
return
self._initialized_fixtures.append(fixture_name)
if self._track_fixture_blocks(fixture_name):
try:
block_number = self.chain_manager.blocks.height
except Exception:
pass
else:
self.fixture_manager.add_fixture_info(fixture_name, setup_block=block_number)
def pytest_fixture_post_finalizer(self, fixturedef, request):
fixture_name = fixturedef.argname
if fixture_name in self._finalized_fixtures:
return
self._finalized_fixtures.append(fixture_name)
if self._track_fixture_blocks(fixture_name):
try:
block_number = self.chain_manager.blocks.height
except ProviderNotConnectedError:
pass
else:
self.fixture_manager.add_fixture_info(fixture_name, teardown_block=block_number)
def _track_fixture_blocks(self, fixture_name: str) -> bool:
if not self.fixture_manager.is_custom(fixture_name):
return False
scope = self.fixture_manager.get_fixture_scope(fixture_name)
if scope in (None, Scope.FUNCTION):
return False
return True
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_collection_finish(self, session):
"""
Called after collection has been performed and modified.
"""
outcome = yield
# Only start provider if collected tests.
if not outcome.get_result() and session.items:
self._connect()
def _connect(self):
if self._provider_context._provider.network.is_mainnet:
# Ensure is not only running on tests on mainnet because
# was configured as the default.
is_from_command_line = (
"--network" in self.config_wrapper.pytest_config.invocation_params.args
)
if not is_from_command_line:
raise ConfigError(
"Default network is mainnet; unable to run tests on mainnet. "
"Please specify the network using the `--network` flag or "
"configure a different default network."
)
self._provider_context.push_provider()
self._provider_is_connected = True
def pytest_terminal_summary(self, terminalreporter):
"""
Add a section to terminal summary reporting.
When ``--gas`` is active, outputs the gas profile report.
"""
if self.config_wrapper.track_gas:
self._show_gas_report(terminalreporter)
if self.config_wrapper.track_coverage:
self._show_coverage_report(terminalreporter)
def _show_gas_report(self, terminalreporter):
terminalreporter.section("Gas Profile")
if not self.network_manager.connected:
# Happens if never needed to connect (no tests)
return
self._log_tracing_support(
terminalreporter, "The gas profile is limited to receipt-level data."
)
if not self.gas_tracker.show_session_gas():
terminalreporter.write_line(
f"{LogLevel.WARNING.name}: No gas usage data found.", yellow=True
)
def _show_coverage_report(self, terminalreporter):
if self.config_wrapper.ape_test_config.coverage.reports.terminal:
terminalreporter.section("Coverage Profile")
if not self.network_manager.connected:
# Happens if never needed to connect (no tests)
return
self._log_tracing_support(
terminalreporter, "Coverage is limited to receipt-level function coverage."
)
if not self.coverage_tracker.show_session_coverage():
terminalreporter.write_line(
f"{LogLevel.WARNING.name}: No coverage data found. "
f"Try re-compiling your contracts using the latest compiler plugins",
yellow=True,
)
def _log_tracing_support(self, terminalreporter, extra_warning: str):
if self.provider.supports_tracing:
return
message = (
f"{LogLevel.ERROR.name}: Provider '{self.provider.name}' does not support "
f"transaction tracing. {extra_warning}"
)
terminalreporter.write_line(message, red=True)
def pytest_unconfigure(self):
if self._provider_is_connected and self.config_wrapper.disconnect_providers_after:
try:
self._provider_context.disconnect_all()
except Exception as err:
logger.error(f"Failed to disconnect {self}: {err}")
else:
self._provider_is_connected = False
# NOTE: Clearing the state is helpful for pytester-based tests,
# which may run pytest many times in-process.
self.receipt_capture.clear()
self.chain_manager.contracts.clear_local_caches()
self.gas_tracker.session_gas_report = None
self.coverage_tracker.reset()
| PytestApeRunner |
python | doocs__leetcode | lcof2/剑指 Offer II 093. 最长斐波那契数列/Solution.py | {
"start": 0,
"end": 596
} | class ____:
def lenLongestFibSubseq(self, arr: List[int]) -> int:
mp = {v: i for i, v in enumerate(arr)}
n = len(arr)
dp = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(i):
dp[j][i] = 2
ans = 0
for i in range(n):
for j in range(i):
delta = arr[i] - arr[j]
if delta in mp:
k = mp[delta]
if k < j:
dp[j][i] = dp[k][j] + 1
ans = max(ans, dp[j][i])
return ans
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 53994,
"end": 54102
} | class ____(Elemwise):
_projection_passthrough = True
_parameters = ["frame"]
operation = M.abs
| Abs |
python | coleifer__peewee | peewee.py | {
"start": 145763,
"end": 146725
} | class ____(object):
def __init__(self, db, *args, **kwargs):
self.db = db
self._transaction_args = (args, kwargs)
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
a, k = self._transaction_args
with _atomic(self.db, *a, **k):
return fn(*args, **kwargs)
return inner
def __enter__(self):
if self.db.transaction_depth() == 0:
args, kwargs = self._transaction_args
self._helper = self.db.transaction(*args, **kwargs)
elif isinstance(self.db.top_transaction(), _manual):
raise ValueError('Cannot enter atomic commit block while in '
'manual commit mode.')
else:
self._helper = self.db.savepoint()
return self._helper.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._helper.__exit__(exc_type, exc_val, exc_tb)
| _atomic |
python | keras-team__keras | keras/src/saving/keras_saveable.py | {
"start": 12,
"end": 1285
} | class ____:
# Note: renaming this function will cause old pickles to be broken.
# This is probably not a huge deal, as pickle should not be a recommended
# saving format -- it should only be supported for use with distributed
# computing frameworks.
def _obj_type(self):
raise NotImplementedError(
"KerasSaveable subclases must provide an "
"implementation for `obj_type()`"
)
@classmethod
def _unpickle_model(cls, bytesio):
import keras.src.saving.saving_lib as saving_lib
# pickle is not safe regardless of what you do.
return saving_lib._load_model_from_fileobj(
bytesio, custom_objects=None, compile=True, safe_mode=False
)
def __reduce__(self):
"""__reduce__ is used to customize the behavior of `pickle.pickle()`.
The method returns a tuple of two elements: a function, and a list of
arguments to pass to that function. In this case we just leverage the
keras saving library."""
import keras.src.saving.saving_lib as saving_lib
buf = io.BytesIO()
saving_lib._save_model_to_fileobj(self, buf, "h5")
return (
self._unpickle_model,
(buf,),
)
| KerasSaveable |
python | PrefectHQ__prefect | src/prefect/server/events/ordering/memory.py | {
"start": 1128,
"end": 12601
} | class ____(_CausalOrdering):
# Class-level storage for different scopes
_instances: dict[str, "CausalOrdering"] = {}
_locks: dict[str, asyncio.Lock] = {}
def __new__(cls, scope: str) -> "CausalOrdering":
if scope not in cls._instances:
cls._instances[scope] = super().__new__(cls)
return cls._instances[scope]
def __init__(self, scope: str):
# Only initialize once per scope
if hasattr(self, "_initialized") and self._initialized:
return
self.scope: str = scope
self._processing_events: set[UUID] = set()
self._seen_events: TTLCache[UUID, bool] = TTLCache(
maxsize=10000, ttl=SEEN_EXPIRATION.total_seconds()
)
self._followers: dict[UUID, set[UUID]] = {} # leader_id -> set of follower_ids
self._events: dict[UUID, ReceivedEvent] = {} # event_id -> event
self._waitlist: dict[UUID, datetime] = {} # event_id -> received_time
# Each scope gets its own lock
if scope not in self.__class__._locks:
self.__class__._locks[scope] = asyncio.Lock()
self._lock = self.__class__._locks[scope]
self._initialized = True
def clear(self) -> None:
"""Clear all data for this scope."""
self._processing_events.clear()
self._seen_events.clear()
self._followers.clear()
self._events.clear()
self._waitlist.clear()
@classmethod
def clear_all_scopes(cls) -> None:
"""Clear all data for all scopes - useful for testing."""
for instance in cls._instances.values():
instance.clear()
cls._instances.clear()
cls._locks.clear()
async def record_event_as_processing(self, event: ReceivedEvent) -> bool:
"""Record that an event is being processed, returning False if already processing."""
async with self._lock:
if event.id in self._processing_events:
return False
self._processing_events.add(event.id)
return True
async def event_has_started_processing(self, event: UUID | Event) -> bool:
event_id = event.id if isinstance(event, Event) else event
async with self._lock:
return event_id in self._processing_events
async def forget_event_is_processing(self, event: ReceivedEvent) -> None:
async with self._lock:
self._processing_events.discard(event.id)
async def event_has_been_seen(self, event: UUID | Event) -> bool:
event_id = event.id if isinstance(event, Event) else event
async with self._lock:
return event_id in self._seen_events
async def record_event_as_seen(self, event: ReceivedEvent) -> None:
async with self._lock:
self._seen_events[event.id] = True
async def record_follower(self, event: ReceivedEvent) -> None:
"""Remember that this event is waiting on another event to arrive."""
assert event.follows
async with self._lock:
self._events[event.id] = event
if event.follows not in self._followers:
self._followers[event.follows] = set()
self._followers[event.follows].add(event.id)
self._waitlist[event.id] = event.received
async def forget_follower(self, follower: ReceivedEvent) -> None:
"""Forget that this event is waiting on another event to arrive."""
assert follower.follows
async with self._lock:
self._waitlist.pop(follower.id, None)
if follower.follows in self._followers:
self._followers[follower.follows].discard(follower.id)
if not self._followers[follower.follows]:
del self._followers[follower.follows]
self._events.pop(follower.id, None)
async def get_followers(self, leader: ReceivedEvent) -> list[ReceivedEvent]:
"""Returns events that were waiting on this leader event to arrive."""
async with self._lock:
follower_ids = self._followers.get(leader.id, set()).copy()
follower_events: list[ReceivedEvent] = []
for follower_id in follower_ids:
if follower_id in self._events:
follower_events.append(self._events[follower_id])
# Sort by occurred time to maintain causal order
return sorted(follower_events, key=lambda f: f.occurred)
async def followers_by_id(self, follower_ids: list[UUID]) -> list[ReceivedEvent]:
"""Returns the events with the given IDs, in the order they occurred."""
async with self._lock:
follower_events = [
self._events[fid] for fid in follower_ids if fid in self._events
]
return sorted(follower_events, key=lambda f: f.occurred)
async def get_lost_followers(self) -> list[ReceivedEvent]:
"""Returns events that were waiting on a leader event that never arrived."""
cutoff_time = prefect.types._datetime.now("UTC") - PRECEDING_EVENT_LOOKBACK
async with self._lock:
lost_ids = [
event_id
for event_id, received_time in self._waitlist.items()
if received_time <= cutoff_time
]
# Remove lost followers from our tracking
lost_events: list[ReceivedEvent] = []
for event_id in lost_ids:
if event_id in self._events:
event = self._events[event_id]
lost_events.append(event)
# Clean up tracking for this lost event
if event.follows and event.follows in self._followers:
self._followers[event.follows].discard(event_id)
if not self._followers[event.follows]:
del self._followers[event.follows]
del self._events[event_id]
del self._waitlist[event_id]
return sorted(lost_events, key=lambda f: f.occurred)
@asynccontextmanager
async def event_is_processing(
self, event: ReceivedEvent
) -> AsyncGenerator[None, None]:
"""Mark an event as being processed for the duration of its lifespan through
the ordering system."""
if not await self.record_event_as_processing(event):
self._log(event, "is already being processed")
raise EventBeingProcessed(event)
try:
yield
await self.record_event_as_seen(event)
finally:
await self.forget_event_is_processing(event)
async def wait_for_leader(self, event: ReceivedEvent) -> None:
"""Given an event, wait for its leader to be processed before proceeding, or
raise EventArrivedEarly if we would wait too long in this attempt."""
# If this event doesn't follow anything (meaningfully), it's ready to go now
if not event.follows or event.follows == event.id:
return
# If this is an old event, we won't have accurate bookkeeping for its leader
# so we're just going to send it
age = prefect.types._datetime.now("UTC") - event.received
if age >= PRECEDING_EVENT_LOOKBACK:
return
# If the leader has already been seen, keep on trucking
if await self.event_has_been_seen(event.follows):
return
# Check if the leader is currently being processed, and if so, poll until it's
# done being processed as a quicker alternative to sitting on the waitlist
if await self.event_has_started_processing(event.follows):
try:
with anyio.fail_after(IN_FLIGHT_EVENT_TIMEOUT.total_seconds()):
while not await self.event_has_been_seen(event.follows):
await asyncio.sleep(0.25)
return
except asyncio.TimeoutError:
self._log(
event,
"timed out waiting for its in-flight leader %s, will treat as lost",
event.follows,
)
# Otherwise, we'll stop processing now and sit on the waitlist until the leader
# eventually comes through the system
self._log(event, "arrived before the event it follows %s", event.follows)
await self.record_follower(event)
raise EventArrivedEarly(event)
@asynccontextmanager
async def preceding_event_confirmed(
self,
handler: event_handler,
event: ReceivedEvent,
depth: int = 0,
) -> AsyncGenerator[None, None]:
"""
Events may optionally declare that they logically follow another event, so that
we can preserve important event orderings in the face of unreliable delivery and
ordering of messages from the queues.
This function keeps track of the ID of each event that this shard has
successfully processed going back to the PRECEDING_EVENT_LOOKBACK period. If an
event arrives that must follow another one, confirm that we have recently seen
and processed that event before proceeding.
Args:
handler: The function to call when an out-of-order event is
ready to be processed
event: The event to be processed. This object should include
metadata indicating if and what event it follows.
depth: The current recursion depth, used to prevent infinite
recursion due to cyclic dependencies between events. Defaults to 0.
Raises EventArrivedEarly if the current event shouldn't be processed yet.
"""
if depth > MAX_DEPTH_OF_PRECEDING_EVENT:
# There is either a cyclic set of events or a chain
# of events that is too long
self._log(
event,
"has exceeded the maximum recursion depth %s",
MAX_DEPTH_OF_PRECEDING_EVENT,
)
raise MaxDepthExceeded(event)
async with self.event_is_processing(event):
await self.wait_for_leader(event)
yield
# We have just processed an event that other events may have been waiting
# on, so let's react to them now in the order they occurred
try:
for waiter in await self.get_followers(event):
await handler(waiter, depth=depth + 1)
except MaxDepthExceeded:
# We'll only process the first MAX_DEPTH_OF_PRECEDING_EVENT followers.
# If we hit this limit, we'll just log and move on.
self._log(
event,
"reached its max depth of %s followers processed.",
MAX_DEPTH_OF_PRECEDING_EVENT,
)
# If this event was itself waiting on a leader, let's consider it as
# resolved now that it has been processed
if event.follows and event.follows != event.id:
await self.forget_follower(event)
def _log(self, event: ReceivedEvent, message: str, *args: Any) -> None:
logger.info(
"Event %r (%s) for %r " + message,
event.event,
event.id,
event.resource.id,
*args,
extra={
"event_id": event.id,
"follows": event.follows,
"resource_id": event.resource.id,
},
)
| CausalOrdering |
python | huggingface__transformers | src/transformers/models/lfm2/modeling_lfm2.py | {
"start": 25485,
"end": 26021
} | class ____(PreTrainedModel):
config: Lfm2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Lfm2DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Lfm2DecoderLayer,
"attentions": Lfm2Attention,
}
@auto_docstring
| Lfm2PreTrainedModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 4615,
"end": 4836
} | class ____(ArgumentError):
"""a Column is being added to a Table that would replace another
Column, without appropriate parameters to allow this in place.
.. versionadded:: 2.0.0b4
"""
| DuplicateColumnError |
python | wandb__wandb | landfill/functional_tests/artifacts/log-model.py | {
"start": 134,
"end": 1006
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def main():
my_model = Net()
_ = log_model(my_model, "my-model", aliases=["boom"], scope_project=True)
wandb.finish()
if __name__ == "__main__":
main()
| Net |
python | gevent__gevent | src/greentest/3.9/test_subprocess.py | {
"start": 2394,
"end": 63482
} | class ____(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug build push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
path_cmd = pathlib.Path("my-tool.py")
pathlib_cls = path_cmd.__class__.__name__
cases = [
("ls", True, 123, "<Popen: returncode: 123 args: 'ls'>"),
('a' * 100, True, 0,
"<Popen: returncode: 0 args: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...>"),
(["ls"], False, None, "<Popen: returncode: None args: ['ls']>"),
(["ls", '--my-opts', 'a' * 100], False, None,
"<Popen: returncode: None args: ['ls', '--my-opts', 'aaaaaaaaaaaaaaaaaaaaaaaa...>"),
(path_cmd, False, 7, f"<Popen: returncode: 7 args: {pathlib_cls}('my-tool.py')>")
]
with unittest.mock.patch.object(subprocess.Popen, '_execute_child'):
for cmd, shell, code, sx in cases:
p = subprocess.Popen(cmd, shell=shell)
p.returncode = code
self.assertEqual(repr(p), sx)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
| ProcessTestCase |
python | pypa__virtualenv | src/virtualenv/activation/via_template.py | {
"start": 396,
"end": 3209
} | class ____(Activator, ABC):
@abstractmethod
def templates(self):
raise NotImplementedError
@staticmethod
def quote(string):
"""
Quote strings in the activation script.
:param string: the string to quote
:return: quoted string that works in the activation script
"""
return shlex.quote(string)
def generate(self, creator):
dest_folder = creator.bin_dir
replacements = self.replacements(creator, dest_folder)
generated = self._generate(replacements, self.templates(), dest_folder, creator)
if self.flag_prompt is not None:
creator.pyenv_cfg["prompt"] = self.flag_prompt
return generated
def replacements(self, creator, dest_folder): # noqa: ARG002
return {
"__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
"__VIRTUAL_ENV__": str(creator.dest),
"__VIRTUAL_NAME__": creator.env_name,
"__BIN_NAME__": str(creator.bin_dir.relative_to(creator.dest)),
"__PATH_SEP__": os.pathsep,
"__TCL_LIBRARY__": getattr(creator.interpreter, "tcl_lib", None) or "",
"__TK_LIBRARY__": getattr(creator.interpreter, "tk_lib", None) or "",
}
def _generate(self, replacements, templates, to_folder, creator):
generated = []
for template in templates:
text = self.instantiate_template(replacements, template, creator)
dest = to_folder / self.as_name(template)
# remove the file if it already exists - this prevents permission
# errors when the dest is not writable
if dest.exists():
dest.unlink()
# Powershell assumes Windows 1252 encoding when reading files without BOM
encoding = "utf-8-sig" if str(template).endswith(".ps1") else "utf-8"
# use write_bytes to avoid platform specific line normalization (\n -> \r\n)
dest.write_bytes(text.encode(encoding))
generated.append(dest)
return generated
def as_name(self, template):
return template
def instantiate_template(self, replacements, template, creator):
# read content as binary to avoid platform specific line normalization (\n -> \r\n)
binary = read_binary(self.__module__, template)
text = binary.decode("utf-8", errors="strict")
for key, value in replacements.items():
value_uni = self._repr_unicode(creator, value)
text = text.replace(key, self.quote(value_uni))
return text
@staticmethod
def _repr_unicode(creator, value): # noqa: ARG004
return value # by default, we just let it be unicode
__all__ = [
"ViaTemplateActivator",
]
| ViaTemplateActivator |
python | apache__airflow | task-sdk/tests/task_sdk/io/test_path.py | {
"start": 5623,
"end": 6128
} | class ____:
@pytest.fixture(autouse=True)
def fake_fs(self, monkeypatch):
monkeypatch.setattr(ObjectStoragePath, "_fs_factory", lambda *a, **k: _FakeRemoteFileSystem())
def test_bucket_key_protocol(self):
bucket = "bkt"
key = "yek"
protocol = "s3"
o = ObjectStoragePath(f"{protocol}://{bucket}/{key}")
assert o.bucket == bucket
assert o.container == bucket
assert o.key == key
assert o.protocol == protocol
| TestRemotePath |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 75200,
"end": 77445
} | class ____(nn.Module):
def __init__(self, d_model: int, num_patches: int, expansion: int = 2):
super().__init__()
self.inverse_trans_expansion = nn.Linear(d_model + 2, expansion * d_model)
self.inverse_trans_compression = nn.Linear(expansion * d_model, d_model)
self.map_scale_expansion = nn.Linear(2, 2 * expansion)
self.map_scale_compression = nn.Linear(2 * expansion, 2)
self.num_patches = num_patches
def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
"""
Args:
inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`)
loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)
scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)
Returns:
`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`
"""
mean = loc.transpose(-1, -2) # [batch_size x n_channels x 1 ]
mean = mean.unsqueeze(-2) # [batch_size x n_channels x 1 x 1]
mean = mean.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1]
stdev = scale.transpose(-1, -2) # [batch_size x n_channels x 1 ]
stdev = stdev.unsqueeze(-2) # [batch_size x n_channels x 1 x 1]
stdev = stdev.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1]
concat_stats = torch.cat([mean, stdev], dim=-1) # [batch_size x n_channels x num_patch x 2]
concat_stats = self.map_scale_expansion(concat_stats) # [batch_size x n_channels x num_patch x (2*expansion)]
concat_stats = self.map_scale_compression(concat_stats) # [batch_size x n_channels x num_patch x 2]
inputs = torch.cat([inputs, concat_stats], dim=-1) # [batch_size x channels x num_patch x d_model+2]
inputs = self.inverse_trans_expansion(inputs) # [batch_size x channels x num_patch x (expansion*d_model)]
inputs = self.inverse_trans_compression(inputs) # [batch_size x channels x num_patch x d_model]
return inputs
@auto_docstring(
custom_intro="""
`PatchTSMixer` for regression application.
"""
)
| InjectScalerStatistics4D |
python | protocolbuffers__protobuf | python/google/protobuf/internal/unknown_fields_test.py | {
"start": 6116,
"end": 13177
} | class ____(unittest.TestCase):
def setUp(self):
self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
self.all_fields = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.all_fields)
self.all_fields_data = self.all_fields.SerializeToString()
self.empty_message = unittest_pb2.TestEmptyMessage()
self.empty_message.ParseFromString(self.all_fields_data)
def CheckUnknownField(self, name, unknown_field_set, expected_value):
field_descriptor = self.descriptor.fields_by_name[name]
expected_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[
field_descriptor.type]
for unknown_field in unknown_field_set:
if unknown_field.field_number == field_descriptor.number:
self.assertEqual(expected_type, unknown_field.wire_type)
if expected_type == 3:
# Check group
self.assertEqual(expected_value[0],
unknown_field.data[0].field_number)
self.assertEqual(expected_value[1], unknown_field.data[0].wire_type)
self.assertEqual(expected_value[2], unknown_field.data[0].data)
continue
if expected_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
self.assertIn(type(unknown_field.data), (str, bytes))
if field_descriptor.is_repeated:
self.assertIn(unknown_field.data, expected_value)
else:
self.assertEqual(expected_value, unknown_field.data)
def testCheckUnknownFieldValue(self):
unknown_field_set = unknown_fields.UnknownFieldSet(self.empty_message)
# Test enum.
self.CheckUnknownField('optional_nested_enum',
unknown_field_set,
self.all_fields.optional_nested_enum)
# Test repeated enum.
self.CheckUnknownField('repeated_nested_enum',
unknown_field_set,
self.all_fields.repeated_nested_enum)
# Test varint.
self.CheckUnknownField('optional_int32',
unknown_field_set,
self.all_fields.optional_int32)
# Test fixed32.
self.CheckUnknownField('optional_fixed32',
unknown_field_set,
self.all_fields.optional_fixed32)
# Test fixed64.
self.CheckUnknownField('optional_fixed64',
unknown_field_set,
self.all_fields.optional_fixed64)
# Test length delimited.
self.CheckUnknownField('optional_string',
unknown_field_set,
self.all_fields.optional_string.encode('utf-8'))
# Test group.
self.CheckUnknownField('optionalgroup',
unknown_field_set,
(17, 0, 117))
self.assertEqual(99, len(unknown_field_set))
def testCopyFrom(self):
message = unittest_pb2.TestEmptyMessage()
message.CopyFrom(self.empty_message)
self.assertEqual(message.SerializeToString(), self.all_fields_data)
def testMergeFrom(self):
message = unittest_pb2.TestAllTypes()
message.optional_int32 = 1
message.optional_uint32 = 2
source = unittest_pb2.TestEmptyMessage()
source.ParseFromString(message.SerializeToString())
message.ClearField('optional_int32')
message.optional_int64 = 3
message.optional_uint32 = 4
destination = unittest_pb2.TestEmptyMessage()
unknown_field_set = unknown_fields.UnknownFieldSet(destination)
self.assertEqual(0, len(unknown_field_set))
destination.ParseFromString(message.SerializeToString())
self.assertEqual(0, len(unknown_field_set))
unknown_field_set = unknown_fields.UnknownFieldSet(destination)
self.assertEqual(2, len(unknown_field_set))
destination.MergeFrom(source)
self.assertEqual(2, len(unknown_field_set))
# Check that the fields where correctly merged, even stored in the unknown
# fields set.
message.ParseFromString(destination.SerializeToString())
self.assertEqual(message.optional_int32, 1)
self.assertEqual(message.optional_uint32, 2)
self.assertEqual(message.optional_int64, 3)
def testClear(self):
unknown_field_set = unknown_fields.UnknownFieldSet(self.empty_message)
self.empty_message.Clear()
# All cleared, even unknown fields.
self.assertEqual(self.empty_message.SerializeToString(), b'')
self.assertEqual(len(unknown_field_set), 99)
@unittest.skipIf((sys.version_info.major, sys.version_info.minor) < (3, 4),
'tracemalloc requires python 3.4+')
def testUnknownFieldsNoMemoryLeak(self):
# Call to UnknownFields must not leak memory
nb_leaks = 1234
def leaking_function():
for _ in range(nb_leaks):
unknown_fields.UnknownFieldSet(self.empty_message)
tracemalloc.start()
snapshot1 = tracemalloc.take_snapshot()
leaking_function()
snapshot2 = tracemalloc.take_snapshot()
top_stats = snapshot2.compare_to(snapshot1, 'lineno')
tracemalloc.stop()
# There's no easy way to look for a precise leak source.
# Rely on a "marker" count value while checking allocated memory.
self.assertEqual([], [x for x in top_stats if x.count_diff == nb_leaks])
def testSubUnknownFields(self):
message = unittest_pb2.TestAllTypes()
message.optionalgroup.a = 123
destination = unittest_pb2.TestEmptyMessage()
destination.ParseFromString(message.SerializeToString())
sub_unknown_fields = unknown_fields.UnknownFieldSet(destination)[0].data
self.assertEqual(1, len(sub_unknown_fields))
self.assertEqual(sub_unknown_fields[0].data, 123)
destination.Clear()
self.assertEqual(1, len(sub_unknown_fields))
self.assertEqual(sub_unknown_fields[0].data, 123)
message.Clear()
message.optional_uint32 = 456
nested_message = unittest_pb2.NestedTestAllTypes()
nested_message.payload.optional_nested_message.ParseFromString(
message.SerializeToString())
unknown_field_set = unknown_fields.UnknownFieldSet(
nested_message.payload.optional_nested_message)
self.assertEqual(unknown_field_set[0].data, 456)
nested_message.ClearField('payload')
self.assertEqual(unknown_field_set[0].data, 456)
unknown_field_set = unknown_fields.UnknownFieldSet(
nested_message.payload.optional_nested_message)
self.assertEqual(0, len(unknown_field_set))
def testUnknownField(self):
message = unittest_pb2.TestAllTypes()
message.optional_int32 = 123
destination = unittest_pb2.TestEmptyMessage()
destination.ParseFromString(message.SerializeToString())
unknown_field = unknown_fields.UnknownFieldSet(destination)[0]
destination.Clear()
self.assertEqual(unknown_field.data, 123)
def testUnknownExtensions(self):
message = unittest_pb2.TestEmptyMessageWithExtensions()
message.ParseFromString(self.all_fields_data)
self.assertEqual(len(unknown_fields.UnknownFieldSet(message)), 99)
self.assertEqual(message.SerializeToString(), self.all_fields_data)
@testing_refleaks.TestCase
| UnknownFieldsAccessorsTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 2942,
"end": 3037
} | class ____(A6):
def m2(self):
return self.m1() # Interval: [2,5] /\ [6,7] = Empty
| C6 |
python | huggingface__transformers | src/transformers/models/smollm3/modeling_smollm3.py | {
"start": 19657,
"end": 22704
} | class ____(SmolLM3PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = SmolLM3Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, SmolLM3ForCausalLM
>>> model = SmolLM3ForCausalLM.from_pretrained("meta-smollm3/SmolLM3-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-smollm3/SmolLM3-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| SmolLM3ForCausalLM |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 360006,
"end": 364856
} | class ____(Response):
"""
Response of tasks.reset_many endpoint.
:param reset: Number of tasks reset
:type reset: int
:param dequeued: Number of tasks dequeued
:type dequeued: dict
:param deleted_models: Number of output models deleted by the reset
:type deleted_models: int
:param urls: The urls of the files that were uploaded by the tasks. Returned if
the 'return_file_urls' was set to 'true'
:type urls: TaskUrls
"""
_service = "tasks"
_action = "reset_many"
_version = "2.13"
_schema = {
"definitions": {
"task_urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"failures": {
"item": {
"error": {
"description": "Error info",
"properties": {
"codes": {"item": {"type": "integer"}, "type": "array"},
"data": {"additionalProperties": True, "type": "object"},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {"description": "ID of the failed entity", "type": "string"},
"type": "object",
},
"type": "array",
},
"properties": {
"deleted_models": {
"description": "Number of output models deleted by the reset",
"type": ["integer", "null"],
},
"dequeued": {
"additionalProperties": True,
"description": "Number of tasks dequeued",
"type": ["object", "null"],
},
"reset": {
"description": "Number of tasks reset",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by the tasks. Returned if the 'return_file_urls' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
},
},
}
def __init__(
self,
reset: Optional[int] = None,
dequeued: Optional[dict] = None,
deleted_models: Optional[int] = None,
urls: Any = None,
**kwargs: Any
) -> None:
super(ResetManyResponse, self).__init__(**kwargs)
self.reset = reset
self.dequeued = dequeued
self.deleted_models = deleted_models
self.urls = urls
@schema_property("reset")
def reset(self) -> Optional[int]:
return self._property_reset
@reset.setter
def reset(self, value: Optional[int]) -> None:
if value is None:
self._property_reset = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "reset", six.integer_types)
self._property_reset = value
@schema_property("dequeued")
def dequeued(self) -> Optional[dict]:
return self._property_dequeued
@dequeued.setter
def dequeued(self, value: Optional[dict]) -> None:
if value is None:
self._property_dequeued = None
return
self.assert_isinstance(value, "dequeued", (dict,))
self._property_dequeued = value
@schema_property("deleted_models")
def deleted_models(self) -> Optional[int]:
return self._property_deleted_models
@deleted_models.setter
def deleted_models(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_models", six.integer_types)
self._property_deleted_models = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = TaskUrls.from_dict(value)
else:
self.assert_isinstance(value, "urls", TaskUrls)
self._property_urls = value
| ResetManyResponse |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 34662,
"end": 35983
} | class ____(nn.Module):
def __init__(self, config, dropout=0.1, activation="relu"):
super().__init__()
hidden_size = config.patch_transformer_hidden_size
intermediate_size = config.patch_transformer_intermediate_size
num_attention_heads = config.patch_transformer_num_attention_heads
self.self_attn = ZoeDepthMultiheadAttention(hidden_size, num_attention_heads, dropout=dropout)
self.linear1 = nn.Linear(hidden_size, intermediate_size)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(intermediate_size, hidden_size)
self.norm1 = nn.LayerNorm(hidden_size)
self.norm2 = nn.LayerNorm(hidden_size)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
def forward(
self,
src,
src_mask: Optional[torch.Tensor] = None,
):
queries = keys = src
src2 = self.self_attn(queries=queries, keys=keys, values=src, attention_mask=src_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
| ZoeDepthTransformerEncoderLayer |
python | rapidsai__cudf | python/cudf/cudf/core/column_accessor.py | {
"start": 2609,
"end": 28289
} | class ____(MutableMapping):
"""
Parameters
----------
data : mapping
Mapping of keys to column values.
multiindex : bool, optional
Whether tuple keys represent a hierarchical
index with multiple "levels" (default=False).
level_names : tuple, optional
Tuple containing names for each of the levels.
For a non-hierarchical index, a tuple of size 1
may be passe.
rangeindex : bool, optional
Whether the keys should be returned as a RangeIndex
in `to_pandas_index` (default=False).
label_dtype : Dtype, optional
What dtype should be returned in `to_pandas_index`
(default=None).
verify : bool, optional
For non ColumnAccessor inputs, whether to verify
column length and data.values() are all Columns
"""
_data: dict[Hashable, ColumnBase]
_level_names: tuple[Hashable, ...]
def __init__(
self,
data: MutableMapping[Hashable, ColumnBase] | Self,
multiindex: bool = False,
level_names=None,
rangeindex: bool = False,
label_dtype: Dtype | None = None,
verify: bool = True,
) -> None:
if isinstance(data, ColumnAccessor):
self._data = data._data
self._level_names = data.level_names
self.multiindex: bool = data.multiindex
self.rangeindex: bool = data.rangeindex
self.label_dtype: Dtype | None = data.label_dtype
elif isinstance(data, MutableMapping):
# This code path is performance-critical for copies and should be
# modified with care.
if data and verify:
# Faster than next(iter(data.values()))
column_length = len(data[next(iter(data))])
# TODO: we should validate the keys of `data`
for col in data.values():
if not isinstance(col, column.ColumnBase):
raise ValueError(
f"All data.values() must be Column, not {type(col).__name__}"
)
if len(col) != column_length:
raise ValueError("All columns must be of equal length")
if not isinstance(data, dict):
data = dict(data)
self._data = data
if rangeindex and multiindex:
raise ValueError(
f"{rangeindex=} and {multiindex=} cannot both be True."
)
self.rangeindex = rangeindex
self.multiindex = multiindex
self.label_dtype = label_dtype
self._level_names = level_names
else:
raise ValueError(
f"data must be a ColumnAccessor or MutableMapping, not {type(data).__name__}"
)
def __iter__(self) -> Iterator:
return iter(self._data)
def __getitem__(self, key: Hashable) -> ColumnBase:
return self._data[key]
def __setitem__(self, key: Hashable, value: ColumnBase) -> None:
self.set_by_label(key, value)
def __delitem__(self, key: Hashable) -> None:
old_ncols = len(self)
del self._data[key]
new_ncols = len(self)
self._clear_cache(old_ncols, new_ncols)
def __len__(self) -> int:
return len(self._data)
def __repr__(self) -> str:
type_info = (
f"{self.__class__.__name__}("
f"multiindex={self.multiindex}, "
f"level_names={self.level_names}, "
f"rangeindex={self.rangeindex}, "
f"label_dtype={self.label_dtype})"
)
column_info = "\n".join(
[f"{name}: {col.dtype}" for name, col in self.items()]
)
return f"{type_info}\n{column_info}"
def _from_columns_like_self(
self, columns: Iterable[ColumnBase], verify: bool = True
) -> Self:
"""
Return a new ColumnAccessor with columns and the properties of self.
Parameters
----------
columns : iterable of Columns
New columns for the ColumnAccessor.
verify : bool, optional
Whether to verify column length and type.
"""
return type(self)(
data=dict(zip(self.names, columns, strict=True)),
multiindex=self.multiindex,
level_names=self.level_names,
rangeindex=self.rangeindex,
label_dtype=self.label_dtype,
verify=verify,
)
@property
def level_names(self) -> tuple[Hashable, ...]:
if self.is_cached("to_pandas_index"):
return self.to_pandas_index.names
if self._level_names is None or len(self._level_names) == 0:
return tuple((None,) * max(1, self.nlevels))
else:
return self._level_names
def is_cached(self, attr_name: str) -> bool:
return attr_name in self.__dict__
@property
def nlevels(self) -> int:
if len(self) == 0:
return 0
if not self.multiindex:
return 1
else:
return len(next(iter(self.keys())))
@property
def name(self) -> Hashable:
return self.level_names[-1]
@cached_property
def nrows(self) -> int:
if len(self) == 0:
return 0
else:
return len(next(iter(self.values())))
@cached_property
def names(self) -> tuple[Hashable, ...]:
return tuple(self.keys())
@cached_property
def columns(self) -> tuple[ColumnBase, ...]:
return tuple(self.values())
@cached_property
def _grouped_data(self) -> MutableMapping:
"""
If self.multiindex is True,
return the underlying mapping as a nested mapping.
"""
if self.multiindex:
return _NestedGetItemDict.from_zip(
zip(self.names, self.columns, strict=True)
)
else:
return self._data
def _clear_cache(self, old_ncols: int, new_ncols: int) -> None:
"""
Clear cached attributes.
Parameters
----------
old_ncols: int
len(self) before self._data was modified
new_ncols: int
len(self) after self._data was modified
"""
cached_properties = (
"columns",
"names",
"_grouped_data",
"to_pandas_index",
)
for attr in cached_properties:
try:
self.__delattr__(attr)
except AttributeError:
pass
# nrows should only be cleared if empty before/after the op.
if (old_ncols == 0) ^ (new_ncols == 0):
try:
del self.nrows
except AttributeError:
pass
@cached_property
def to_pandas_index(self) -> pd.Index:
"""Convert the keys of the ColumnAccessor to a Pandas Index object."""
if self.multiindex and len(self.level_names) > 0:
result = pd.MultiIndex.from_tuples(
self.names,
names=self.level_names,
)
else:
# Determine if we can return a RangeIndex
if self.rangeindex:
if not self.names:
return pd.RangeIndex(
start=0, stop=0, step=1, name=self.name
)
elif infer_dtype(self.names) == "integer":
if len(self.names) == 1:
start = cast(int, self.names[0])
return pd.RangeIndex(
start=start, stop=start + 1, step=1, name=self.name
)
uniques = np.unique(np.diff(np.array(self.names)))
if len(uniques) == 1 and uniques[0] != 0:
diff = uniques[0]
new_range = range(
cast(int, self.names[0]),
cast(int, self.names[-1]) + diff,
diff,
)
return pd.RangeIndex(new_range, name=self.name)
result = pd.Index(
self.names,
name=self.name,
tupleize_cols=False,
dtype=self.label_dtype,
)
return result
def insert(self, name: Hashable, value: ColumnBase, loc: int = -1) -> None:
"""
Insert column into the ColumnAccessor at the specified location.
Parameters
----------
name : Name corresponding to the new column
value : ColumnBase
loc : int, optional
The location to insert the new value at.
Must be (0 <= loc <= ncols). By default, the column is added
to the end.
Returns
-------
None, this function operates in-place.
"""
name = self._pad_key(name)
if name in self._data:
raise ValueError(f"Cannot insert '{name}', already exists")
old_ncols = len(self)
if loc == -1:
loc = old_ncols
elif not (0 <= loc <= old_ncols):
raise ValueError(
f"insert: loc out of bounds: must be 0 <= loc <= {old_ncols}"
)
if not isinstance(value, column.ColumnBase):
raise ValueError("value must be a Column")
elif old_ncols > 0 and len(value) != self.nrows:
raise ValueError("All columns must be of equal length")
if cudf.get_option("mode.pandas_compatible"):
try:
pd_idx1 = pd.Index(
[*list(self.names), name], dtype=self.label_dtype
)
pd_idx2 = pd.Index([*list(self.names), name])
if (
pd_idx1.dtype != pd_idx2.dtype
and is_mixed_with_object_dtype(pd_idx1, pd_idx2)
and pd_idx1.inferred_type != pd_idx2.inferred_type
):
raise MixedTypeError(
"Cannot insert column with mixed types when label_dtype is set"
)
except Exception as e:
raise e
else:
self.label_dtype = None
# TODO: we should move all insert logic here
if loc == old_ncols:
self._data[name] = value
else:
new_keys = self.names[:loc] + (name,) + self.names[loc:]
new_values = self.columns[:loc] + (value,) + self.columns[loc:]
self._data = dict(zip(new_keys, new_values, strict=True))
self._clear_cache(old_ncols, old_ncols + 1)
# The type(name) may no longer match the prior label_dtype
def copy(self, deep: bool = False) -> Self:
"""
Make a copy of this ColumnAccessor.
"""
if deep or cudf.get_option("copy_on_write"):
data = {k: v.copy(deep=deep) for k, v in self._data.items()}
else:
data = self._data.copy()
return self.__class__(
data=data,
multiindex=self.multiindex,
level_names=self.level_names,
rangeindex=self.rangeindex,
label_dtype=self.label_dtype,
verify=False,
)
def select_by_label(self, key: Any) -> Self:
"""
Return a subset of this column accessor,
composed of the keys specified by `key`.
Parameters
----------
key : slice, list-like, tuple or scalar
Returns
-------
ColumnAccessor
"""
if isinstance(key, slice):
return self._select_by_label_slice(key)
elif not (isinstance(key, tuple) or is_scalar(key)):
return self._select_by_label_list_like(tuple(key))
else:
if isinstance(key, tuple):
if any(isinstance(k, slice) for k in key):
return self._select_by_label_with_wildcard(key)
return self._select_by_label_grouped(key)
def get_labels_by_index(
self, index: slice | int | Iterable[int | bool]
) -> tuple:
"""Get the labels corresponding to the provided column indices.
Parameters
----------
index : integer, integer slice, boolean mask,
or list-like of integers
The column indexes.
Returns
-------
tuple
"""
if isinstance(index, slice):
start, stop, step = index.indices(len(self))
return self.names[start:stop:step]
elif isinstance(index, int):
return (self.names[index],)
elif (bn := len(index)) > 0 and all(map(_is_bool, index)): # type: ignore[arg-type]
if bn != (n := len(self)):
raise IndexError(
f"Boolean mask has wrong length: {bn} not {n}"
)
if isinstance(index, (pd.Series, cudf.Series)):
# Don't allow iloc indexing with series
raise NotImplementedError(
"Cannot use Series object for mask iloc indexing"
)
# TODO: Doesn't handle on-device columns
return tuple(
n for n, keep in zip(self.names, index, strict=True) if keep
)
else:
if len(set(index)) != len(index): # type: ignore[arg-type]
raise NotImplementedError(
"Selecting duplicate column labels is not supported."
)
return tuple(self.names[i] for i in index)
def select_by_index(self, index: Any) -> Self:
"""
Return a ColumnAccessor composed of the columns
specified by index.
Parameters
----------
key : integer, integer slice, boolean mask,
or list-like of integers
Returns
-------
ColumnAccessor
"""
keys = self.get_labels_by_index(index)
data = {k: self._data[k] for k in keys}
return type(self)(
data,
multiindex=self.multiindex,
level_names=self.level_names,
label_dtype=self.label_dtype,
verify=False,
)
def swaplevel(self, i: Hashable = -2, j: Hashable = -1) -> Self:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int or str, default -2
First level of index to be swapped.
j : int or str, default -1
Second level of index to be swapped.
Returns
-------
ColumnAccessor
"""
if not self.multiindex:
raise ValueError(
"swaplevel is only valid for self.multiindex=True"
)
i = _get_level(i, self.nlevels, self.level_names)
j = _get_level(j, self.nlevels, self.level_names)
new_keys = [list(row) for row in self]
new_dict = {}
# swap old keys for i and j
for n, row in enumerate(self.names):
new_keys[n][i], new_keys[n][j] = row[j], row[i] # type: ignore[call-overload, index]
new_dict.update({row: tuple(new_keys[n])})
# TODO: Change to deep=False when copy-on-write is default
new_data = {new_dict[k]: v.copy(deep=True) for k, v in self.items()}
# swap level_names for i and j
new_names = list(self.level_names)
new_names[i], new_names[j] = new_names[j], new_names[i] # type: ignore[call-overload]
return type(self)(
new_data, # type: ignore[arg-type]
multiindex=self.multiindex,
level_names=new_names,
rangeindex=self.rangeindex,
label_dtype=self.label_dtype,
verify=False,
)
def set_by_label(self, key: Hashable, value: ColumnBase) -> None:
"""
Add (or modify) column by name.
Parameters
----------
key
name of the column
value : Column
The value to insert into the column.
"""
key = self._pad_key(key)
if not isinstance(value, column.ColumnBase):
raise ValueError("value must be a Column")
if len(self) > 0 and len(value) != self.nrows:
raise ValueError("All columns must be of equal length")
old_ncols = len(self)
self._data[key] = value
new_ncols = len(self)
self._clear_cache(old_ncols, new_ncols)
def _select_by_label_list_like(self, key: tuple) -> Self:
# Special-casing for boolean mask
if (bn := len(key)) > 0 and all(map(_is_bool, key)):
if bn != (n := len(self)):
raise IndexError(
f"Boolean mask has wrong length: {bn} not {n}"
)
data = dict(
item
for item, keep in zip(
self._grouped_data.items(), key, strict=True
)
if keep
)
else:
data = {k: self._grouped_data[k] for k in key}
if len(data) != len(key):
raise ValueError(
"Selecting duplicate column labels is not supported."
)
if self.multiindex:
data = dict(_to_flat_dict_inner(data))
return type(self)(
data,
multiindex=self.multiindex,
level_names=self.level_names,
label_dtype=self.label_dtype,
verify=False,
)
def _select_by_label_grouped(self, key: Hashable) -> Self:
result = self._grouped_data[key]
if isinstance(result, column.ColumnBase):
# self._grouped_data[key] = self._data[key] so skip validation
return type(self)(
data={key: result},
multiindex=self.multiindex,
label_dtype=self.label_dtype,
verify=False,
)
else:
if self.multiindex:
result = dict(_to_flat_dict_inner(result))
if not isinstance(key, tuple):
key = (key,)
return self.__class__(
result,
multiindex=self.nlevels - len(key) > 1,
level_names=self.level_names[len(key) :],
verify=False,
)
def _select_by_label_slice(self, key: slice) -> Self:
start, stop = key.start, key.stop
if len(self) == 0:
# https://github.com/rapidsai/cudf/issues/18376
# Any slice is valid when we have no columns
return self._from_columns_like_self([], verify=False)
if key.step is not None:
raise TypeError("Label slicing with step is not supported")
if start is None:
start = self.names[0]
if stop is None:
stop = self.names[-1]
start = self._pad_key(start, slice(None))
stop = self._pad_key(stop, slice(None))
for idx, name in enumerate(self.names):
if _keys_equal(name, start):
start_idx = idx
break
for idx, name in enumerate(reversed(self.names)):
if _keys_equal(name, stop):
stop_idx = len(self) - idx
break
keys = self.names[start_idx:stop_idx]
return type(self)(
{k: self._data[k] for k in keys},
multiindex=self.multiindex,
level_names=self.level_names,
label_dtype=self.label_dtype,
verify=False,
)
def _select_by_label_with_wildcard(self, key: tuple) -> Self:
pad_key = self._pad_key(key, slice(None))
data = {
k: self._data[k]
for k in self.names
if _keys_equal(k, pad_key) # type: ignore[arg-type]
}
return type(self)(
data,
multiindex=self.multiindex,
level_names=self.level_names,
label_dtype=self.label_dtype,
verify=False,
)
def _pad_key(self, key: Hashable, pad_value: str | slice = "") -> Hashable:
"""
Pad the provided key to a length equal to the number
of levels.
"""
if not self.multiindex:
return key
if not isinstance(key, tuple):
key = (key,)
return key + (pad_value,) * (self.nlevels - len(key))
def rename_levels(
self,
mapper: Mapping[Hashable, Hashable] | Callable,
level: int | None = None,
) -> Self:
"""
Rename the specified levels of the given ColumnAccessor
Parameters
----------
self : ColumnAccessor of a given dataframe
mapper : dict-like or function transformations to apply to
the column label values depending on selected ``level``.
If dict-like, only replace the specified level of the
ColumnAccessor's keys (that match the mapper's keys) with
mapper's values
If callable, the function is applied only to the specified level
of the ColumnAccessor's keys.
level : int
In case of RangeIndex, only supported level is [0, None].
In case of a MultiColumn, only the column labels in the specified
level of the ColumnAccessor's keys will be transformed.
Returns
-------
A new ColumnAccessor with values in the keys replaced according
to the given mapper and level.
"""
new_col_names: Iterable
if self.multiindex:
def rename_column(x):
x = list(x)
if isinstance(mapper, Mapping):
x[level] = mapper.get(x[level], x[level])
else:
x[level] = mapper(x[level])
x = tuple(x)
return x
if level is None:
level = 0
new_col_names = (rename_column(k) for k in self.keys())
else:
if level is None:
level = 0
if level != 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level + 1}"
)
if isinstance(mapper, Mapping):
new_col_names = [
mapper.get(col_name, col_name) for col_name in self.keys()
]
else:
new_col_names = [mapper(col_name) for col_name in self.keys()]
if len(new_col_names) != len(set(new_col_names)):
raise ValueError("Duplicate column names are not allowed")
label_dtype = self.label_dtype
if len(self) > 0 and label_dtype is not None:
old_type = type(next(iter(self.keys())))
if not all(isinstance(label, old_type) for label in new_col_names):
label_dtype = None
data = dict(zip(new_col_names, self.values(), strict=True))
return type(self)(
data=data,
level_names=self.level_names,
multiindex=self.multiindex,
label_dtype=label_dtype,
verify=False,
)
    def droplevel(self, level: int) -> None:
        """Remove the ``level``-th level from every column label, in place.

        Negative ``level`` counts from the end. Labels that collapse to the
        same key after removal overwrite each other via plain dict semantics
        — NOTE(review): presumably callers guarantee the result stays unique;
        confirm.
        """
        # drop the nth level
        if level < 0:
            level += self.nlevels
        # Record the width before mutating so cache invalidation below can
        # compare old vs new column counts.
        old_ncols = len(self)
        self._data = {
            _remove_key_level(key, level): value  # type: ignore[arg-type]
            for key, value in self._data.items()
        }
        new_ncols = len(self)
        self._level_names = (
            self.level_names[:level] + self.level_names[level + 1 :]
        )
        if len(self.level_names) == 1:
            # can't use nlevels, as it depends on multiindex
            self.multiindex = False
        self._clear_cache(old_ncols, new_ncols)
def _keys_equal(target: Hashable, key: Iterable) -> bool:
"""
Compare `key` to `target`.
Return True if each value in `key` == corresponding value in `target`.
If any value in `key` is slice(None), it is considered equal
to the corresponding value in `target`.
"""
if not isinstance(target, tuple):
return target == key
for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):
if k2 == slice(None):
continue
if k1 != k2:
return False
return True
def _remove_key_level(key: tuple, level: int) -> Hashable:
"""
Remove a level from key. If detupleize is True, and if only a
single level remains, convert the tuple to a scalar.
"""
result = key[:level] + key[level + 1 :]
if len(result) == 1:
return result[0]
return result
def _get_level(
x: Hashable, nlevels: int, level_names: tuple[Hashable, ...]
) -> Hashable:
"""Get the level index from a level number or name.
If given an integer, this function will handle wraparound for
negative values. If given a string (the level name), this function
will extract the index of that level from `level_names`.
Parameters
----------
x
The level number to validate
nlevels
The total available levels in the MultiIndex
level_names
The names of the levels.
"""
if isinstance(x, int):
if x < 0:
x += nlevels
if x >= nlevels:
raise IndexError(
f"Level {x} out of bounds. Index has {nlevels} levels."
)
return x
else:
x = level_names.index(x)
return x
| ColumnAccessor |
python | django__django | tests/template_tests/syntax_tests/i18n/test_blocktranslate.py | {
"start": 31315,
"end": 31389
} | class ____(MiscTests):
tag_name = "blocktrans"
| MiscBlockTranslationTests |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_xcoms.py | {
"start": 11329,
"end": 20256
} | class ____:
@pytest.mark.parametrize(
("value", "expected_value"),
[
('"value1"', '"value1"'),
('{"key2": "value2"}', '{"key2": "value2"}'),
('{"key2": "value2", "key3": ["value3"]}', '{"key2": "value2", "key3": ["value3"]}'),
('["value1"]', '["value1"]'),
(None, None),
],
)
def test_xcom_set(self, client, create_task_instance, session, value, expected_value):
"""
Test that XCom value is set correctly. The request body can be either:
- a JSON string (e.g. '"value"', '{"k":"v"}', '[1]'), which is stored as-is (a string) in the DB
- the JSON literal null, which is stored as a None
This mirrors the Execution API contract where the body is the JSON value itself; Task SDKs may send
pre-serialized JSON strings or null.
"""
ti = create_task_instance()
session.commit()
value = serialize(value)
response = client.post(
f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/xcom_1",
json=value,
)
assert response.status_code == 201
assert response.json() == {"message": "XCom successfully set"}
xcom = session.query(XComModel).filter_by(task_id=ti.task_id, dag_id=ti.dag_id, key="xcom_1").first()
assert xcom.value == expected_value
task_map = session.query(TaskMap).filter_by(task_id=ti.task_id, dag_id=ti.dag_id).one_or_none()
assert task_map is None, "Should not be mapped"
@pytest.mark.parametrize(
("orig_value", "ser_value", "deser_value"),
[
pytest.param(1, 1, 1, id="int"),
pytest.param(1.0, 1.0, 1.0, id="float"),
pytest.param("string", "string", "string", id="str"),
pytest.param(True, True, True, id="bool"),
pytest.param({"key": "value"}, {"key": "value"}, {"key": "value"}, id="dict"),
pytest.param([1, 2], [1, 2], [1, 2], id="list"),
pytest.param(
(1, 2),
# Client serializes tuple as encoded list, send the encoded list to the API
{"__classname__": "builtins.tuple", "__data__": [1, 2], "__version__": 1},
# The API will send the encoded list to the DB and sends the same encoded list back
# during the response to the client as it is the clients responsibility to
# serialize it into a JSON object & deserialize value into a native object.
{"__classname__": "builtins.tuple", "__data__": [1, 2], "__version__": 1},
id="tuple",
),
],
)
def test_xcom_round_trip(self, client, create_task_instance, session, orig_value, ser_value, deser_value):
"""
Test that deserialization works when XCom values are stored directly in the DB with API Server.
This tests the case where the XCom value is stored from the Task API where the value is serialized
via Client SDK into JSON object and passed via the API Server to the DB. It by-passes
the XComModel.serialize_value and stores valid Python JSON compatible objects to DB.
This test is to ensure that the deserialization works correctly in this case as well as
checks that the value is stored correctly before it hits the API.
"""
ti = create_task_instance()
session.commit()
# Serialize the value to simulate the client SDK
value = serialize(orig_value)
# Test that the value is serialized correctly
assert value == ser_value
response = client.post(
f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/xcom_1",
json=value,
)
assert response.status_code == 201
stored_value = session.execute(
XComModel.get_many(
key="xcom_1",
dag_ids=ti.dag_id,
task_ids=ti.task_id,
run_id=ti.run_id,
).with_only_columns(XComModel.value)
).first()
deserialized_value = XComModel.deserialize_value(stored_value)
assert deserialized_value == deser_value
# Ensure that the deserialized value on the client side is the same as the original value
assert deserialize(deserialized_value) == orig_value
def test_xcom_set_mapped(self, client, create_task_instance, session):
ti = create_task_instance()
session.commit()
value = serialize("value1")
response = client.post(
f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/xcom_1",
params={"map_index": -1, "mapped_length": 3},
json=value,
)
assert response.status_code == 201
assert response.json() == {"message": "XCom successfully set"}
xcom = (
session.query(XComModel)
.filter_by(task_id=ti.task_id, dag_id=ti.dag_id, key="xcom_1", map_index=-1)
.first()
)
assert xcom.value == "value1"
task_map = session.query(TaskMap).filter_by(task_id=ti.task_id, dag_id=ti.dag_id).one_or_none()
assert task_map is not None, "Should be mapped"
assert task_map.dag_id == "dag"
assert task_map.run_id == "test"
assert task_map.task_id == "op1"
assert task_map.map_index == -1
assert task_map.length == 3
@pytest.mark.parametrize(
("length", "err_context"),
[
pytest.param(
20,
contextlib.nullcontext(),
id="20-success",
),
pytest.param(
2000,
pytest.raises(httpx.HTTPStatusError),
id="2000-too-long",
),
],
)
def test_xcom_set_downstream_of_mapped(self, client, create_task_instance, session, length, err_context):
"""
Test that XCom value is set correctly. The value is passed as a JSON string in the request body.
XCom.set then uses json.dumps to serialize it and store the value in the database.
This is done so that Task SDK in multiple languages can use the same API to set XCom values.
"""
ti = create_task_instance()
session.commit()
with err_context:
response = client.post(
f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/xcom_1",
json='"valid json"',
params={"mapped_length": length},
)
response.raise_for_status()
task_map = session.query(TaskMap).filter_by(task_id=ti.task_id, dag_id=ti.dag_id).one_or_none()
assert task_map.length == length
@pytest.mark.usefixtures("access_denied")
def test_xcom_access_denied(self, client, caplog):
with caplog.at_level(logging.DEBUG):
response = client.post(
"/execution/xcoms/dag/runid/task/xcom_perms",
json='"value1"',
)
assert response.status_code == 403
assert response.json() == {
"detail": {
"reason": "access_denied",
}
}
assert any(msg.startswith("Checking write XCom access") for msg in caplog.messages)
@pytest.mark.parametrize(
("value", "expected_value"),
[
('"value1"', '"value1"'),
('{"key2": "value2"}', '{"key2": "value2"}'),
('{"key2": "value2", "key3": ["value3"]}', '{"key2": "value2", "key3": ["value3"]}'),
('["value1"]', '["value1"]'),
],
)
def test_xcom_roundtrip(self, client, create_task_instance, session, value, expected_value):
"""
Test that XCom value is set and retrieved correctly using API.
This test sets an XCom value using the API and then retrieves it using the API so we can
ensure client and server are working correctly together. The server expects a JSON string
and it will also return a JSON string. It is the client's responsibility to parse the JSON
string into a native object. This is useful for Task SDKs in other languages.
"""
ti = create_task_instance()
value = serialize(value)
session.commit()
client.post(
f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/test_xcom_roundtrip",
json=value,
)
xcom = (
session.query(XComModel)
.filter_by(task_id=ti.task_id, dag_id=ti.dag_id, key="test_xcom_roundtrip")
.first()
)
assert xcom.value == expected_value
response = client.get(f"/execution/xcoms/{ti.dag_id}/{ti.run_id}/{ti.task_id}/test_xcom_roundtrip")
assert response.status_code == 200
assert XComResponse.model_validate_json(response.read()).value == expected_value
| TestXComsSetEndpoint |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 46881,
"end": 46925
} | class ____(mkl_info):
pass
| lapack_mkl_info |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/snowflake_datasource.py | {
"start": 9321,
"end": 9605
} | class ____(pydantic.UrlError):
"""
Custom Pydantic error for missing query parameters in SnowflakeDsn.
"""
def __init__(self, **ctx: Any) -> None:
super().__init__(**ctx)
code = "url.query"
msg_template = "URL query param missing"
| _UrlMissingQueryError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink08.py | {
"start": 315,
"end": 1047
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url(
"A1", "external://VBOXSVR/share/foo.xlsx", None, "J:\\foo.xlsx"
)
worksheet.write_url("A3", "external:foo.xlsx")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/settings_tests/tests.py | {
"start": 4394,
"end": 4534
} | class ____(TestCase):
pass
@modify_settings(ITEMS={"append": ["child"]})
@override_settings(TEST="override-child")
| ParentDecoratedTestCase |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/actions.py | {
"start": 1377,
"end": 1656
} | class ____(CompositeAction):
"""Composite action parser for a Windows target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return WindowsTargetParser()
| WindowsTargetAction |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 9217,
"end": 9597
} | class ____(Option, metaclass=OptionType):
"""``order`` option to polynomial manipulation functions. """
option = 'order'
requires: list[str] = []
excludes: list[str] = []
@classmethod
def default(cls):
return sympy.polys.orderings.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.orderings.monomial_key(order)
| Order |
python | django__django | tests/utils_tests/test_http.py | {
"start": 15045,
"end": 19581
} | class ____(unittest.TestCase):
def test_basic(self):
tests = [
("", ("", {})),
(None, ("", {})),
("text/plain", ("text/plain", {})),
("text/vnd.just.made.this.up ; ", ("text/vnd.just.made.this.up", {})),
("text/plain;charset=us-ascii", ("text/plain", {"charset": "us-ascii"})),
(
'text/plain ; charset="us-ascii"',
("text/plain", {"charset": "us-ascii"}),
),
(
'text/plain ; charset="us-ascii"; another=opt',
("text/plain", {"charset": "us-ascii", "another": "opt"}),
),
(
'attachment; filename="silly.txt"',
("attachment", {"filename": "silly.txt"}),
),
(
'attachment; filename="strange;name"',
("attachment", {"filename": "strange;name"}),
),
(
'attachment; filename="strange;name";size=123;',
("attachment", {"filename": "strange;name", "size": "123"}),
),
(
'attachment; filename="strange;name";;;;size=123;;;',
("attachment", {"filename": "strange;name", "size": "123"}),
),
(
'form-data; name="files"; filename="fo\\"o;bar"',
("form-data", {"name": "files", "filename": 'fo"o;bar'}),
),
(
'form-data; name="files"; filename="\\"fo\\"o;b\\\\ar\\""',
("form-data", {"name": "files", "filename": '"fo"o;b\\ar"'}),
),
]
for header, expected in tests:
with self.subTest(header=header):
self.assertEqual(parse_header_parameters(header), expected)
def test_rfc2231_parsing(self):
test_data = (
(
"Content-Type: application/x-stuff; "
"title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***",
),
(
"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html",
),
(
"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html",
),
)
for raw_line, expected_title in test_data:
parsed = parse_header_parameters(raw_line)
self.assertEqual(parsed[1]["title"], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(
"Content-Type: application/x-stuff; "
"title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
"'This%20is%20%2A%2A%2Afun%2A%2A%2A",
),
("Content-Type: application/x-stuff; title*='foo.html", "'foo.html"),
("Content-Type: application/x-stuff; title*=bar.html", "bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header_parameters(raw_line)
self.assertEqual(parsed[1]["title"], expected_title)
def test_header_max_length(self):
base_header = "Content-Type: application/x-stuff; title*="
base_header_len = len(base_header)
test_data = [
(MAX_HEADER_LENGTH, {}),
(MAX_HEADER_LENGTH, {"max_length": None}),
(MAX_HEADER_LENGTH + 1, {"max_length": None}),
(100, {"max_length": 100}),
]
for line_length, kwargs in test_data:
with self.subTest(line_length=line_length, kwargs=kwargs):
title = "x" * (line_length - base_header_len)
line = base_header + title
assert len(line) == line_length
parsed = parse_header_parameters(line, **kwargs)
expected = ("content-type: application/x-stuff", {"title": title})
self.assertEqual(parsed, expected)
def test_header_too_long(self):
test_data = [
("x" * (MAX_HEADER_LENGTH + 1), {}),
("x" * 101, {"max_length": 100}),
]
for line, kwargs in test_data:
with self.subTest(line_length=len(line), kwargs=kwargs):
with self.assertRaises(ValueError):
parse_header_parameters(line, **kwargs)
| ParseHeaderParameterTests |
python | kubernetes-client__python | kubernetes/client/models/v1alpha2_lease_candidate_spec.py | {
"start": 383,
"end": 11150
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'binary_version': 'str',
'emulation_version': 'str',
'lease_name': 'str',
'ping_time': 'datetime',
'renew_time': 'datetime',
'strategy': 'str'
}
attribute_map = {
'binary_version': 'binaryVersion',
'emulation_version': 'emulationVersion',
'lease_name': 'leaseName',
'ping_time': 'pingTime',
'renew_time': 'renewTime',
'strategy': 'strategy'
}
def __init__(self, binary_version=None, emulation_version=None, lease_name=None, ping_time=None, renew_time=None, strategy=None, local_vars_configuration=None): # noqa: E501
"""V1alpha2LeaseCandidateSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._binary_version = None
self._emulation_version = None
self._lease_name = None
self._ping_time = None
self._renew_time = None
self._strategy = None
self.discriminator = None
self.binary_version = binary_version
if emulation_version is not None:
self.emulation_version = emulation_version
self.lease_name = lease_name
if ping_time is not None:
self.ping_time = ping_time
if renew_time is not None:
self.renew_time = renew_time
self.strategy = strategy
@property
def binary_version(self):
"""Gets the binary_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required. # noqa: E501
:return: The binary_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._binary_version
@binary_version.setter
def binary_version(self, binary_version):
"""Sets the binary_version of this V1alpha2LeaseCandidateSpec.
BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required. # noqa: E501
:param binary_version: The binary_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and binary_version is None: # noqa: E501
raise ValueError("Invalid value for `binary_version`, must not be `None`") # noqa: E501
self._binary_version = binary_version
@property
def emulation_version(self):
"""Gets the emulation_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\" # noqa: E501
:return: The emulation_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._emulation_version
@emulation_version.setter
def emulation_version(self, emulation_version):
"""Sets the emulation_version of this V1alpha2LeaseCandidateSpec.
EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\" # noqa: E501
:param emulation_version: The emulation_version of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: str
"""
self._emulation_version = emulation_version
@property
def lease_name(self):
"""Gets the lease_name of this V1alpha2LeaseCandidateSpec. # noqa: E501
LeaseName is the name of the lease for which this candidate is contending. This field is immutable. # noqa: E501
:return: The lease_name of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._lease_name
@lease_name.setter
def lease_name(self, lease_name):
"""Sets the lease_name of this V1alpha2LeaseCandidateSpec.
LeaseName is the name of the lease for which this candidate is contending. This field is immutable. # noqa: E501
:param lease_name: The lease_name of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and lease_name is None: # noqa: E501
raise ValueError("Invalid value for `lease_name`, must not be `None`") # noqa: E501
self._lease_name = lease_name
@property
def ping_time(self):
"""Gets the ping_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime. # noqa: E501
:return: The ping_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: datetime
"""
return self._ping_time
@ping_time.setter
def ping_time(self, ping_time):
"""Sets the ping_time of this V1alpha2LeaseCandidateSpec.
PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime. # noqa: E501
:param ping_time: The ping_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: datetime
"""
self._ping_time = ping_time
@property
def renew_time(self):
"""Gets the renew_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates. # noqa: E501
:return: The renew_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: datetime
"""
return self._renew_time
@renew_time.setter
def renew_time(self, renew_time):
"""Sets the renew_time of this V1alpha2LeaseCandidateSpec.
RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates. # noqa: E501
:param renew_time: The renew_time of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: datetime
"""
self._renew_time = renew_time
@property
def strategy(self):
"""Gets the strategy of this V1alpha2LeaseCandidateSpec. # noqa: E501
Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. # noqa: E501
:return: The strategy of this V1alpha2LeaseCandidateSpec. # noqa: E501
:rtype: str
"""
return self._strategy
@strategy.setter
def strategy(self, strategy):
"""Sets the strategy of this V1alpha2LeaseCandidateSpec.
Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. # noqa: E501
:param strategy: The strategy of this V1alpha2LeaseCandidateSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and strategy is None: # noqa: E501
raise ValueError("Invalid value for `strategy`, must not be `None`") # noqa: E501
self._strategy = strategy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha2LeaseCandidateSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha2LeaseCandidateSpec):
return True
return self.to_dict() != other.to_dict()
| V1alpha2LeaseCandidateSpec |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 39797,
"end": 44406
} | class ____:
"""Use this factory class to create the correct object for the `reranker_config` argument in the `collections.create()` method.
Each staticmethod provides options specific to the named reranker in the function's name. Under-the-hood data validation steps
will ensure that any mis-specifications will be caught before the request is sent to Weaviate.
"""
@staticmethod
def transformers() -> _RerankerProvider:
"""Create a `_RerankerTransformersConfig` object for use when reranking using the `reranker-transformers` module.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/reranker-transformers)
for detailed usage.
"""
return _RerankerTransformersConfig(reranker=Rerankers.TRANSFORMERS)
@staticmethod
def custom(
module_name: str, module_config: Optional[Dict[str, Any]] = None
) -> _RerankerProvider:
"""Create a `_RerankerCustomConfig` object for use when reranking using a custom module.
Args:
module_name: The name of the module to use, REQUIRED.
module_config: The configuration to use for the module. Defaults to `None`, which uses the server-defined default.
"""
return _RerankerCustomConfig(
reranker=_EnumLikeStr(module_name), module_config=module_config
)
@staticmethod
def cohere(
model: Optional[Union[RerankerCohereModel, str]] = None,
) -> _RerankerProvider:
"""Create a `_RerankerCohereConfig` object for use when reranking using the `reranker-cohere` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/reranker)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default
"""
return _RerankerCohereConfig(model=model)
@staticmethod
def jinaai(
model: Optional[Union[RerankerJinaAIModel, str]] = None,
) -> _RerankerProvider:
"""Create a `_RerankerJinaAIConfig` object for use when reranking using the `reranker-jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/reranker)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default
"""
return _RerankerJinaAIConfig(model=model)
@staticmethod
def voyageai(
model: Optional[Union[RerankerVoyageAIModel, str]] = None,
) -> _RerankerProvider:
"""Create a `_RerankerVoyageAIConfig` object for use when reranking using the `reranker-voyageai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/reranker)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default
"""
return _RerankerVoyageAIConfig(model=model)
@staticmethod
def nvidia(
model: Optional[str] = None,
base_url: Optional[AnyHttpUrl] = None,
) -> _RerankerProvider:
"""Create a `_RerankerNvidiaConfig` object for use when reranking using the `reranker-nvidia` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/reranker)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default
base_url: The base URL to send the reranker requests to. Defaults to `None`, which uses the server-defined default.
"""
return _RerankerNvidiaConfig(model=model, baseURL=base_url)
@staticmethod
def contextualai(
model: Optional[str] = None,
instruction: Optional[str] = None,
top_n: Optional[int] = None,
) -> _RerankerProvider:
"""Create a `_RerankerContextualAIConfig` object for use when reranking using the `reranker-contextualai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/contextualai/reranker)
for detailed usage.
Args:
model: The model to use. Defaults to `None`, which uses the server-defined default
instruction: Custom instructions for reranking. Defaults to `None`.
top_n: Number of top results to return. Defaults to `None`, which uses the server-defined default.
"""
return _RerankerContextualAIConfig(model=model, instruction=instruction, topN=top_n)
| _Reranker |
python | tornadoweb__tornado | tornado/auth.py | {
"start": 3483,
"end": 11221
} | class ____:
"""Abstract implementation of OpenID and Attribute Exchange.
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
def authenticate_redirect(
self,
callback_uri: Optional[str] = None,
ax_attrs: List[str] = ["name", "email", "language", "username"],
) -> None:
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 6.0
The ``callback`` argument was removed and this method no
longer returns an awaitable object. It is now an ordinary
synchronous function.
"""
handler = cast(RequestHandler, self)
callback_uri = callback_uri or handler.request.uri
assert callback_uri is not None
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
endpoint = self._OPENID_ENDPOINT # type: ignore
handler.redirect(endpoint + "?" + urllib.parse.urlencode(args))
async def get_authenticated_user(
self, http_client: Optional[httpclient.AsyncHTTPClient] = None
) -> Dict[str, Any]:
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
awaitable object instead.
"""
handler = cast(RequestHandler, self)
# Verify the OpenID response via direct request to the OP
args = {
k: v[-1] for k, v in handler.request.arguments.items()
} # type: Dict[str, Union[str, bytes]]
args["openid.mode"] = "check_authentication"
url = self._OPENID_ENDPOINT # type: ignore
if http_client is None:
http_client = self.get_auth_http_client()
resp = await http_client.fetch(
url, method="POST", body=urllib.parse.urlencode(args)
)
return self._on_authentication_verified(resp)
def _openid_args(
self,
callback_uri: str,
ax_attrs: Iterable[str] = [],
oauth_scope: Optional[str] = None,
) -> Dict[str, str]:
handler = cast(RequestHandler, self)
url = urllib.parse.urljoin(handler.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urllib.parse.urljoin(url, "/"),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update(
{
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
}
)
ax_attrs = set(ax_attrs)
required = [] # type: List[str]
if "name" in ax_attrs:
ax_attrs -= {"name", "firstname", "fullname", "lastname"}
required += ["firstname", "fullname", "lastname"]
args.update(
{
"openid.ax.type.firstname": "http://axschema.org/namePerson/first",
"openid.ax.type.fullname": "http://axschema.org/namePerson",
"openid.ax.type.lastname": "http://axschema.org/namePerson/last",
}
)
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update(
{
"openid.ns.oauth": "http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": handler.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
}
)
return args
def _on_authentication_verified(
self, response: httpclient.HTTPResponse
) -> Dict[str, Any]:
handler = cast(RequestHandler, self)
if b"is_valid:true" not in response.body:
raise AuthError("Invalid OpenID response: %r" % response.body)
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for key in handler.request.arguments:
if (
key.startswith("openid.ns.")
and handler.get_argument(key) == "http://openid.net/srv/ax/1.0"
):
ax_ns = key[10:]
break
def get_ax_arg(uri: str) -> str:
if not ax_ns:
return ""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in handler.request.arguments.keys():
if handler.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix) :]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return ""
return handler.get_argument(ax_name, "")
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = " ".join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = handler.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
return user
def get_auth_http_client(self) -> httpclient.AsyncHTTPClient:
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
| OpenIdMixin |
python | ray-project__ray | doc/source/ray-overview/examples/e2e-multimodal-ai-workloads/doggos/doggos/data.py | {
"start": 198,
"end": 1405
} | class ____:
"""Preprocessor class."""
def __init__(self, class_to_label=None):
self.class_to_label = class_to_label or {} # mutable defaults
self.label_to_class = {v: k for k, v in self.class_to_label.items()}
def fit(self, ds, column):
self.classes = ds.unique(column=column)
self.class_to_label = {tag: i for i, tag in enumerate(self.classes)}
self.label_to_class = {v: k for k, v in self.class_to_label.items()}
return self
def transform(self, ds):
ds = ds.map(
convert_to_label,
fn_kwargs={"class_to_label": self.class_to_label},
)
ds = ds.map_batches(
EmbedImages,
fn_constructor_kwargs={
"model_id": "openai/clip-vit-base-patch32",
"device": "cuda",
}, # class kwargs
fn_kwargs={},
compute=ray.data.ActorPoolStrategy(size=4),
batch_size=64,
num_gpus=1,
accelerator_type="T4",
)
ds = ds.drop_columns(["image"])
return ds
def save(self, fp):
with open(fp, "w") as f:
json.dump(self.class_to_label, f)
| Preprocessor |
python | ray-project__ray | python/ray/air/util/tensor_extensions/arrow.py | {
"start": 36832,
"end": 41897
} | class ____(
ArrowExtensionSerializeDeserializeCache, pa.ExtensionType
):
"""
Arrow ExtensionType for an array of heterogeneous-shaped, homogeneous-typed
tensors.
This is the Arrow side of ``TensorDtype`` for tensor elements with different shapes.
NOTE: This extension only supports tensor elements with non-ragged, well-defined
shapes; i.e. every tensor element must have a well-defined shape and all of their
shapes have to have same number of dimensions (ie ``len(shape)`` has to be the
same for all of them).
See Arrow extension type docs:
https://arrow.apache.org/docs/python/extending_types.html#defining-extension-types-user-defined-types
"""
OFFSET_DTYPE = pa.int64()
def __init__(self, dtype: pa.DataType, ndim: int):
"""
Construct the Arrow extension type for array of heterogeneous-shaped tensors.
Args:
dtype: pyarrow dtype of tensor elements.
ndim: The number of dimensions in the tensor elements.
"""
self._ndim = ndim
super().__init__(
pa.struct(
[("data", pa.large_list(dtype)), ("shape", pa.list_(self.OFFSET_DTYPE))]
),
"ray.data.arrow_variable_shaped_tensor",
)
def to_pandas_dtype(self):
"""
Convert Arrow extension type to corresponding Pandas dtype.
Returns:
An instance of pd.api.extensions.ExtensionDtype.
"""
from ray.air.util.tensor_extensions.pandas import TensorDtype
return TensorDtype(
self.shape,
self.storage_type["data"].type.value_type.to_pandas_dtype(),
)
@property
def ndim(self) -> int:
"""Return the number of dimensions in the tensor elements."""
return self._ndim
@property
def shape(self) -> Tuple[None, ...]:
return (None,) * self.ndim
@property
def scalar_type(self) -> pa.DataType:
"""Returns the type of the underlying tensor elements."""
data_field_index = self.storage_type.get_field_index("data")
return self.storage_type[data_field_index].type.value_type
def __reduce__(self):
return self.__arrow_ext_deserialize__, (
self.storage_type,
self.__arrow_ext_serialize__(),
)
def _arrow_ext_serialize_compute(self):
if ARROW_EXTENSION_SERIALIZATION_FORMAT == _SerializationFormat.CLOUDPICKLE:
return cloudpickle.dumps(self._ndim)
elif ARROW_EXTENSION_SERIALIZATION_FORMAT == _SerializationFormat.JSON:
return json.dumps(self._ndim).encode()
else:
raise ValueError(
f"Invalid serialization format: {ARROW_EXTENSION_SERIALIZATION_FORMAT}"
)
@classmethod
def _get_deserialize_parameter(cls, storage_type, serialized):
return (serialized, storage_type["data"].type.value_type)
@classmethod
def _arrow_ext_deserialize_compute(cls, serialized, value_type):
ndim = _deserialize_with_fallback(serialized, "ndim")
return cls(value_type, ndim)
def __arrow_ext_class__(self):
"""
ExtensionArray subclass with custom logic for this array of tensors
type.
Returns:
A subclass of pd.api.extensions.ExtensionArray.
"""
return ArrowVariableShapedTensorArray
def __arrow_ext_scalar_class__(self):
"""
ExtensionScalar subclass with custom logic for this array of tensors type.
"""
return ArrowTensorScalar
def __str__(self) -> str:
dtype = self.storage_type["data"].type.value_type
return f"ArrowVariableShapedTensorType(ndim={self.ndim}, dtype={dtype})"
def __repr__(self) -> str:
return str(self)
def __eq__(self, other):
# NOTE: This check is deliberately not comparing the ``ndim`` since
# we allow tensor types w/ varying ``ndim``s to be combined
return (
isinstance(other, ArrowVariableShapedTensorType)
and other.extension_name == self.extension_name
and other.scalar_type == self.scalar_type
)
def __ne__(self, other):
# NOTE: We override ``__ne__`` to override base class' method
return not self.__eq__(other)
def __hash__(self) -> int:
return hash((self.extension_name, self.scalar_type))
def _extension_scalar_to_ndarray(self, scalar: "pa.ExtensionScalar") -> np.ndarray:
"""
Convert an ExtensionScalar to a tensor element.
"""
# Handle None/null values
if scalar.value is None:
return None
data = scalar.value.get("data")
raw_values = data.values
value_type = raw_values.type
offset = raw_values.offset
data_buffer = raw_values.buffers()[1]
shape = tuple(scalar.value.get("shape").as_py())
return _to_ndarray_helper(shape, value_type, offset, data_buffer)
@PublicAPI(stability="alpha")
| ArrowVariableShapedTensorType |
python | lazyprogrammer__machine_learning_examples | cnn_class/cifar.py | {
"start": 3001,
"end": 7120
} | class ____(object):
def __init__(self, convpool_layer_sizes, hidden_layer_sizes):
self.convpool_layer_sizes = convpool_layer_sizes
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, Y, lr=1e-4, mu=0.99, reg=1e-6, decay=0.99999, eps=1e-2, batch_sz=30, epochs=100, show_fig=True):
lr = np.float32(lr)
mu = np.float32(mu)
reg = np.float32(reg)
decay = np.float32(decay)
eps = np.float32(eps)
# make a validation set
X, Y = shuffle(X, Y)
X = X.astype(np.float32)
Y = Y.astype(np.int32)
Xvalid, Yvalid = X[-1000:], Y[-1000:]
X, Y = X[:-1000], Y[:-1000]
# initialize convpool layers
N, c, width, height = X.shape
mi = c
outw = width
outh = height
self.convpool_layers = []
for mo, fw, fh in self.convpool_layer_sizes:
layer = ConvPoolLayer(mi, mo, fw, fh)
self.convpool_layers.append(layer)
outw = (outw - fw + 1) / 2
outh = (outh - fh + 1) / 2
mi = mo
# initialize mlp layers
K = len(set(Y))
self.hidden_layers = []
M1 = self.convpool_layer_sizes[-1][0]*outw*outh # size must be same as output of last convpool layer
count = 0
for M2 in self.hidden_layer_sizes:
h = HiddenLayer(M1, M2, count)
self.hidden_layers.append(h)
M1 = M2
count += 1
# logistic regression layer
W, b = init_weight_and_bias(M1, K)
self.W = theano.shared(W, 'W_logreg')
self.b = theano.shared(b, 'b_logreg')
# collect params for later use
self.params = [self.W, self.b]
for c in self.convpool_layers:
self.params += c.params
for h in self.hidden_layers:
self.params += h.params
# for momentum
dparams = [theano.shared(np.zeros(p.get_value().shape, dtype=np.float32)) for p in self.params]
# for rmsprop
cache = [theano.shared(np.zeros(p.get_value().shape, dtype=np.float32)) for p in self.params]
# set up theano functions and variables
thX = T.tensor4('X', dtype='float32')
thY = T.ivector('Y')
pY = self.forward(thX)
rcost = reg*T.sum([(p*p).sum() for p in self.params])
cost = -T.mean(T.log(pY[T.arange(thY.shape[0]), thY])) + rcost
prediction = self.predict(thX)
cost_predict_op = theano.function(inputs=[thX, thY], outputs=[cost, prediction])
# momentum only
updates = [
(p, p + mu*dp - lr*T.grad(cost, p)) for p, dp in zip(self.params, dparams)
] + [
(dp, mu*dp - lr*T.grad(cost, p)) for p, dp in zip(self.params, dparams)
]
train_op = theano.function(
inputs=[thX, thY],
updates=updates
)
n_batches = N / batch_sz
costs = []
for i in xrange(epochs):
X, Y = shuffle(X, Y)
for j in xrange(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
train_op(Xbatch, Ybatch)
if j % 20 == 0:
c, p = cost_predict_op(Xvalid, Yvalid)
costs.append(c)
e = error_rate(Yvalid, p)
print "i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X):
Z = X
for c in self.convpool_layers:
Z = c.forward(Z)
Z = Z.flatten(ndim=2)
for h in self.hidden_layers:
Z = h.forward(Z)
return T.nnet.softmax(Z.dot(self.W) + self.b)
def predict(self, X):
pY = self.forward(X)
return T.argmax(pY, axis=1)
def main():
X, Y = getImageData()
model = CNN(
convpool_layer_sizes=[(20, 5, 5), (20, 5, 5)],
hidden_layer_sizes=[500, 300],
)
model.fit(X, Y)
if __name__ == '__main__':
main()
| CNN |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 7705,
"end": 8809
} | class ____(nn.ModuleDict):
def __init__(self, config: SwitchTransformersConfig):
super().__init__()
self.num_experts = config.num_experts
for idx in range(config.num_experts):
self[f"expert_{idx}"] = SwitchTransformersDenseActDense(config)
def forward(
self, hidden_states: torch.Tensor, selected_experts: torch.Tensor, routing_weights: torch.Tensor
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
expert_mask = selected_experts.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
current_hidden_states = self[f"expert_{expert_idx[0]}"](current_state) * routing_weights[top_x, idx, None]
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
return final_hidden_states
| SwitchTransformersExperts |
python | kamyu104__LeetCode-Solutions | Python/find-the-largest-palindrome-divisible-by-k.py | {
"start": 61,
"end": 1491
} | class ____(object):
def largestPalindrome(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
def inv(x, p):
return pow(x, p-2, p)
def f(l):
p = 7
result = ['9']*l
if l:
curr = reduce(lambda accu, x: (accu*10+(ord(x)-ord('0')))%p, result, 0)
# l%2 == 0: (curr+(i-9)*11*pow(10, l//2-1, p))%p = 0
# l%2 == 1: (curr+(i-9)*pow(10, l//2, p))%p = 0
i = 9-(curr*inv(11 if l%2 == 0 else 1, p)*inv(pow(10, l//2-int(l%2 == 0), p), p))%p
if i <= 2:
i += p
result[l//2] = result[l//2-int(l%2 == 0)] = str(i)
return "".join(result)
if k in (1, 3, 9):
return '9'*n
if k in (2, 4, 8):
k = min(k, 6)
if n <= k:
return '8'*n
l = k//2
return '8'*l+'9'*(n-k)+'8'*l
if k == 5:
if n <= 2:
return '5'*n
return '5'+'9'*(n-2)+'5'
if k == 6:
if n <= 2:
return '6'*n
if n%2:
l = n//2-1
return '8'+'9'*l+'8'+'9'*l+'8'
l = n//2-2
return '8'+'9'*l+"77"+'9'*l+'8'
l, r = divmod(n, 12)
return "999999"*l+f(r)+"999999"*l # 999999%7 = 0
| Solution |
python | huggingface__transformers | src/transformers/models/musicgen_melody/processing_musicgen_melody.py | {
"start": 896,
"end": 5964
} | class ____(ProcessorMixin):
r"""
Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor
class.
[`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See
[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
Args:
feature_extractor (`MusicgenMelodyFeatureExtractor`):
An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`T5Tokenizer`):
An instance of [`T5Tokenizer`]. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.get_decoder_prompt_ids
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
if len(args) > 0:
kwargs["audio"] = args[0]
return super().__call__(*args, **kwargs)
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.batch_decode with padding_mask->attention_mask
def batch_decode(self, *args, **kwargs):
"""
This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
[`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
"""
audio_values = kwargs.pop("audio", None)
attention_mask = kwargs.pop("attention_mask", None)
if len(args) > 0:
audio_values = args[0]
args = args[1:]
if audio_values is not None:
return self._decode_audio(audio_values, attention_mask=attention_mask)
else:
return self.tokenizer.batch_decode(*args, **kwargs)
# Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor._decode_audio with padding_mask->attention_mask
def _decode_audio(self, audio_values, attention_mask: Any = None) -> list[np.ndarray]:
"""
This method strips any padding from the audio values to return a list of numpy audio arrays.
"""
audio_values = to_numpy(audio_values)
bsz, channels, seq_len = audio_values.shape
if attention_mask is None:
return list(audio_values)
attention_mask = to_numpy(attention_mask)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
difference = seq_len - attention_mask.shape[-1]
padding_value = 1 - self.feature_extractor.padding_value
attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
audio_values = audio_values.tolist()
for i in range(bsz):
sliced_audio = np.asarray(audio_values[i])[
attention_mask[i][None, :] != self.feature_extractor.padding_value
]
audio_values[i] = sliced_audio.reshape(channels, -1)
return audio_values
def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"):
"""
Helper function to get null inputs for unconditional generation, enabling the model to be used without the
feature extractor or tokenizer.
Args:
num_samples (int, *optional*):
Number of audio samples to unconditionally generate.
Example:
```python
>>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor
>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")
>>> # get the unconditional (or 'null') inputs for the model
>>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")
>>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1)
>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
```"""
inputs = self.tokenizer([""] * num_samples, return_tensors=return_tensors, return_attention_mask=True)
inputs["attention_mask"][:] = 0
return inputs
__all__ = ["MusicgenMelodyProcessor"]
| MusicgenMelodyProcessor |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 7161,
"end": 7229
} | class ____(HTTPClientError):
status_code = 402
| HTTPPaymentRequired |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 23671,
"end": 23788
} | class ____(RequestHandler):
def get(self):
self.render("page.html", entries=[1, 2])
| UIModuleResourceHandler |
python | django__django | tests/migrations/test_migrations_manual_porting/0003_third.py | {
"start": 81,
"end": 528
} | class ____(migrations.Migration):
dependencies = [
("migrations", "0002_second"),
]
operations = [
migrations.AlterUniqueTogether(
name="somemodel",
unique_together={("id", "name")},
),
migrations.AlterUniqueTogether(
name="somemodel",
unique_together={("name",)},
),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
| Migration |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 177308,
"end": 179957
} | class ____(Response):
"""
Response of tasks.enqueue endpoint.
:param queued: Number of tasks queued (0 or 1)
:type queued: int
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "enqueue"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"queued": {
"description": "Number of tasks queued (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self, queued: Optional[int] = None, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any
) -> None:
super(EnqueueResponse, self).__init__(**kwargs)
self.queued = queued
self.updated = updated
self.fields = fields
@schema_property("queued")
def queued(self) -> Optional[int]:
return self._property_queued
@queued.setter
def queued(self, value: Optional[int]) -> None:
if value is None:
self._property_queued = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "queued", six.integer_types)
self._property_queued = value
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EnqueueResponse |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/sessionmakers.py | {
"start": 659,
"end": 1485
} | class ____(AsyncSession):
pass
def async_session_factory(
engine: AsyncEngine,
) -> async_sessionmaker[MyAsyncSession]:
return async_sessionmaker(engine, class_=MyAsyncSession)
def async_scoped_session_factory(
engine: AsyncEngine,
) -> async_scoped_session[MyAsyncSession]:
return async_scoped_session(
async_sessionmaker(engine, class_=MyAsyncSession),
scopefunc=lambda: None,
)
async def async_main() -> None:
fac = async_session_factory(async_engine)
async with fac() as sess:
assert_type(sess, MyAsyncSession)
async with fac.begin() as sess:
assert_type(sess, MyAsyncSession)
scoped_fac = async_scoped_session_factory(async_engine)
sess = scoped_fac()
assert_type(sess, MyAsyncSession)
engine = create_engine("...")
| MyAsyncSession |
python | bokeh__bokeh | src/bokeh/core/has_props.py | {
"start": 8984,
"end": 9059
} | class ____:
"""Resolve this class by a fully qualified name. """
| Qualified |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_dms.py | {
"start": 1401,
"end": 1622
} | class ____:
EXPECTED_WAITER_NAME: str | None = None
def test_setup(self):
if self.__class__.__name__ != "TestBaseDmsTrigger":
assert isinstance(self.EXPECTED_WAITER_NAME, str)
| TestBaseDmsTrigger |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_with_poly.py | {
"start": 712,
"end": 1235
} | class ____(_Polymorphic, _PolymorphicFixtureBase):
def test_no_use_flat_and_aliased(self):
sess = fixture_session()
subq = sess.query(Person).subquery()
testing.assert_raises_message(
exc.ArgumentError,
"the 'flat' and 'selectable' arguments cannot be passed "
"simultaneously to with_polymorphic()",
with_polymorphic,
Person,
[Engineer],
selectable=subq,
flat=True,
)
| WithPolymorphicAPITest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_checks/asset_check_factories/schema_change_checks.py | {
"start": 6354,
"end": 6421
} | class ____(BaseModel):
old_type: str
new_type: str
| TypeChange |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.