language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 4659,
"end": 4834
} | class ____(BaseEstimator):
def fit(self, X, y=None):
self._good_attribute = 1
X, y = validate_data(self, X, y)
return self
| ChangesUnderscoreAttribute |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni/objects.py | {
"start": 535,
"end": 817
} | class ____:
name: str
verified: bool
@classmethod
def from_json(cls, data: dict[str, Any]) -> "OmniLabel":
"""Create OmniLabel from JSON response data."""
return cls(name=data["name"], verified=data["verified"])
@whitelist_for_serdes
@record
| OmniLabel |
python | pytest-dev__pytest | testing/test_debugging.py | {
"start": 32744,
"end": 36605
} | class ____:
@pytest.mark.parametrize("arg", ["--pdb", ""])
def test_sys_breakpointhook_configure_and_unconfigure(
self, pytester: Pytester, arg: str
) -> None:
"""
Test that sys.breakpointhook is set to the custom Pdb class once configured, test that
hook is reset to system value once pytest has been unconfigured
"""
pytester.makeconftest(
"""
import sys
from pytest import hookimpl
from _pytest.debugging import pytestPDB
def pytest_configure(config):
config.add_cleanup(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
assert sys.breakpointhook == pytestPDB.set_trace
"""
)
pytester.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
def test_pdb_custom_cls(
self, pytester: Pytester, custom_debugger_hook, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.delenv("PYTHONBREAKPOINT", raising=False)
p1 = pytester.makepyfile(
"""
def test_nothing():
breakpoint()
"""
)
result = pytester.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomDebugger", p1
)
result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"])
assert custom_debugger_hook == ["init", "set_trace"]
@pytest.mark.parametrize("arg", ["--pdb", ""])
def test_environ_custom_class(
self, pytester: Pytester, custom_debugger_hook, arg: str
) -> None:
pytester.makeconftest(
"""
import os
import sys
os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'
def pytest_configure(config):
config.add_cleanup(check_restored)
def check_restored():
assert sys.breakpointhook == sys.__breakpointhook__
def test_check():
import _pytest
assert sys.breakpointhook is _pytest._CustomDebugger.set_trace
"""
)
pytester.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
def test_sys_breakpoint_interception(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.delenv("PYTHONBREAKPOINT", raising=False)
p1 = pytester.makepyfile(
"""
def test_1():
breakpoint()
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("quit")
rest = child.read().decode("utf8")
assert "Quitting debugger" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
@pytest.mark.xfail(reason="#10042", strict=False)
def test_pdb_not_altered(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
assert 0
"""
)
child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("c")
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
TestPDB.flush(child)
| TestDebuggingBreakpoints |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_worksheet09.py | {
"start": 382,
"end": 4746
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with a blank cell."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write_blank(0, 0, None)
worksheet.write_blank(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
def test_assemble_xml_file_write(self):
"""Test writing a worksheet with a blank cell with write() method."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write(0, 0, None)
worksheet.write(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
def test_assemble_xml_file_A1(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write_blank("A1", None)
worksheet.write_blank("C2", None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py | {
"start": 144010,
"end": 154352
} | class ____(AllRepositoryGraphQLContextTestMatrix):
def test_cross_repo_derived_asset_dependencies(self, graphql_context: WorkspaceRequestContext):
result = execute_dagster_graphql(
graphql_context,
CROSS_REPO_ASSET_GRAPH,
)
asset_nodes = result.data["assetNodes"]
derived_asset = next(
node
for node in asset_nodes
if node["id"] == 'cross_asset_repos.upstream_assets_repository.["derived_asset"]'
)
dependent_asset_keys = [
{"path": ["downstream_asset1"]},
{"path": ["downstream_asset2"]},
]
result_dependent_keys = sorted(
derived_asset["dependedByKeys"], key=lambda node: node.get("path")[0]
)
assert result_dependent_keys == dependent_asset_keys
def test_cross_repo_source_asset_dependencies(self, graphql_context: WorkspaceRequestContext):
result = execute_dagster_graphql(
graphql_context,
CROSS_REPO_ASSET_GRAPH,
)
asset_nodes = result.data["assetNodes"]
always_source_asset = next(
node for node in asset_nodes if "always_source_asset" in node["id"]
)
dependent_asset_keys = [
{"path": ["downstream_asset1"]},
{"path": ["downstream_asset2"]},
]
result_dependent_keys = sorted(
always_source_asset["dependedByKeys"], key=lambda node: node.get("path")[0]
)
assert result_dependent_keys == dependent_asset_keys
def test_cross_repo_observable_source_asset(self, graphql_context: WorkspaceRequestContext):
"""Ensure that when retrieving an asset that is observable in one repo and not in another,
we correctly represent it as observable when retrieving information about the asset key in
general.
"""
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_IS_OBSERVABLE,
variables={"assetKey": {"path": ["sometimes_observable_source_asset"]}},
)
asset = result.data["assetOrError"]
assert asset["definition"]["assetKey"]["path"] == ["sometimes_observable_source_asset"]
assert asset["definition"]["isObservable"] is True
def get_partitioned_asset_repo():
static_partitions_def = StaticPartitionsDefinition(["a", "b", "c", "d"])
@asset(partitions_def=static_partitions_def)
def abc_asset(_):
yield AssetMaterialization(asset_key="abc_asset", partition="invalid_partition_key")
yield Output(5)
daily_partitions_def = DailyPartitionsDefinition(start_date="2022-01-01")
@asset(partitions_def=daily_partitions_def)
def daily_asset(_):
# invalid partition key
yield AssetMaterialization(asset_key="daily_asset", partition="2021-01-01")
yield Output(5)
multipartitions_def = MultiPartitionsDefinition(
{
"abcd": static_partitions_def,
"date": daily_partitions_def,
}
)
@asset(partitions_def=multipartitions_def)
def multipartitions_asset(_):
return 1
@repository
def partitioned_asset_repo():
return [
abc_asset,
define_asset_job("abc_asset_job", AssetSelection.assets("abc_asset")),
daily_asset,
define_asset_job("daily_asset_job", AssetSelection.assets("daily_asset")),
multipartitions_asset,
define_asset_job(
"multipartitions_job",
AssetSelection.assets("multipartitions_asset"),
partitions_def=multipartitions_def,
),
]
return partitioned_asset_repo
def test_1d_subset_backcompat():
with instance_for_test(synchronous_run_coordinator=True) as instance:
instance.can_read_asset_status_cache = lambda: False
assert instance.can_read_asset_status_cache() is False
with define_out_of_process_context(
__file__, "get_partitioned_asset_repo", instance
) as graphql_context:
abc_selector = infer_job_selector(graphql_context, "abc_asset_job")
result = execute_dagster_graphql(
graphql_context,
GET_1D_ASSET_PARTITIONS,
variables={"pipelineSelector": abc_selector},
)
assert result.data
assert len(result.data["assetNodes"]) == 1
assert (
result.data["assetNodes"][0]["assetPartitionStatuses"]["materializedPartitions"]
== []
)
assert set(
result.data["assetNodes"][0]["assetPartitionStatuses"]["unmaterializedPartitions"]
) == {"a", "b", "c", "d"}
for partition in ["a", "c", "d"]:
_create_partitioned_run(graphql_context, "abc_asset_job", partition)
result = execute_dagster_graphql(
graphql_context,
GET_1D_ASSET_PARTITIONS,
variables={"pipelineSelector": abc_selector},
)
assert result.data
assert set(
result.data["assetNodes"][0]["assetPartitionStatuses"]["materializedPartitions"]
) == {
"a",
"c",
"d",
}
assert set(
result.data["assetNodes"][0]["assetPartitionStatuses"]["unmaterializedPartitions"]
) == {
"b",
}
daily_job_selector = infer_job_selector(graphql_context, "daily_asset_job")
result = execute_dagster_graphql(
graphql_context,
GET_1D_ASSET_PARTITIONS,
variables={"pipelineSelector": daily_job_selector},
)
assert result.data
assert len(result.data["assetNodes"]) == 1
assert result.data["assetNodes"][0]["assetPartitionStatuses"]["ranges"] == []
for partition in ["2022-03-03", "2022-03-05", "2022-03-06"]:
_create_partitioned_run(graphql_context, "daily_asset_job", partition)
result = execute_dagster_graphql(
graphql_context,
GET_1D_ASSET_PARTITIONS,
variables={"pipelineSelector": daily_job_selector},
)
assert result.data
ranges = result.data["assetNodes"][0]["assetPartitionStatuses"]["ranges"]
assert len(ranges) == 2
assert ranges[0]["startKey"] == "2022-03-03"
assert ranges[0]["endKey"] == "2022-03-03"
assert ranges[1]["startKey"] == "2022-03-05"
assert ranges[1]["endKey"] == "2022-03-06"
result = execute_dagster_graphql(
graphql_context,
GET_PARTITION_STATS,
variables={"pipelineSelector": daily_job_selector},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 1
assert result.data["assetNodes"][0]["partitionStats"]["numMaterialized"] == 3
def test_2d_subset_backcompat():
with instance_for_test(synchronous_run_coordinator=True) as instance:
instance.can_read_asset_status_cache = lambda: False
assert instance.can_read_asset_status_cache() is False
with define_out_of_process_context(
__file__, "get_partitioned_asset_repo", instance
) as graphql_context:
multipartitions_selector = infer_job_selector(graphql_context, "multipartitions_job")
result = execute_dagster_graphql(
graphql_context,
GET_2D_ASSET_PARTITIONS,
variables={"pipelineSelector": multipartitions_selector},
)
assert result.data
assert len(result.data["assetNodes"]) == 1
assert result.data["assetNodes"][0]["assetPartitionStatuses"]["ranges"] == []
for partition_fields in [
("2022-03-03", "a"),
("2022-03-03", "c"),
("2022-03-04", "a"),
("2022-03-04", "c"),
("2022-03-06", "a"),
("2022-03-06", "c"),
]:
partition_key = MultiPartitionKey(
{"date": partition_fields[0], "abcd": partition_fields[1]}
)
_create_partitioned_run(graphql_context, "multipartitions_job", partition_key)
result = execute_dagster_graphql(
graphql_context,
GET_2D_ASSET_PARTITIONS,
variables={"pipelineSelector": multipartitions_selector},
)
ranges = result.data["assetNodes"][0]["assetPartitionStatuses"]["ranges"]
assert len(ranges) == 2
assert ranges[0]["primaryDimStartKey"] == "2022-03-03"
assert ranges[0]["primaryDimEndKey"] == "2022-03-04"
assert set(ranges[0]["secondaryDim"]["materializedPartitions"]) == {
"a",
"c",
}
assert set(ranges[0]["secondaryDim"]["unmaterializedPartitions"]) == {
"b",
"d",
}
assert ranges[1]["primaryDimStartKey"] == "2022-03-06"
assert ranges[1]["primaryDimEndKey"] == "2022-03-06"
assert len(ranges[1]["secondaryDim"]["materializedPartitions"]) == 2
assert set(ranges[1]["secondaryDim"]["materializedPartitions"]) == {
"a",
"c",
}
assert set(ranges[1]["secondaryDim"]["unmaterializedPartitions"]) == {
"b",
"d",
}
def test_concurrency_assets(graphql_context: WorkspaceRequestContext):
def _graphql_pool(asset_key):
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_CONCURRENCY_GROUP,
variables={"assetKey": {"path": asset_key.path}},
)
return set(result.data["assetNodeOrError"]["pools"])
assert _graphql_pool(AssetKey(["concurrency_asset"])) == {"foo"}
assert _graphql_pool(AssetKey(["concurrency_graph_asset"])) == {"bar", "baz"}
assert _graphql_pool(AssetKey(["concurrency_multi_asset_1"])) == {"buzz"}
| TestCrossRepoAssetDependedBy |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_H.py | {
"start": 8677,
"end": 10119
} | class ____(Benchmark):
r"""
HolderTable objective function.
This class defines the HolderTable [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{HolderTable}}({x}) = - \left|{e^{\left|{1
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi} }\right|}
\sin\left(x_{1}\right) \cos\left(x_{2}\right)}\right|
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -19.20850256788675` for
:math:`x_i = \pm 9.664590028909654` for :math:`i = 1, 2`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil #146 equation is wrong - should be squaring the x1 and x2
terms, but isn't. Gavana does.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [(8.055023472141116, 9.664590028909654),
(-8.055023472141116, 9.664590028909654),
(8.055023472141116, -9.664590028909654),
(-8.055023472141116, -9.664590028909654)]
self.fglob = -19.20850256788675
def fun(self, x, *args):
self.nfev += 1
return -abs(sin(x[0]) * cos(x[1])
* exp(abs(1 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)))
| HolderTable |
python | kamyu104__LeetCode-Solutions | Python/concatenated-divisibility.py | {
"start": 78,
"end": 1530
} | class ____(object):
def concatenatedDivisibility(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
def length(x):
l = 0
while x:
l += 1
x //= 10
return max(l, 1)
lookup = [length(x) for x in nums]
mx = max(lookup)
pow10 = [0]*(mx+1)
pow10[0] = 1%k
for i in xrange(len(pow10)-1):
pow10[i+1] = (pow10[i]*10)%k
dp = [[False]*k for _ in xrange(1<<len(nums))]
dp[-1][0] = True
for mask in reversed(xrange(len(dp)-1)):
for r in xrange(k):
for i, l in enumerate(lookup):
if mask&(1<<i):
continue
if dp[mask|(1<<i)][(r*pow10[l]+nums[i])%k]:
dp[mask][r] = True
break
result = []
if not dp[0][0]:
return result
order = sorted((x, i) for i, x in enumerate(nums))
mask = r = 0
for _ in xrange(len(nums)):
for _, i in order:
if mask&(1<<i):
continue
if dp[mask|(1<<i)][(r*pow10[lookup[i]]+nums[i])%k]:
result.append(nums[i])
mask |= 1<<i
r = (r*pow10[lookup[i]]+nums[i])%k
break
return result
| Solution |
python | huggingface__transformers | src/transformers/models/mpt/modeling_mpt.py | {
"start": 8836,
"end": 9037
} | class ____(PreTrainedModel):
config: MptConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["MptBlock"]
@auto_docstring
| MptPreTrainedModel |
python | numpy__numpy | numpy/f2py/tests/test_callback.py | {
"start": 192,
"end": 5158
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "foo.f")]
@pytest.mark.parametrize("name", ["t", "t2"])
@pytest.mark.slow
def test_all(self, name):
self.check_function(name)
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
""")
assert self.module.t.__doc__ == expected
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert r == 4
r = t(lambda a: 5, fun_extra_args=(6, ))
assert r == 5
r = t(lambda a: a, fun_extra_args=(6, ))
assert r == 6
r = t(lambda a: 5 + a, fun_extra_args=(7, ))
assert r == 12
r = t(math.degrees, fun_extra_args=(math.pi, ))
assert r == 180
r = t(math.degrees, fun_extra_args=(math.pi, ))
assert r == 180
r = t(self.module.func, fun_extra_args=(6, ))
assert r == 17
r = t(self.module.func0)
assert r == 11
r = t(self.module.func0._cpointer)
assert r == 11
class A:
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert r == 7
r = t(a.mth)
assert r == 9
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback(self):
def callback(code):
if code == "r":
return 0
else:
return 1
f = self.module.string_callback
r = f(callback)
assert r == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback_array(self):
# See gh-10027
cu1 = np.zeros((1, ), "S8")
cu2 = np.zeros((1, 8), "c")
cu3 = np.array([""], "S8")
def callback(cu, lencu):
if cu.shape != (lencu,):
return 1
if cu.dtype != "S8":
return 2
if not np.all(cu == b""):
return 3
return 0
f = self.module.string_callback_array
for cu in [cu1, cu2, cu3]:
res = f(callback, cu, cu.size)
assert res == 0
def test_threadsafety(self):
# Segfaults if the callback handling is not threadsafe
errors = []
def cb():
# Sleep here to make it more likely for another thread
# to call their callback at the same time.
time.sleep(1e-3)
# Check reentrancy
r = self.module.t(lambda: 123)
assert r == 123
return 42
def runner(name):
try:
for j in range(50):
r = self.module.t(cb)
assert r == 42
self.check_function(name)
except Exception:
errors.append(traceback.format_exc())
threads = [
threading.Thread(target=runner, args=(arg, ))
for arg in ("t", "t2") for n in range(20)
]
for t in threads:
t.start()
for t in threads:
t.join()
errors = "\n\n".join(errors)
if errors:
raise AssertionError(errors)
def test_hidden_callback(self):
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
try:
self.module.hidden_callback2(2)
except Exception as msg:
assert str(msg).startswith("cb: Callback global_f not defined")
self.module.global_f = lambda x: x + 1
r = self.module.hidden_callback(2)
assert r == 3
self.module.global_f = lambda x: x + 2
r = self.module.hidden_callback(2)
assert r == 4
del self.module.global_f
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
self.module.global_f = lambda x=0: x + 3
r = self.module.hidden_callback(2)
assert r == 5
# reproducer of gh18341
r = self.module.hidden_callback2(2)
assert r == 3
| TestF77Callback |
python | giampaolo__psutil | tests/test_process_all.py | {
"start": 2655,
"end": 15378
} | class ____(PsutilTestCase):
"""Test which iterates over all running processes and performs
some sanity checks against Process API's returned values.
Uses a process pool to get info about all processes.
"""
def setUp(self):
psutil._set_debug(False)
# Using a pool in a CI env may result in deadlock, see:
# https://github.com/giampaolo/psutil/issues/2104
if USE_PROC_POOL:
# The 'fork' method is the only one that does not
# create a "resource_tracker" process. The problem
# when creating this process is that it ignores
# SIGTERM and SIGINT, and this makes "reap_children"
# hang... The following code should run on python-3.4
# and later.
multiprocessing.set_start_method('fork')
self.pool = multiprocessing.Pool()
def tearDown(self):
psutil._set_debug(True)
if USE_PROC_POOL:
self.pool.terminate()
self.pool.join()
def iter_proc_info(self):
# Fixes "can't pickle <function proc_info>: it's not the
# same object as test_process_all.proc_info".
from tests.test_process_all import proc_info
if USE_PROC_POOL:
return self.pool.imap_unordered(proc_info, psutil.pids())
else:
ls = [proc_info(pid) for pid in psutil.pids()]
return ls
def test_all(self):
failures = []
for info in self.iter_proc_info():
for name, value in info.items():
meth = getattr(self, name)
try:
meth(value, info)
except Exception: # noqa: BLE001
s = '\n' + '=' * 70 + '\n'
s += (
"FAIL: name=test_{}, pid={}, ret={}\ninfo={}\n".format(
name,
info['pid'],
repr(value),
info,
)
)
s += '-' * 70
s += f"\n{traceback.format_exc()}"
s = "\n".join((" " * 4) + i for i in s.splitlines()) + "\n"
failures.append(s)
else:
if value not in (0, 0.0, [], None, '', {}):
assert value, value
if failures:
return pytest.fail(''.join(failures))
def cmdline(self, ret, info):
assert isinstance(ret, list)
for part in ret:
assert isinstance(part, str)
def exe(self, ret, info):
assert isinstance(ret, str)
assert ret.strip() == ret
if ret:
if WINDOWS and not ret.endswith('.exe'):
return # May be "Registry", "MemCompression", ...
assert os.path.isabs(ret), ret
# Note: os.stat() may return False even if the file is there
# hence we skip the test, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
if POSIX and os.path.isfile(ret):
if hasattr(os, 'access') and hasattr(os, "X_OK"):
# XXX: may fail on MACOS
try:
assert os.access(ret, os.X_OK)
except AssertionError:
if os.path.exists(ret) and not CI_TESTING:
raise
def pid(self, ret, info):
assert isinstance(ret, int)
assert ret >= 0
def ppid(self, ret, info):
assert isinstance(ret, int)
assert ret >= 0
proc_info(ret)
def name(self, ret, info):
assert isinstance(ret, str)
if WINDOWS and not ret and is_win_secure_system_proc(info['pid']):
# https://github.com/giampaolo/psutil/issues/2338
return
# on AIX, "<exiting>" processes don't have names
if not AIX:
assert ret, repr(ret)
def create_time(self, ret, info):
assert isinstance(ret, float)
try:
assert ret >= 0
except AssertionError:
# XXX
if OPENBSD and info['status'] == psutil.STATUS_ZOMBIE:
pass
else:
raise
# this can't be taken for granted on all platforms
# assert ret >= psutil.boot_time())
# make sure returned value can be pretty printed
# with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
def uids(self, ret, info):
assert is_namedtuple(ret)
for uid in ret:
assert isinstance(uid, int)
assert uid >= 0
def gids(self, ret, info):
assert is_namedtuple(ret)
# note: testing all gids as above seems not to be reliable for
# gid == 30 (nodoby); not sure why.
for gid in ret:
assert isinstance(gid, int)
if not MACOS and not NETBSD:
assert gid >= 0
def username(self, ret, info):
assert isinstance(ret, str)
assert ret.strip() == ret
assert ret.strip()
def status(self, ret, info):
assert isinstance(ret, str)
assert ret, ret
assert ret != '?' # XXX
assert ret in VALID_PROC_STATUSES
def io_counters(self, ret, info):
assert is_namedtuple(ret)
for field in ret:
assert isinstance(field, int)
if field != -1:
assert field >= 0
def ionice(self, ret, info):
if LINUX:
assert isinstance(ret.ioclass, int)
assert isinstance(ret.value, int)
assert ret.ioclass >= 0
assert ret.value >= 0
else: # Windows, Cygwin
choices = [
psutil.IOPRIO_VERYLOW,
psutil.IOPRIO_LOW,
psutil.IOPRIO_NORMAL,
psutil.IOPRIO_HIGH,
]
assert isinstance(ret, int)
assert ret >= 0
assert ret in choices
def num_threads(self, ret, info):
assert isinstance(ret, int)
if WINDOWS and ret == 0 and is_win_secure_system_proc(info['pid']):
# https://github.com/giampaolo/psutil/issues/2338
return
assert ret >= 1
def threads(self, ret, info):
assert isinstance(ret, list)
for t in ret:
assert is_namedtuple(t)
assert t.id >= 0
assert t.user_time >= 0
assert t.system_time >= 0
for field in t:
assert isinstance(field, (int, float))
def cpu_times(self, ret, info):
assert is_namedtuple(ret)
for n in ret:
assert isinstance(n, float)
assert n >= 0
# TODO: check ntuple fields
def cpu_percent(self, ret, info):
assert isinstance(ret, float)
assert 0.0 <= ret <= 100.0, ret
def cpu_num(self, ret, info):
assert isinstance(ret, int)
if FREEBSD and ret == -1:
return
assert ret >= 0
if psutil.cpu_count() == 1:
assert ret == 0
assert ret in list(range(psutil.cpu_count()))
def memory_info(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
assert isinstance(value, int)
assert value >= 0
if WINDOWS:
assert ret.peak_wset >= ret.wset
assert ret.peak_paged_pool >= ret.paged_pool
assert ret.peak_nonpaged_pool >= ret.nonpaged_pool
assert ret.peak_pagefile >= ret.pagefile
def memory_full_info(self, ret, info):
assert is_namedtuple(ret)
total = psutil.virtual_memory().total
for name in ret._fields:
value = getattr(ret, name)
assert isinstance(value, int)
assert value >= 0
if LINUX or (OSX and name in {'vms', 'data'}):
# On Linux there are processes (e.g. 'goa-daemon') whose
# VMS is incredibly high for some reason.
continue
assert value <= total, name
if LINUX:
assert ret.pss >= ret.uss
def open_files(self, ret, info):
assert isinstance(ret, list)
for f in ret:
assert isinstance(f.fd, int)
assert isinstance(f.path, str)
assert f.path.strip() == f.path
if WINDOWS:
assert f.fd == -1
elif LINUX:
assert isinstance(f.position, int)
assert isinstance(f.mode, str)
assert isinstance(f.flags, int)
assert f.position >= 0
assert f.mode in {'r', 'w', 'a', 'r+', 'a+'}
assert f.flags > 0
elif BSD and not f.path:
# XXX see: https://github.com/giampaolo/psutil/issues/595
continue
assert os.path.isabs(f.path), f
try:
st = os.stat(f.path)
except FileNotFoundError:
pass
else:
assert stat.S_ISREG(st.st_mode), f
def num_fds(self, ret, info):
assert isinstance(ret, int)
assert ret >= 0
def net_connections(self, ret, info):
with create_sockets():
assert len(ret) == len(set(ret))
for conn in ret:
assert is_namedtuple(conn)
check_connection_ntuple(conn)
def cwd(self, ret, info):
assert isinstance(ret, str)
assert ret.strip() == ret
if ret:
assert os.path.isabs(ret), ret
try:
st = os.stat(ret)
except OSError as err:
if WINDOWS and psutil._psplatform.is_permission_err(err):
pass
# directory has been removed in mean time
elif err.errno != errno.ENOENT:
raise
else:
assert stat.S_ISDIR(st.st_mode)
def memory_percent(self, ret, info):
assert isinstance(ret, float)
assert 0 <= ret <= 100, ret
def is_running(self, ret, info):
assert isinstance(ret, bool)
def cpu_affinity(self, ret, info):
assert isinstance(ret, list)
assert ret != []
cpus = list(range(psutil.cpu_count()))
for n in ret:
assert isinstance(n, int)
assert n in cpus
def terminal(self, ret, info):
assert isinstance(ret, (str, type(None)))
if ret is not None:
assert os.path.isabs(ret), ret
assert os.path.exists(ret), ret
def memory_maps(self, ret, info):
for nt in ret:
assert isinstance(nt.addr, str)
assert isinstance(nt.perms, str)
assert isinstance(nt.path, str)
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
if value.startswith(("[", "anon_inode:")): # linux
continue
if BSD and value == "pvclock": # seen on FreeBSD
continue
assert os.path.isabs(nt.path), nt.path
# commented as on Linux we might get
# '/foo/bar (deleted)'
# assert os.path.exists(nt.path), nt.path
elif fname == 'addr':
assert value, repr(value)
elif fname == 'perms':
if not WINDOWS:
assert value, repr(value)
else:
assert isinstance(value, int)
assert value >= 0
def num_handles(self, ret, info):
assert isinstance(ret, int)
assert ret >= 0
def nice(self, ret, info):
assert isinstance(ret, int)
if POSIX:
assert -20 <= ret <= 20, ret
else:
priorities = [
getattr(psutil, x)
for x in dir(psutil)
if x.endswith('_PRIORITY_CLASS')
]
assert ret in priorities
assert isinstance(ret, enum.IntEnum)
def num_ctx_switches(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
assert isinstance(value, int)
assert value >= 0
def rlimit(self, ret, info):
assert isinstance(ret, tuple)
assert len(ret) == 2
assert ret[0] >= -1
assert ret[1] >= -1
def environ(self, ret, info):
assert isinstance(ret, dict)
for k, v in ret.items():
assert isinstance(k, str)
assert isinstance(v, str)
| TestFetchAllProcesses |
python | python__mypy | mypy/nodes.py | {
"start": 97953,
"end": 98375
} | class ____(Expression):
"""Typed dict expression TypedDict(...)."""
__slots__ = ("info",)
__match_args__ = ("info",)
# The class representation of this typed dict
info: TypeInfo
def __init__(self, info: TypeInfo) -> None:
super().__init__()
self.info = info
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_typeddict_expr(self)
| TypedDictExpr |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 14436,
"end": 14696
} | class ____(models.Model):
series = models.ForeignKey("Series", on_delete=models.CASCADE, related_name="works")
title = models.CharField(max_length=100)
history = HistoricalRecords()
class Meta:
order_with_respect_to = "series"
| SeriesWork |
python | wandb__wandb | wandb/old/summary.py | {
"start": 5877,
"end": 11399
} | class ____(SummarySubDict):
"""Store summary metrics (eg. accuracy) during and after a run.
You can manipulate this as if it's a Python dictionary but the keys
get mangled. .strip() is called on them, so spaces at the beginning
and end are removed.
"""
def __init__(self, run, summary=None):
super().__init__()
self._run = run
self._h5_path = os.path.join(self._run.dir, DEEP_SUMMARY_FNAME)
# Lazy load the h5 file
self._h5 = None
# Mirrored version of self._dict with versions of values that get written
# to JSON kept up to date by self._root_set() and self._root_del().
self._json_dict = {}
if summary is not None:
self._json_dict = summary
def _json_get(self, path):
pass
def _root_get(self, path, child_dict):
json_dict = self._json_dict
for key in path[:-1]:
json_dict = json_dict[key]
key = path[-1]
if key in json_dict:
child_dict[key] = self._decode(path, json_dict[key])
def _root_del(self, path):
json_dict = self._json_dict
for key in path[:-1]:
json_dict = json_dict[key]
val = json_dict[path[-1]]
del json_dict[path[-1]]
if isinstance(val, dict) and val.get("_type") in H5_TYPES:
if not h5py:
wandb.termerror("Deleting tensors in summary requires h5py")
else:
self.open_h5()
h5_key = "summary/" + ".".join(path)
del self._h5[h5_key]
self._h5.flush()
def _root_set(self, path, new_keys_values):
json_dict = self._json_dict
for key in path:
json_dict = json_dict[key]
for new_key, new_value in new_keys_values:
json_dict[new_key] = self._encode(new_value, path + (new_key,))
def write_h5(self, path, val):
# ensure the file is open
self.open_h5()
if not self._h5:
wandb.termerror("Storing tensors in summary requires h5py")
else:
try:
del self._h5["summary/" + ".".join(path)]
except KeyError:
pass
self._h5["summary/" + ".".join(path)] = val
self._h5.flush()
def read_h5(self, path, val=None):
# ensure the file is open
self.open_h5()
if not self._h5:
wandb.termerror("Reading tensors from summary requires h5py")
else:
return self._h5.get("summary/" + ".".join(path), val)
def open_h5(self):
if not self._h5 and h5py:
self._h5 = h5py.File(self._h5_path, "a", libver="latest")
def _decode(self, path, json_value):
"""Decode a `dict` encoded by `Summary._encode()`, loading h5 objects.
h5 objects may be very large, so we won't have loaded them automatically.
"""
if isinstance(json_value, dict):
if json_value.get("_type") in H5_TYPES:
return self.read_h5(path, json_value)
elif json_value.get("_type") == "data-frame":
wandb.termerror(
"This data frame was saved via the wandb data API. Contact support@wandb.com for help."
)
return None
# TODO: transform wandb objects and plots
else:
return SummarySubDict(self, path)
else:
return json_value
def _encode(self, value, path_from_root):
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in value.items():
json_value[key] = self._encode(value, path_from_root + (key,))
return json_value
else:
path = ".".join(path_from_root)
friendly_value, converted = util.json_friendly(
val_to_json(self._run, path, value, namespace="summary")
)
json_value, compressed = util.maybe_compress_summary(
friendly_value, util.get_h5_typename(value)
)
if compressed:
self.write_h5(path_from_root, friendly_value)
return json_value
def download_h5(run_id, entity=None, project=None, out_dir=None):
api = Api()
meta = api.download_url(
project or api.settings("project"),
DEEP_SUMMARY_FNAME,
entity=entity or api.settings("entity"),
run=run_id,
)
if meta and "md5" in meta and meta["md5"] is not None:
# TODO: make this non-blocking
wandb.termlog("Downloading summary data...")
path, res = api.download_write_file(meta, out_dir=out_dir)
return path
def upload_h5(file, run_id, entity=None, project=None):
api = Api()
wandb.termlog("Uploading summary data...")
with open(file, "rb") as f:
api.push(
{os.path.basename(file): f}, run=run_id, project=project, entity=entity
)
| Summary |
python | huggingface__transformers | src/transformers/models/vit_msn/modeling_vit_msn.py | {
"start": 10890,
"end": 11617
} | class ____(nn.Module):
"""
The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: ViTMSNConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN
| ViTMSNSelfOutput |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 217306,
"end": 220774
} | class ____:
def test_pdf(self):
rng = np.random.default_rng(3791303244302340058)
size = 10 # number of points to check
x = rng.normal(scale=10, size=size)
a = rng.uniform(high=10, size=size)
res = stats.dgamma.pdf(x, a)
ref = stats.gamma.pdf(np.abs(x), a) / 2
assert_allclose(res, ref)
dist = stats.dgamma(a)
# There was an intermittent failure with assert_equal on Linux - 32 bit
assert_allclose(dist.pdf(x), res, rtol=5e-16)
# mpmath was used to compute the expected values.
# For x < 0, cdf(x, a) is mp.gammainc(a, -x, mp.inf, regularized=True)/2
# For x > 0, cdf(x, a) is (1 + mp.gammainc(a, 0, x, regularized=True))/2
# E.g.
# from mpmath import mp
# mp.dps = 50
# print(float(mp.gammainc(1, 20, mp.inf, regularized=True)/2))
# prints
# 1.030576811219279e-09
@pytest.mark.parametrize('x, a, expected',
[(-20, 1, 1.030576811219279e-09),
(-40, 1, 2.1241771276457944e-18),
(-50, 5, 2.7248509914602648e-17),
(-25, 0.125, 5.333071920958156e-14),
(5, 1, 0.9966310265004573)])
def test_cdf_ppf_sf_isf_tail(self, x, a, expected):
cdf = stats.dgamma.cdf(x, a)
assert_allclose(cdf, expected, rtol=5e-15)
ppf = stats.dgamma.ppf(expected, a)
assert_allclose(ppf, x, rtol=5e-15)
sf = stats.dgamma.sf(-x, a)
assert_allclose(sf, expected, rtol=5e-15)
isf = stats.dgamma.isf(expected, a)
assert_allclose(isf, -x, rtol=5e-15)
@pytest.mark.parametrize("a, ref",
[(1.5, 2.0541199559354117),
(1.3, 1.9357296377121247),
(1.1, 1.7856502333412134)])
def test_entropy(self, a, ref):
# The reference values were calculated with mpmath:
# def entropy_dgamma(a):
# def pdf(x):
# A = mp.one / (mp.mpf(2.) * mp.gamma(a))
# B = mp.fabs(x) ** (a - mp.one)
# C = mp.exp(-mp.fabs(x))
# h = A * B * C
# return h
#
# return -mp.quad(lambda t: pdf(t) * mp.log(pdf(t)),
# [-mp.inf, mp.inf])
assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-14)
@pytest.mark.parametrize("a, ref",
[(1e-100, -1e+100),
(1e-10, -9999999975.858217),
(1e-5, -99987.37111657023),
(1e4, 6.717222565586032),
(1000000000000000.0, 19.38147391121996),
(1e+100, 117.2413403634669)])
def test_entropy_entreme_values(self, a, ref):
# The reference values were calculated with mpmath:
# from mpmath import mp
# mp.dps = 500
# def second_dgamma(a):
# a = mp.mpf(a)
# x_1 = a + mp.log(2) + mp.loggamma(a)
# x_2 = (mp.one - a) * mp.digamma(a)
# h = x_1 + x_2
# return h
assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-10)
def test_entropy_array_input(self):
x = np.array([1, 5, 1e20, 1e-5])
y = stats.dgamma.entropy(x)
for i in range(len(y)):
assert y[i] == stats.dgamma.entropy(x[i])
| TestDgamma |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/training_status.py | {
"start": 375,
"end": 580
} | class ____(Enum):
LESSON_NUM = "lesson_num"
STATS_METADATA = "metadata"
CHECKPOINTS = "checkpoints"
FINAL_CHECKPOINT = "final_checkpoint"
ELO = "elo"
@attr.s(auto_attribs=True)
| StatusType |
python | wandb__wandb | tests/unit_tests/test_lib/test_fsm.py | {
"start": 463,
"end": 982
} | class ____(TrackCalls):
def __init__(self, calls):
super().__init__(calls)
def on_state(self, inputs) -> None:
self._calls.append("B:on_state")
def to_a(self, inputs) -> bool:
self._calls.append("to_a")
return True
def test_normal():
calls = []
sa = A(calls)
sb = B(calls)
f = Fsm(
states=(sa, sb), table={A: [FsmEntry(sa.to_b, B)], B: [FsmEntry(sb.to_a, A)]}
)
f.input({"input1": 1, "input2": 2})
assert calls == ["to_b", "B:on_state"]
| B |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 117179,
"end": 119007
} | class ____(fixtures.DeclarativeMappedTest):
"""test :ticket:`3831`"""
__only_on__ = "sqlite"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Venue(Base):
__tablename__ = "venue"
id = Column(Integer, primary_key=True)
name = Column(String)
descendants = relationship(
"Venue",
primaryjoin=func.instr(
remote(foreign(name)), name + "/"
).as_comparison(1, 2)
== 1,
viewonly=True,
order_by=name,
)
@classmethod
def insert_data(cls, connection):
Venue = cls.classes.Venue
s = Session(connection)
s.add_all(
[
Venue(name="parent1"),
Venue(name="parent2"),
Venue(name="parent1/child1"),
Venue(name="parent1/child2"),
Venue(name="parent2/child1"),
]
)
s.commit()
def test_lazyload(self):
Venue = self.classes.Venue
s = fixture_session()
v1 = s.query(Venue).filter_by(name="parent1").one()
eq_(
[d.name for d in v1.descendants],
["parent1/child1", "parent1/child2"],
)
def test_joinedload(self):
Venue = self.classes.Venue
s = fixture_session()
def go():
v1 = (
s.query(Venue)
.filter_by(name="parent1")
.options(joinedload(Venue.descendants))
.one()
)
eq_(
[d.name for d in v1.descendants],
["parent1/child1", "parent1/child2"],
)
self.assert_sql_count(testing.db, go, 1)
| FunctionAsPrimaryJoinTest |
python | pytorch__pytorch | test/distributed/checkpoint/test_file_system_checkpoint_cpu.py | {
"start": 3124,
"end": 3602
} | class ____:
def __init__(self, value: IO[bytes]) -> Any:
self.state = {"blob": value}
def state_dict(self) -> dict[str, Any]:
return self.state
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
self.state = state_dict
def __eq__(self, other: object) -> bool:
return isinstance(other, BlobState) and self.state == other.state
def __repr__(self) -> str:
return f"BlobState({self.state['blob']})"
| BlobState |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 2619,
"end": 3608
} | class ____:
"""A member of object.
This is used for the result of `Documenter.get_module_members()` to
represent each member of the object.
"""
__slots__ = '__name__', 'object', 'docstring', 'class_', 'skipped'
__name__: str
object: Any
docstring: str | None
class_: Any
skipped: bool
def __init__(
self,
name: str,
obj: Any,
*,
docstring: str | None = None,
class_: Any = None,
skipped: bool = False,
) -> None:
self.__name__ = name
self.object = obj
self.docstring = docstring
self.class_ = class_
self.skipped = skipped
def __repr__(self) -> str:
return (
f'ObjectMember('
f'name={self.__name__!r}, '
f'obj={self.object!r}, '
f'docstring={self.docstring!r}, '
f'class_={self.class_!r}, '
f'skipped={self.skipped!r}'
f')'
)
| ObjectMember |
python | huggingface__transformers | src/transformers/models/cpmant/tokenization_cpmant.py | {
"start": 2345,
"end": 8039
} | class ____(PreTrainedTokenizer):
"""
Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bod_token (`str`, *optional*, defaults to `"<d>"`):
The beginning of document token.
eod_token (`str`, *optional*, defaults to `"</d>"`):
The end of document token.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token.
line_token (`str`, *optional*, defaults to `"</n>"`):
The line token.
space_token (`str`, *optional*, defaults to `"</_>"`):
The space token.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
add_prefix_space = False
def __init__(
self,
vocab_file,
bod_token="<d>",
eod_token="</d>",
bos_token="<s>",
eos_token="</s>",
pad_token="<pad>",
unk_token="<unk>",
line_token="</n>",
space_token="</_>",
padding_side="left",
**kwargs,
):
requires_backends(self, ["rjieba"])
self.bod_token = bod_token
self.eod_token = eod_token
self.encoder = load_vocab(vocab_file)
self.encoder[" "] = self.encoder[space_token]
self.encoder["\n"] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
self.decoder = {v: k for k, v in self.encoder.items()}
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token)
super().__init__(
bod_token=bod_token,
eod_token=eod_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
unk_token=unk_token,
line_token=line_token,
space_token=space_token,
padding_side=padding_side,
token_type_ids_pattern="all_zeros",
token_type_ids_include_special_tokens=True,
special_tokens_pattern="bos",
**kwargs,
)
for special_token in [space_token, line_token]:
token_id = self.added_tokens_encoder.pop(special_token, None)
if token_id is not None:
self._added_tokens_decoder.pop(token_id, None)
self._update_total_vocab_size()
@property
def bod_token_id(self):
return self.encoder[self.bod_token]
@property
def eod_token_id(self):
return self.encoder[self.eod_token]
@property
def newline_id(self):
return self.encoder["\n"]
@property
def vocab_size(self) -> int:
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def _tokenize(self, text):
"""Tokenize a string."""
output_tokens = []
for x in rjieba.cut(text, False):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
return output_tokens
def _decode(self, token_ids, **kwargs):
"""Decode ids into a string."""
token_ids = [i for i in token_ids if i >= 0]
token_ids = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(token_ids, **kwargs)
def check(self, token):
return token in self.encoder
def convert_tokens_to_string(self, tokens: list[str]) -> str:
return "".join(tokens)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
index = 0
if " " in self.encoder:
self.encoder["</_>"] = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
self.encoder["</n>"] = self.encoder["\n"]
del self.encoder["\n"]
self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!"
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
__all__ = ["CpmAntTokenizer"]
| CpmAntTokenizer |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/dist_autograd_test.py | {
"start": 6809,
"end": 39643
} | class ____(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(self._next_rank(), exec_mode, method, *args)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
def _test_graph(self, fn, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor()
t2 = build_sparse_tensor()
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(worker_name(dst_rank), fn, args=(t1, t2)).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
next(iter(recv_functions.values())),
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
# For self.rank, it has 4 graphs to verify
# One is for current context id when this rank send first rpc call.
# Second one is for prev context id when this rank make 1st nested
# call.
# Third one is for prev prev context id when this rank make
# 2nd nested call.
# Last one is for prev prev prev context id when this rank
# execute the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
next(iter(recv_functions.values())),
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values())))
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
# For self.rank, it has 2 graphs to verify.
# One is for current context id when this rank send first rpc
# call and execute the torch.add() operator.
# Another one is for prev context id when this rank make
# nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
next(iter(send_functions.values())),
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=False)
t2 = build_sparse_tensor(requires_grad=False)
else:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
rpc.remote(worker_name(dst_rank), torch.add, args=(t1, t2)).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# NB: RRef.to_here() always passes the autograd context to the
# the callee, as the caller does not know whether the return
# value would contain a requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
def _test_rpc_complex_args(self, exec_mode, sparse):
with dist_autograd.context():
num_tensors = 10
tensors = []
for i in range(num_tensors):
if sparse:
tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
else:
tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
tensors.append(tensor)
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), torch.stack, args=(tensors,))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError(f"Unrecognized ExecMode {exec_mode}")
self.assertEqual(torch.stack(tensors), ret)
# Verify appropriate tensors have been attached the autograd graph.
next_funcs = next(
iter(dist_autograd._current_context()._send_functions().values())
).next_functions
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still cleanup the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
# it is possible that the response could.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(worker_name(dst_rank), _set_rpc_done, args=(context_id, 1))
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
def _backward_no_grad_on_tensor(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2)
if sparse:
loss_local = torch.sparse.sum(loss_local)
else:
loss_local = loss_local.sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of tensor lives on the current rank, and the grad of the rref
# tensor lives on the rref owner.
def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
local_ret = torch.add(t1, t2)
if sparse:
local_ret = torch.sparse.sum(local_ret)
else:
local_ret = local_ret.sum()
local_ret.backward()
with dist_autograd.context() as context_id:
if sparse:
rref_t1 = rpc.remote(
rref_owner,
build_sparse_tensor,
args=(
False,
True,
),
)
else:
rref_t1 = rpc.remote(
rref_owner,
_torch_ones,
args=((3, 3),),
kwargs={"requires_grad": True},
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
if sparse:
ret = torch.sparse.sum(ret)
else:
ret = ret.sum()
dist_autograd.backward(context_id, [ret])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
# In this test, every rank will serve as a parameter server (ps) and a
# driver, and then kicks off trainers on the other three ranks. So, we have:
# ps = rank0 with trainers = rank1/2/3
# ps = rank2 with trainers = rank2/3/0
# ps = rank3 with trainers = rank3/0/1
# ps = rank4 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
if sparse:
torch.sparse.sum(local_ret).backward()
else:
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(worker_name(self.rank), create_ref_fn, args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = [
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse),
)
for rank_diff in rank_diffs
]
# check if the trainers have done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
if sparse:
val = self._exec_func(exec_mode, torch.mul, s1, s2)
val = self._exec_func(exec_mode, torch.mul, val, val)
loss = torch.sparse.sum(val)
else:
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
def _backward_different_dtypes(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple_python_udf(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple_script_call(self, t1, t2, sparse):
local_grads = None
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
if sparse:
loss = torch.sparse.sum(forward_ret)
else:
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
def _nested_backward_accumulate_grads(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
def _backwards_nested_python_udf(self, t1, t2, sparse):
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = t1 * t2 * t3 * t4 * res
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
def _mixed_requires_grad(self, t1, t2, sparse):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
)
self.assertEqual(t1 * t2, ret)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
def _multiple_backward(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
# Run backward in a loop multiple times.
for _ in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# For current context, this rank sends t1 and t2 tensors to dst_rank,
# then get t3 = torch.add(t1, t2) result tensor.
# For the current context in this rank, it expects graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple(self, dst, t1, t2, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(dst, exec_mode, torch.add, t1, t2)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
# result tensor t3 back.
# For this context in this rank, it expects graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, forwards t1 and t2 tensors using
# nested rpc call to next dst. In return route, receive result tensor t3
# from next dst and forwarding t3 back to previous calls.
# For this context in this rank, it expects graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
# For send function when making nest rpc call,
# next functions of the send function are two recv functions
# for received two tensors from previous call
next_funcs = next(iter(send_functions.values())).next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For send function when returning response to previous call
# next function of the send function is the recv function
# for received tensor result returned from nested call
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
| CommonDistAutogradTest |
python | pallets__jinja | src/jinja2/exceptions.py | {
"start": 4625,
"end": 4742
} | class ____(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
| UndefinedError |
python | getsentry__sentry | src/sentry/auth/authenticators/base.py | {
"start": 1241,
"end": 1361
} | class ____(Enum):
NEW = "new"
MULTI = "multi"
ROTATION = "rotation"
EXISTING = "existing"
| EnrollmentStatus |
python | scipy__scipy | scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py | {
"start": 969,
"end": 1287
} | class ____:
"""Build LinearOperator from hessp"""
def __init__(self, hessp, n):
self.hessp = hessp
self.n = n
def __call__(self, x, *args):
def matvec(p):
return self.hessp(x, p, *args)
return LinearOperator((self.n, self.n), matvec=matvec)
| HessianLinearOperator |
python | sympy__sympy | sympy/sets/fancysets.py | {
"start": 3638,
"end": 4373
} | class ____(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
elif other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
def _eval_is_subset(self, other):
return Range(oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(oo).is_superset(other)
| Naturals0 |
python | getsentry__sentry | src/sentry/workflow_engine/models/detector_state.py | {
"start": 336,
"end": 1740
} | class ____(DefaultFieldsModel):
"""
This table can be seen as a denormalization of the latest open period state
of the issue associated to a detector. We need this because open-periods
are asynchronously created and there are scernios where we need to know the
detector state immediately after a state change.
"""
__relocation_scope__ = RelocationScope.Excluded
detector = FlexibleForeignKey("workflow_engine.Detector")
# This key is used when a detector is using group-by
# allows us to link to a specific group from a single detector
detector_group_key = models.CharField(max_length=200, blank=True, null=True)
# If the detector has met the conditions to be in an triggered state
is_triggered = models.BooleanField(default=False, db_column="active")
# The detectors priority level from the last detector evaluation
state = models.CharField(max_length=200, default=DetectorPriorityLevel.OK)
@property
def priority_level(self) -> DetectorPriorityLevel:
"""Returns the state as a DetectorPriorityLevel enum."""
return DetectorPriorityLevel(int(self.state))
class Meta:
constraints = [
models.UniqueConstraint(
F("detector"),
Coalesce("detector_group_key", Value("")),
name="detector_state_unique_group_key",
),
]
| DetectorState |
python | explosion__spaCy | spacy/lang/pl/__init__.py | {
"start": 466,
"end": 713
} | class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
| PolishDefaults |
python | pytoolz__toolz | toolz/functoolz.py | {
"start": 20995,
"end": 29757
} | class ____:
"""A wrapper around a function to catch exceptions and
dispatch to a handler.
This is like a functional try/except block, in the same way that
ifexprs are functional if/else blocks.
Examples
--------
>>> excepting = excepts(
... ValueError,
... lambda a: [1, 2].index(a),
... lambda _: -1,
... )
>>> excepting(1)
0
>>> excepting(3)
-1
Multiple exceptions and default except clause.
>>> excepting = excepts((IndexError, KeyError), lambda a: a[0])
>>> excepting([])
>>> excepting([1])
1
>>> excepting({})
>>> excepting({0: 1})
1
"""
def __init__(self, exc, func, handler=return_none):
self.exc = exc
self.func = func
self.handler = handler
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except self.exc as e:
return self.handler(e)
@instanceproperty(classval=__doc__)
def __doc__(self):
from textwrap import dedent
exc = self.exc
try:
if isinstance(exc, tuple):
exc_name = '(%s)' % ', '.join(
map(attrgetter('__name__'), exc),
)
else:
exc_name = exc.__name__
return dedent(
"""\
A wrapper around {inst.func.__name__!r} that will except:
{exc}
and handle any exceptions with {inst.handler.__name__!r}.
Docs for {inst.func.__name__!r}:
{inst.func.__doc__}
Docs for {inst.handler.__name__!r}:
{inst.handler.__doc__}
"""
).format(
inst=self,
exc=exc_name,
)
except AttributeError:
return type(self).__doc__
@property
def __name__(self):
exc = self.exc
try:
if isinstance(exc, tuple):
exc_name = '_or_'.join(map(attrgetter('__name__'), exc))
else:
exc_name = exc.__name__
return f'{self.func.__name__}_excepting_{exc_name}'
except AttributeError:
return 'excepting'
def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
if sigspec is None:
try:
sigspec = inspect.signature(func)
except (ValueError, TypeError) as e:
sigspec = e
if isinstance(sigspec, ValueError):
return None, builtin_func(*builtin_args)
elif not isinstance(sigspec, inspect.Signature):
if (
func in _sigs.signatures
and (
hasattr(func, '__signature__')
and hasattr(func.__signature__, '__get__')
)
):
val = builtin_func(*builtin_args)
return None, val
return None, False
return sigspec, None
if PYPY: # pragma: no cover
_check_sigspec_orig = _check_sigspec
def _check_sigspec(sigspec, func, builtin_func, *builtin_args):
# PyPy may lie, so use our registry for builtins instead
if func in _sigs.signatures:
val = builtin_func(*builtin_args)
return None, val
return _check_sigspec_orig(sigspec, func, builtin_func, *builtin_args)
_check_sigspec.__doc__ = """ \
Private function to aid in introspection compatibly across Python versions.
If a callable doesn't have a signature (Python 3) or an argspec (Python 2),
the signature registry in toolz._signatures is used.
"""
def num_required_args(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._num_required_args,
func)
if sigspec is None:
return rv
return sum(1 for p in sigspec.parameters.values()
if p.default is p.empty
and p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY))
def has_varargs(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_varargs, func)
if sigspec is None:
return rv
return any(p.kind == p.VAR_POSITIONAL
for p in sigspec.parameters.values())
def has_keywords(func, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._has_keywords, func)
if sigspec is None:
return rv
return any(p.default is not p.empty
or p.kind in (p.KEYWORD_ONLY, p.VAR_KEYWORD)
for p in sigspec.parameters.values())
def is_valid_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_valid_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind(*args, **kwargs)
except TypeError:
return False
return True
def is_partial_args(func, args, kwargs, sigspec=None):
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_partial_args,
func, args, kwargs)
if sigspec is None:
return rv
try:
sigspec.bind_partial(*args, **kwargs)
except TypeError:
return False
return True
def is_arity(n, func, sigspec=None):
""" Does a function have only n positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x):
... return x
>>> is_arity(1, f)
True
>>> def g(x, y=1):
... return x + y
>>> is_arity(1, g)
False
"""
sigspec, rv = _check_sigspec(sigspec, func, _sigs._is_arity, n, func)
if sigspec is None:
return rv
num = num_required_args(func, sigspec)
if num is not None:
num = num == n
if not num:
return False
varargs = has_varargs(func, sigspec)
if varargs:
return False
keywords = has_keywords(func, sigspec)
if keywords:
return False
if num is None or varargs is None or keywords is None: # pragma: no cover
return None
return True
num_required_args.__doc__ = """ \
Number of required positional arguments
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y, z=3):
... return x + y + z
>>> num_required_args(f)
2
>>> def g(*args, **kwargs):
... pass
>>> num_required_args(g)
0
"""
has_varargs.__doc__ = """ \
Does a function have variadic positional arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(*args):
... return args
>>> has_varargs(f)
True
>>> def g(**kwargs):
... return kwargs
>>> has_varargs(g)
False
"""
has_keywords.__doc__ = """ \
Does a function have keyword arguments?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def f(x, y=0):
... return x + y
>>> has_keywords(f)
True
"""
is_valid_args.__doc__ = """ \
Is ``func(*args, **kwargs)`` a valid function call?
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_valid_args(add, (1,), {})
False
>>> is_valid_args(add, (1, 2), {})
True
>>> is_valid_args(map, (), {})
False
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
is_partial_args.__doc__ = """ \
Can partial(func, *args, **kwargs)(*args2, **kwargs2) be a valid call?
Returns True *only* if the call is valid or if it is possible for the
call to become valid by adding more positional or keyword arguments.
This function relies on introspection and does not call the function.
Returns None if validity can't be determined.
>>> def add(x, y):
... return x + y
>>> is_partial_args(add, (1,), {})
True
>>> is_partial_args(add, (1, 2), {})
True
>>> is_partial_args(add, (1, 2, 3), {})
False
>>> is_partial_args(map, (), {})
True
**Implementation notes**
Python 2 relies on ``inspect.getargspec``, which only works for
user-defined functions. Python 3 uses ``inspect.signature``, which
works for many more types of callables.
Many builtins in the standard library are also supported.
"""
from . import _signatures as _sigs
| excepts |
python | huggingface__transformers | src/transformers/models/video_llava/configuration_video_llava.py | {
"start": 888,
"end": 6448
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VideoLlavaForConditionalGeneration`]. It is used to instantiate an
VideoLlava model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the like LanguageBind/Video-LLaVA-7B-hf.
e.g. [LanguageBind/Video-LLaVA-7B-hf](https://huggingface.co/LanguageBind/Video-LLaVA-7B-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`VideoLlavaVisionConfig`, *optional*):
Custom vision config or dict. Defaults to `CLIPVisionConfig` if not indicated.
text_config (`Union[AutoConfig, dict]`, *optional*):
The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
Defaults to `LlamaConfig` if not indicated.
image_token_index (`int`, *optional*, defaults to 32000):
The image token index to encode the image prompt.
video_token_index (`int`, *optional*, defaults to 32001):
The video token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the CLIP backbone.
Can be either "full" to select all features or "default" to select features without `CLS`.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_seq_length (`int`, *optional*, defaults to 256):
Sequence length of one image embedding.
video_seq_length (`int`, *optional*, defaults to 2056):
Sequence length of one video embedding.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
Example:
```python
>>> from transformers import VideoLlavaForConditionalGeneration, VideoLlavaConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> # Initializing a VideoLlava video_llava-1.5-7b style configuration
>>> configuration = VideoLlavaConfig(vision_config, text_config)
>>> # Initializing a model from the video_llava-1.5-7b style configuration
>>> model = VideoLlavaForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "video_llava"
attribute_map = {
"image_token_id": "image_token_index",
"video_token_id": "video_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=32000,
video_token_index=32001,
projector_hidden_act="gelu",
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_seq_length=256,
video_seq_length=2056,
multimodal_projector_bias=True,
**kwargs,
):
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.image_seq_length = image_seq_length
self.video_seq_length = video_seq_length
self.multimodal_projector_bias = multimodal_projector_bias
self.vision_config = vision_config
if isinstance(self.vision_config, dict):
if "model_type" not in vision_config:
vision_config["model_type"] = "clip_vision_model"
logger.warning("Key=`model_type` not found in vision config, setting it to `clip_vision_model`")
self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
self.vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=224,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
if isinstance(text_config, dict):
if "model_type" not in text_config:
text_config["model_type"] = "llama"
logger.warning("Key=`model_type` not found in text config, setting it to `llama`")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["VideoLlavaConfig"]
| VideoLlavaConfig |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 25415,
"end": 25708
} | class ____(Interface):
"""A utility which generates a request"""
def __call__(environ):
"""Return an instance of ``pyramid.request.Request``"""
def blank(path):
"""Return an empty request object (see
:meth:`pyramid.request.Request.blank`)"""
| IRequestFactory |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_auth.py | {
"start": 1059,
"end": 2573
} | class ____:
@mock.patch("airflow.api_fastapi.core_api.routes.ui.auth.get_auth_manager")
def test_should_response_200(self, mock_get_auth_manager, test_client):
mock_get_auth_manager.return_value.get_authorized_menu_items.return_value = [
MenuItem.VARIABLES,
MenuItem.CONNECTIONS,
]
mock_get_auth_manager.return_value.get_extra_menu_items.return_value = [
ExtraMenuItem(text="name1", href="path1"),
ExtraMenuItem(text="name2", href="path2"),
]
response = test_client.get("/auth/menus")
assert response.status_code == 200
assert response.json() == {
"authorized_menu_items": ["Variables", "Connections"],
"extra_menu_items": [
{"text": "name1", "href": "path1"},
{"text": "name2", "href": "path2"},
],
}
def test_with_unauthenticated_user(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/auth/menus")
assert response.status_code == 401
assert response.json() == {"detail": "Not authenticated"}
@mock.patch.object(SimpleAuthManager, "filter_authorized_menu_items", return_value=[])
def test_with_unauthorized_user(self, _, unauthorized_test_client):
response = unauthorized_test_client.get("/auth/menus")
assert response.status_code == 200
assert response.json() == {"authorized_menu_items": [], "extra_menu_items": []}
| TestGetAuthLinks |
python | oauthlib__oauthlib | oauthlib/oauth1/rfc5849/errors.py | {
"start": 2406,
"end": 2474
} | class ____(OAuth1Error):
error = 'invalid_client'
| InvalidClientError |
python | pydata__xarray | xarray/core/groupby.py | {
"start": 16661,
"end": 19573
} | class ____:
"""
Helper class for multi-variable GroupBy.
This satisfies the Grouper interface, but is awkward to wrap in ResolvedGrouper.
For one, it simply re-infers a new EncodedGroups using known information
in existing ResolvedGroupers. So passing in a `group` (hard to define),
and `obj` (pointless) is not useful.
"""
groupers: tuple[ResolvedGrouper, ...]
def factorize(self) -> EncodedGroups:
from xarray.groupers import EncodedGroups
groupers = self.groupers
# At this point all arrays have been factorized.
codes = tuple(grouper.codes for grouper in groupers)
shape = tuple(grouper.size for grouper in groupers)
masks = tuple((code == -1) for code in codes)
# We broadcast the codes against each other
broadcasted_codes = broadcast(*codes)
# This fully broadcasted DataArray is used as a template later
first_codes = broadcasted_codes[0]
# Now we convert to a single variable GroupBy problem
_flatcodes = np.ravel_multi_index(
tuple(codes.data for codes in broadcasted_codes), shape, mode="wrap"
)
# NaNs; as well as values outside the bins are coded by -1
# Restore these after the raveling
broadcasted_masks = broadcast(*masks)
mask = functools.reduce(np.logical_or, broadcasted_masks) # type: ignore[arg-type]
_flatcodes = where(mask.data, -1, _flatcodes)
full_index = pd.MultiIndex.from_product(
[list(grouper.full_index.values) for grouper in groupers],
names=tuple(grouper.name for grouper in groupers),
)
if not full_index.is_unique:
raise ValueError(
"The output index for the GroupBy is non-unique. "
"This is a bug in the Grouper provided."
)
# This will be unused when grouping by dask arrays, so skip..
if not is_chunked_array(_flatcodes):
# Constructing an index from the product is wrong when there are missing groups
# (e.g. binning, resampling). Account for that now.
midx = full_index[np.sort(pd.unique(_flatcodes[~mask]))]
group_indices = _codes_to_group_indices(_flatcodes.ravel(), len(full_index))
else:
midx = full_index
group_indices = None
dim_name = "stacked_" + "_".join(str(grouper.name) for grouper in groupers)
coords = Coordinates.from_pandas_multiindex(midx, dim=dim_name)
for grouper in groupers:
coords.variables[grouper.name].attrs = grouper.group.attrs
return EncodedGroups(
codes=first_codes.copy(data=_flatcodes),
full_index=full_index,
group_indices=group_indices,
unique_coord=Variable(dims=(dim_name,), data=midx.values),
coords=coords,
)
| ComposedGrouper |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/represented.py | {
"start": 576,
"end": 3879
} | class ____(ABC):
"""RepresentedJob is a base class for ExternalPipeline or HistoricalPipeline.
The name is "represented" because this is an in-memory representation of a job.
The representation of a job could be referring to a job resident in
another process *or* could be referring to a historical view of the job.
"""
@property
@abstractmethod
def _job_index(self) -> JobIndex: ...
@property
def name(self) -> str:
return self._job_index.name
@property
def description(self) -> Optional[str]:
return self._job_index.description
@property
def owners(self) -> Optional[Sequence[str]]:
return self._job_index.owners
# Snapshot things
@property
@abstractmethod
def computed_job_snapshot_id(self) -> str:
pass
@property
@abstractmethod
def identifying_job_snapshot_id(self) -> str:
pass
@property
def job_snapshot(self) -> JobSnap:
return self._job_index.job_snapshot
@property
def parent_job_snapshot(self) -> Optional[JobSnap]:
return self._job_index.parent_job_snapshot
@property
def op_selection(self) -> Optional[Sequence[str]]:
return (
self._job_index.job_snapshot.lineage_snapshot.op_selection
if self._job_index.job_snapshot.lineage_snapshot
else None
)
@property
def resolved_op_selection(self) -> Optional[AbstractSet[str]]:
return (
self._job_index.job_snapshot.lineage_snapshot.resolved_op_selection
if self._job_index.job_snapshot.lineage_snapshot
else None
)
# Config
@property
def config_schema_snapshot(self) -> ConfigSchemaSnapshot:
return self._job_index.config_schema_snapshot
# DagsterTypes
@property
def dagster_type_snaps(self) -> Sequence[DagsterTypeSnap]:
return self._job_index.get_dagster_type_snaps()
def has_dagster_type_named(self, type_name: str) -> bool:
return self._job_index.has_dagster_type_name(type_name)
def get_dagster_type_by_name(self, type_name: str) -> DagsterTypeSnap:
return self._job_index.get_dagster_type_from_name(type_name)
# Modes
@property
def mode_def_snaps(self) -> Sequence[ModeDefSnap]:
return self._job_index.job_snapshot.mode_def_snaps
def get_mode_def_snap(self, mode_name: str) -> ModeDefSnap:
return self._job_index.get_mode_def_snap(mode_name)
# Deps
@property
def dep_structure_index(self) -> DependencyStructureIndex:
return self._job_index.dep_structure_index
# Nodes
def get_node_def_snap(self, node_def_name: str) -> Union[OpDefSnap, GraphDefSnap]:
check.str_param(node_def_name, "node_def_name")
return self._job_index.get_node_def_snap(node_def_name)
def get_dep_structure_index(self, node_def_name: str) -> DependencyStructureIndex:
check.str_param(node_def_name, "node_def_name")
return self._job_index.get_dep_structure_index(node_def_name)
# Graph
def get_graph_name(self) -> str:
return self._job_index.job_snapshot.graph_def_name
# Job properties
@abstractmethod
def get_external_job_source(self) -> Optional[str]:
pass
| RepresentedJob |
python | celery__celery | t/unit/utils/test_time.py | {
"start": 1469,
"end": 7695
} | class ____:
def test_parse_with_timezone(self):
d = datetime.now(_timezone.utc).replace(tzinfo=ZoneInfo("UTC"))
assert parse_iso8601(d.isoformat()) == d
# 2013-06-07T20:12:51.775877+00:00
iso = d.isoformat()
iso1 = iso.replace('+00:00', '-01:00')
d1 = parse_iso8601(iso1)
d1_offset_in_minutes = d1.utcoffset().total_seconds() / 60
assert d1_offset_in_minutes == -60
iso2 = iso.replace('+00:00', '+01:00')
d2 = parse_iso8601(iso2)
d2_offset_in_minutes = d2.utcoffset().total_seconds() / 60
assert d2_offset_in_minutes == +60
iso3 = iso.replace('+00:00', 'Z')
d3 = parse_iso8601(iso3)
assert d3.tzinfo == _timezone.utc
@pytest.mark.parametrize('delta,expected', [
(timedelta(days=2), datetime(2010, 3, 30, 0, 0)),
(timedelta(hours=2), datetime(2010, 3, 30, 11, 0)),
(timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)),
(timedelta(seconds=2), None),
])
def test_delta_resolution(delta, expected):
dt = datetime(2010, 3, 30, 11, 50, 58, 41065)
assert delta_resolution(dt, delta) == expected or dt
@pytest.mark.parametrize('seconds,expected', [
(4 * 60 * 60 * 24, '4.00 days'),
(1 * 60 * 60 * 24, '1.00 day'),
(4 * 60 * 60, '4.00 hours'),
(1 * 60 * 60, '1.00 hour'),
(4 * 60, '4.00 minutes'),
(1 * 60, '1.00 minute'),
(4, '4.00 seconds'),
(1, '1.00 second'),
(4.3567631221, '4.36 seconds'),
(0, 'now'),
])
def test_humanize_seconds(seconds, expected):
assert humanize_seconds(seconds) == expected
def test_humanize_seconds__prefix():
assert humanize_seconds(4, prefix='about ') == 'about 4.00 seconds'
def test_maybe_iso8601_datetime():
now = datetime.now()
assert maybe_iso8601(now) is now
@pytest.mark.parametrize('date_str,expected', [
('2011-11-04T00:05:23', datetime(2011, 11, 4, 0, 5, 23)),
('2011-11-04T00:05:23Z', datetime(2011, 11, 4, 0, 5, 23, tzinfo=_timezone.utc)),
('2011-11-04 00:05:23.283+00:00',
datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=_timezone.utc)),
('2011-11-04T00:05:23+04:00',
datetime(2011, 11, 4, 0, 5, 23, tzinfo=_timezone(timedelta(seconds=14400)))),
])
def test_iso8601_string_datetime(date_str, expected):
assert maybe_iso8601(date_str) == expected
@pytest.mark.parametrize('arg,expected', [
(30, timedelta(seconds=30)),
(30.6, timedelta(seconds=30.6)),
(timedelta(days=2), timedelta(days=2)),
])
def test_maybe_timedelta(arg, expected):
assert maybe_timedelta(arg) == expected
def test_remaining():
# Relative
remaining(datetime.now(_timezone.utc), timedelta(hours=1), relative=True)
"""
The upcoming cases check whether the next run is calculated correctly
"""
eastern_tz = ZoneInfo("US/Eastern")
tokyo_tz = ZoneInfo("Asia/Tokyo")
# Case 1: `start` in UTC and `now` in other timezone
start = datetime.now(ZoneInfo("UTC"))
now = datetime.now(eastern_tz)
delta = timedelta(hours=1)
assert str(start.tzinfo) == str(ZoneInfo("UTC"))
assert str(now.tzinfo) == str(eastern_tz)
rem_secs = remaining(start, delta, now).total_seconds()
# assert remaining time is approximately equal to delta
assert rem_secs == pytest.approx(delta.total_seconds(), abs=1)
# Case 2: `start` and `now` in different timezones (other than UTC)
start = datetime.now(eastern_tz)
now = datetime.now(tokyo_tz)
delta = timedelta(hours=1)
assert str(start.tzinfo) == str(eastern_tz)
assert str(now.tzinfo) == str(tokyo_tz)
rem_secs = remaining(start, delta, now).total_seconds()
assert rem_secs == pytest.approx(delta.total_seconds(), abs=1)
"""
Case 3: DST check
Suppose start (which is last_run_time) is in EST while next_run is in EDT,
then check whether the `next_run` is actually the time specified in the
start (i.e. there is not an hour diff due to DST).
In 2019, DST starts on March 10
"""
start = datetime(
month=3, day=9, year=2019, hour=10,
minute=0, tzinfo=eastern_tz) # EST
now = datetime(
day=11, month=3, year=2019, hour=1,
minute=0, tzinfo=eastern_tz) # EDT
delta = ffwd(hour=10, year=2019, microsecond=0, minute=0,
second=0, day=11, weeks=0, month=3)
# `next_actual_time` is the next time to run (derived from delta)
next_actual_time = datetime(
day=11, month=3, year=2019, hour=10, minute=0, tzinfo=eastern_tz) # EDT
assert start.tzname() == "EST"
assert now.tzname() == "EDT"
assert next_actual_time.tzname() == "EDT"
rem_time = remaining(start, delta, now)
next_run = now + rem_time
assert next_run == next_actual_time
"""
Case 4: DST check between now and next_run
Suppose start (which is last_run_time) and now are in EST while next_run
is in EDT, then check that the remaining time returned is the exact real
time difference (not wall time).
For example, between
2019-03-10 01:30:00-05:00 and
2019-03-10 03:30:00-04:00
There is only 1 hour difference in real time, but 2 on wall time.
Python by default uses wall time in arithmetic between datetimes with
equal non-UTC timezones.
In 2019, DST starts on March 10
"""
start = datetime(
day=10, month=3, year=2019, hour=1,
minute=30, tzinfo=eastern_tz) # EST
now = datetime(
day=10, month=3, year=2019, hour=1,
minute=30, tzinfo=eastern_tz) # EST
delta = ffwd(hour=3, year=2019, microsecond=0, minute=30,
second=0, day=10, weeks=0, month=3)
# `next_actual_time` is the next time to run (derived from delta)
next_actual_time = datetime(
day=10, month=3, year=2019, hour=3, minute=30, tzinfo=eastern_tz) # EDT
assert start.tzname() == "EST"
assert now.tzname() == "EST"
assert next_actual_time.tzname() == "EDT"
rem_time = remaining(start, delta, now)
assert rem_time.total_seconds() == 3600
next_run_utc = now.astimezone(ZoneInfo("UTC")) + rem_time
next_run_edt = next_run_utc.astimezone(eastern_tz)
assert next_run_utc == next_actual_time
assert next_run_edt == next_actual_time
| test_iso8601 |
python | pytorch__pytorch | test/package/package_a/fake_script_class.py | {
"start": 57,
"end": 339
} | class ____:
"""Intended to be scripted."""
def __init__(self, x):
    # Store the initial value; read back through the `foo` attribute.
    self.foo = x
def set_foo(self, x):
    # Replace the stored value wholesale.
    self.foo = x
@torch.jit.script
def uses_script_class(x):
    """Intended to be scripted."""
    # Exercises TorchScript's ability to construct a script class and
    # read one of its attributes.
    foo = MyScriptClass(x)
    return foo.foo
| MyScriptClass |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 8470,
"end": 8543
} | class ____(VyperException):
"""Invalid literal value."""
| InvalidLiteral |
python | coleifer__peewee | tests/sqlite_udf.py | {
"start": 788,
"end": 946
} | class ____(TestModel):
url = TextField(default='')  # request URL; empty string when unknown
data = TextField(default='')  # raw response payload
timestamp = DateTimeField(default=datetime.datetime.now)  # callable default, evaluated per row
| APIResponse |
python | PyCQA__pylint | tests/functional/p/postponed/postponed_evaluation_pep585.py | {
"start": 477,
"end": 526
} | class ____(typing.List[int]):
pass
| CustomIntList |
python | django__django | django/core/validators.py | {
"start": 4432,
"end": 7475
} | class ____(RegexValidator):
# IP patterns
ipv4_re = (
r"(?:0|25[0-5]|2[0-4][0-9]|1[0-9]?[0-9]?|[1-9][0-9]?)"
r"(?:\.(?:0|25[0-5]|2[0-4][0-9]|1[0-9]?[0-9]?|[1-9][0-9]?)){3}"
)
ipv6_re = r"\[[0-9a-f:.]+\]" # (simple regex, validated later)
hostname_re = DomainNameValidator.hostname_re
domain_re = DomainNameValidator.domain_re
tld_re = DomainNameValidator.tld_re
host_re = "(" + hostname_re + domain_re + tld_re + "|localhost)"
regex = _lazy_re_compile(
r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately
r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication
r"(?:" + ipv4_re + "|" + ipv6_re + "|" + host_re + ")"
r"(?::[0-9]{1,5})?" # port
r"(?:[/?#][^\s]*)?" # resource path
r"\Z",
re.IGNORECASE,
)
message = _("Enter a valid URL.")
schemes = ["http", "https", "ftp", "ftps"]
unsafe_chars = frozenset("\t\r\n")
max_length = MAX_URL_LENGTH
def __init__(self, schemes=None, **kwargs):
    """Optionally replace the class-level list of accepted URI schemes."""
    super().__init__(**kwargs)
    if schemes is None:
        # Keep the class default.
        return
    self.schemes = schemes
def __call__(self, value):
    """Raise ValidationError unless *value* is a well-formed URL."""
    # Cheap rejections first: non-strings, over-long values, and values
    # containing raw control characters never reach the regex.
    if not isinstance(value, str) or len(value) > self.max_length:
        raise ValidationError(self.message, code=self.code, params={"value": value})
    if self.unsafe_chars.intersection(value):
        raise ValidationError(self.message, code=self.code, params={"value": value})
    # Check if the scheme is valid.
    scheme = value.split("://")[0].lower()
    if scheme not in self.schemes:
        raise ValidationError(self.message, code=self.code, params={"value": value})
    # Then check full URL
    try:
        splitted_url = urlsplit(value)
    except ValueError:
        raise ValidationError(self.message, code=self.code, params={"value": value})
    super().__call__(value)
    # Now verify IPv6 in the netloc part
    host_match = re.search(r"^\[(.+)\](?::[0-9]{1,5})?$", splitted_url.netloc)
    if host_match:
        potential_ip = host_match[1]
        try:
            validate_ipv6_address(potential_ip)
        except ValidationError:
            raise ValidationError(
                self.message, code=self.code, params={"value": value}
            )
    # The maximum length of a full host name is 253 characters per RFC 1034
    # section 3.1. It's defined to be 255 bytes or less, but this includes
    # one byte for the length of the name and one byte for the trailing dot
    # that's used to indicate absolute names in DNS.
    if splitted_url.hostname is None or len(splitted_url.hostname) > 253:
        raise ValidationError(self.message, code=self.code, params={"value": value})
integer_validator = RegexValidator(
_lazy_re_compile(r"^-?\d+\Z"),
message=_("Enter a valid integer."),
code="invalid",
)
def validate_integer(value):
    """Raise ValidationError unless *value* is an (optionally signed) integer string."""
    return integer_validator(value)
@deconstructible
| URLValidator |
python | walkccc__LeetCode | solutions/1136. Parallel Courses/1136.py | {
"start": 24,
"end": 85
} | class ____(Enum):
INIT = 0  # not yet visited
VISITING = 1  # visit in progress (presumably: on the traversal stack — confirm in caller)
VISITED = 2  # visit complete
| State |
python | django__django | django/template/smartif.py | {
"start": 3754,
"end": 4382
} | class ____(TokenBase):
"""
A basic self-resolvable object similar to a Django template variable.
"""
# IfParser uses Literal in create_var, but TemplateIfParser overrides
# create_var so that a proper implementation that actually resolves
# variables, filters etc. is used.
id = "literal"
lbp = 0
def __init__(self, value):
self.value = value
def display(self):
return repr(self.value)
def nud(self, parser):
return self
def eval(self, context):
    # A literal ignores the template context and yields its stored value.
    return self.value
def __repr__(self):
return "(%s %r)" % (self.id, self.value)
| Literal |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass3.py | {
"start": 233,
"end": 545
} | class ____(A):
y: int
def __init__(self, a: A, y: int):
self.__dict__ = a.__dict__
a = A(3)
b = B(a, 5)
# This should generate an error because there is an extra parameter
a = A(3, 4)
# This should generate an error because there is one too few parameters
b = B(a)
A.__new__(A)
B.__new__(B)
| B |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 21745,
"end": 24045
} | class ____(DocumentPatchedEvent):
''' A concrete event representing a change to the title of a Bokeh
Document.
'''
kind = "TitleChanged"
def __init__(self, document: Document, title: str,
setter: Setter | None = None, callback_invoker: Invoker | None = None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
title (str) :
The new title to set on the Document
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.title = title
def combine(self, event: DocumentChangedEvent) -> bool:
    """Merge *event* into this one when both are title changes for the
    same document and session; return whether the merge happened."""
    if not isinstance(event, TitleChangedEvent):
        return False
    # If these are not true something weird is going on, maybe updates from
    # Python bokeh.client; don't try to combine.
    same_origin = self.setter == event.setter and self.document == event.document
    if not same_origin:
        return False
    self.title = event.title
    self.callback_invoker = event.callback_invoker
    return True
def to_serializable(self, serializer: Serializer) -> TitleChanged:
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'TitleChanged'
'title' : <new title to set>
}
Args:
serializer (Serializer):
'''
return TitleChanged(
kind = self.kind,
title = self.title,
)
@staticmethod
def _handle_event(doc: Document, event: TitleChangedEvent) -> None:
doc.set_title(event.title, event.setter)
| TitleChangedEvent |
python | sphinx-doc__sphinx | sphinx/domains/changeset.py | {
"start": 3775,
"end": 6295
} | class ____(Domain):
"""Domain for changesets."""
name = 'changeset'
label = 'changeset'
initial_data: ClassVar[dict[str, dict[str, list[ChangeSet]]]] = {
'changes': {}, # version -> list of ChangeSet
}
@property
def changesets(self) -> dict[str, list[ChangeSet]]:
return self.data.setdefault('changes', {}) # version -> list of ChangeSet
def note_changeset(self, node: addnodes.versionmodified) -> None:
version = node['version']
module = self.env.ref_context.get('py:module')
objname = self.env.current_document.obj_desc_name
changeset = ChangeSet(
node['type'],
self.env.current_document.docname,
node.line, # type: ignore[arg-type]
module,
objname,
node.astext(),
)
self.changesets.setdefault(version, []).append(changeset)
def clear_doc(self, docname: str) -> None:
    """Drop every recorded changeset that originated in *docname*."""
    for changes in self.changesets.values():
        # Rebuild each bucket in place so existing list references stay valid.
        changes[:] = [cs for cs in changes if cs.docname != docname]
def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
    """Fold another environment's changesets into ours, keeping only the
    entries that belong to one of *docnames*."""
    # XXX duplicates?
    for version, incoming in otherdata['changes'].items():
        bucket = self.changesets.setdefault(version, [])
        bucket.extend(cs for cs in incoming if cs.docname in docnames)
def process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
pass # nothing to do here. All changesets are registered on calling directive.
def get_changesets_for(self, version: str) -> list[ChangeSet]:
return self.changesets.get(version, [])
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_domain(ChangeSetDomain)
app.add_directive('version-deprecated', VersionChange)
app.add_directive('deprecated', VersionChange)
app.add_directive('version-added', VersionChange)
app.add_directive('versionadded', VersionChange)
app.add_directive('version-changed', VersionChange)
app.add_directive('versionchanged', VersionChange)
app.add_directive('version-removed', VersionChange)
app.add_directive('versionremoved', VersionChange)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| ChangeSetDomain |
python | PyCQA__pylint | pylint/checkers/classes/class_checker.py | {
"start": 2227,
"end": 6501
} | class ____(NamedTuple):
args: list[str]  # positional parameter names ("self" excluded by the builder)
kwonlyargs: list[str]  # keyword-only parameter names
varargs: str  # name of the *args parameter; falsy when absent
kwargs: str  # name of the **kwargs parameter; falsy when absent
def _signature_from_call(call: nodes.Call) -> _CallSignature:
    """Flatten an astroid Call node into a _CallSignature.

    Argument values that are not plain names are recorded as None so a
    later name-for-name comparison with the definition fails for them.
    """
    kws = {}
    args = []
    starred_kws = []
    starred_args = []
    for keyword in call.keywords or []:
        arg, value = keyword.arg, keyword.value
        if arg is None and isinstance(value, nodes.Name):
            # Starred node, and we are interested only in names,
            # otherwise some transformation might occur for the parameter.
            starred_kws.append(value.name)
        elif isinstance(value, nodes.Name):
            kws[arg] = value.name
        else:
            kws[arg] = None
    for arg in call.args:
        match arg:
            case nodes.Starred(value=nodes.Name(name=name)):
                # Positional variadic and a name, otherwise some transformation
                # might have occurred.
                starred_args.append(name)
            case nodes.Name():
                args.append(arg.name)
            case _:
                args.append(None)
    return _CallSignature(args, kws, starred_args, starred_kws)
def _signature_from_arguments(arguments: nodes.Arguments) -> _ParameterSignature:
    """Flatten an astroid Arguments node into a _ParameterSignature."""
    positional = [
        node.name
        # Positional-only and regular positional parameters are equivalent here.
        for node in chain(arguments.posonlyargs, arguments.args)
        if node.name != "self"  # the implicit receiver is not part of the signature
    ]
    keyword_only = [node.name for node in arguments.kwonlyargs]
    return _ParameterSignature(positional, keyword_only, arguments.vararg, arguments.kwarg)
def _definition_equivalent_to_call(
definition: _ParameterSignature, call: _CallSignature
) -> bool:
"""Check if a definition signature is equivalent to a call."""
if definition.kwargs:
if definition.kwargs not in call.starred_kws:
return False
elif call.starred_kws:
return False
if definition.varargs:
if definition.varargs not in call.starred_args:
return False
elif call.starred_args:
return False
if any(kw not in call.kws for kw in definition.kwonlyargs):
return False
if definition.args != call.args:
return False
# No extra kwargs in call.
return all(kw in call.args or kw in definition.kwonlyargs for kw in call.kws)
def _is_trivial_super_delegation(function: nodes.FunctionDef) -> bool:
"""Check whether a function definition is a method consisting only of a
call to the same function on the superclass.
"""
if (
not function.is_method()
# Adding decorators to a function changes behavior and
# constitutes a non-trivial change.
or function.decorators
):
return False
body = function.body
if len(body) != 1:
# Multiple statements, which means this overridden method
# could do multiple things we are not aware of.
return False
statement = body[0]
if not isinstance(statement, (nodes.Expr, nodes.Return)):
# Doing something else than what we are interested in.
return False
call = statement.value
match call := statement.value:
case nodes.Call(func=nodes.Attribute(expr=expr)):
pass
case _:
# Not a super() attribute access.
return False
# Anything other than a super call is non-trivial.
super_call = safe_infer(expr)
if not isinstance(super_call, objects.Super):
return False
# The name should be the same.
if call.func.attrname != function.name:
return False
# Should be a super call with the MRO pointer being the
# current class and the type being the current instance.
current_scope = function.parent.scope()
if not (
super_call.mro_pointer == current_scope
and isinstance(super_call.type, astroid.Instance)
and super_call.type.name == current_scope.name
):
return False
return True
# Deal with parameters overriding in two methods.
def _positional_parameters(method: nodes.FunctionDef) -> list[nodes.AssignName]:
positional = method.args.args
if method.is_bound() and method.type in {"classmethod", "method"}:
positional = positional[1:]
return positional # type: ignore[no-any-return]
| _ParameterSignature |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 18524,
"end": 18665
} | class ____(_TestIDSTBase):
def setup_method(self):
    # Single-precision configuration for the shared base-class DST tests.
    self.rdt = np.float32
    self.dec = 6  # decimal places of agreement expected at float32 precision
    self.type = 4  # DST type IV
python | numba__numba | numba/tests/test_analysis.py | {
"start": 33327,
"end": 37230
} | class ____(TestBranchPruneBase):
# Tests that semantic constants rewriting works by virtue of branch pruning
def test_array_ndim_attr(self):
def impl(array):
if array.ndim == 2:
if array.shape[1] == 2:
return 1
else:
return 10
self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False,
None],
np.zeros((2, 3)))
self.assert_prune(impl, (types.Array(types.float64, 1, 'C'),), [True,
'both'],
np.zeros((2,)))
def test_tuple_len(self):
def impl(tup):
if len(tup) == 3:
if tup[2] == 2:
return 1
else:
return 0
self.assert_prune(impl, (types.UniTuple(types.int64, 3),), [False,
None],
tuple([1, 2, 3]))
self.assert_prune(impl, (types.UniTuple(types.int64, 2),), [True,
'both'],
tuple([1, 2]))
def test_attr_not_len(self):
# The purpose of this test is to make sure that the conditions guarding
# the rewrite part do not themselves raise exceptions.
# This produces an `ir.Expr` call node for `float.as_integer_ratio`,
# which is a getattr() on `float`.
@njit
def test():
float.as_integer_ratio(1.23)
# this should raise a TypingError
with self.assertRaises(errors.TypingError) as e:
test()
self.assertIn("Unknown attribute 'as_integer_ratio'", str(e.exception))
def test_ndim_not_on_array(self):
FakeArray = collections.namedtuple('FakeArray', ['ndim'])
fa = FakeArray(ndim=2)
def impl(fa):
if fa.ndim == 2:
return fa.ndim
else:
object()
# check prune works for array ndim
self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False],
np.zeros((2, 3)))
# check prune fails for something with `ndim` attr that is not array
FakeArrayType = types.NamedUniTuple(types.int64, 1, FakeArray)
self.assert_prune(impl, (FakeArrayType,), [None], fa,
flags={'nopython':False, 'forceobj':True})
def test_semantic_const_propagates_before_static_rewrites(self):
# see issue #5015, the ndim needs writing in as a const before
# the rewrite passes run to make e.g. getitems static where possible
@njit
def impl(a, b):
return a.shape[:b.ndim]
args = (np.zeros((5, 4, 3, 2)), np.zeros((1, 1)))
self.assertPreciseEqual(impl(*args), impl.py_func(*args))
def test_tuple_const_propagation(self):
@njit(pipeline_class=IRPreservingTestPipeline)
def impl(*args):
s = 0
for arg in literal_unroll(args):
s += len(arg)
return s
inp = ((), (1, 2, 3), ())
self.assertPreciseEqual(impl(*inp), impl.py_func(*inp))
ol = impl.overloads[impl.signatures[0]]
func_ir = ol.metadata['preserved_ir']
# make sure one of the inplace binop args is a Const
binop_consts = set()
for blk in func_ir.blocks.values():
for expr in blk.find_exprs('inplace_binop'):
inst = blk.find_variable_assignment(expr.rhs.name)
self.assertIsInstance(inst.value, ir.Const)
binop_consts.add(inst.value.value)
self.assertEqual(binop_consts, {len(x) for x in inp})
| TestBranchPrunePostSemanticConstRewrites |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tfr/python/op_reg_gen_test.py | {
"start": 1533,
"end": 2545
} | class ____(test.TestCase):
"""MLIR Generation Tests for MLIR TFR Program."""
def test_op_reg_gen(self):
cxx_code = gen_register_op(sys.modules[__name__])
cxx_code_exp = r"""
CHECK: #include "tensorflow/core/framework/op.h"
CHECK-EMPTY
CHECK: namespace tensorflow {
CHECK-EMPTY
CHECK-LABEL: REGISTER_OP("TestNoOp")
CHECK-NEXT: .Attr("T: numbertype")
CHECK-NEXT: .Output("o1: T");
CHECK-EMPTY
CHECK-LABEL: REGISTER_OP("TestCompositeOp")
CHECK-NEXT: .Input("x: T")
CHECK-NEXT: .Input("y: T")
CHECK-NEXT: .Attr("act: {'', 'relu'}")
CHECK-NEXT: .Attr("trans: bool = true")
CHECK-NEXT: .Attr("T: numbertype")
CHECK-NEXT: .Output("o1: T")
CHECK-NEXT: .Output("o2: T");
CHECK-EMPTY
CHECK: } // namespace tensorflow
"""
self.assertTrue(fw.check(str(cxx_code), cxx_code_exp), str(cxx_code))
if __name__ == '__main__':
test.main()
| TFRGenTensorTest |
python | doocs__leetcode | solution/1300-1399/1387.Sort Integers by The Power Value/Solution.py | {
"start": 176,
"end": 302
} | class ____:
def getKth(self, lo: int, hi: int, k: int) -> int:
    """Return the k-th integer in [lo, hi] when ordered by power value."""
    ordered = sorted(range(lo, hi + 1), key=f)
    return ordered[k - 1]
| Solution |
python | walkccc__LeetCode | solutions/2833. Furthest Point From Origin/2833.py | {
"start": 0,
"end": 146
} | class ____:
def furthestDistanceFromOrigin(self, moves: str) -> int:
    """Max distance from origin: net L/R displacement plus every '_' spent
    in the dominant direction."""
    lefts = moves.count('L')
    rights = moves.count('R')
    wildcards = moves.count('_')
    return abs(lefts - rights) + wildcards
| Solution |
python | boto__boto3 | tests/unit/s3/test_inject.py | {
"start": 8412,
"end": 10294
} | class ____(unittest.TestCase):
def setUp(self):
self.client = mock.Mock()
self.resource = mock.Mock()
self.resource.meta.client = self.client
self.head_object_response = {'ContentLength': 5, 'ETag': 'my-etag'}
self.client.head_object.return_value = self.head_object_response
def test_object_summary_load(self):
inject.object_summary_load(self.resource)
assert self.resource.meta.data == {'Size': 5, 'ETag': 'my-etag'}
def test_can_handle_missing_content_length(self):
self.head_object_response.pop('ContentLength')
inject.object_summary_load(self.resource)
assert self.resource.meta.data == {'ETag': 'my-etag'}
def test_disable_threading_if_append_mode(caplog, tmp_path):
config = TransferConfig(use_threads=True)
with open(tmp_path / 'myfile', 'ab') as f:
inject.disable_threading_if_append_mode(config, f)
assert config.use_threads is False
assert 'A single thread will be used' in caplog.text
def test_threading_not_disabled_if_not_append_mode(caplog, tmp_path):
config = TransferConfig(use_threads=True)
with open(tmp_path / 'myfile', 'wb') as f:
inject.disable_threading_if_append_mode(config, f)
assert config.use_threads is True
assert 'A single thread will be used' not in caplog.text
def test_threading_not_disabled_if_mode_non_string(caplog, tmp_path):
config = TransferConfig(use_threads=True)
with gzip.open(tmp_path / 'myfile', 'wb') as f:
# In Python 3.13, gzip started assigning strings
# instead of integers for mode. Explicitly set
# mode to an integer to ensure we test the right behavior.
f.mode = 2
inject.disable_threading_if_append_mode(config, f)
assert config.use_threads is True
assert 'A single thread will be used' not in caplog.text
| TestObejctSummaryLoad |
python | ApeWorX__ape | src/ape/utils/basemodel.py | {
"start": 2701,
"end": 3975
} | class ____(property):
"""
Injected properties are injected class variables that must be set before use.
**NOTE**: do not appear in a Pydantic model's set of properties.
"""
def __get__(self, *args):
    """Always raise: the property was read before a value was injected.

    The error message includes the descriptor-access arguments to help
    locate the offending read.
    """
    arg_strs = []
    for argument in args:
        try:
            arg_str = str(argument)
        except Exception as err:
            # str() on arbitrary objects can itself fail; degrade to a placeholder.
            logger.debug(f"Failed calling __str__. Exception: {err}")
            arg_strs.append("<?>")
            continue
        arg_strs.append(arg_str)
    error_message = "Value not set"
    if arg_strs:
        error_message = f"{error_message} (arguments={', '.join(arg_strs)})"
    error_message = f"{error_message}. Please inject this property before calling."
    raise ValueError(error_message)
def only_raise_attribute_error(fn: Callable) -> Any:
    """Decorator: re-raise any non-AttributeError from *fn* as ApeAttributeError.

    AttributeError passes through untouched; every other exception is logged
    and wrapped so callers can rely on a single exception type.
    """
    # Local import keeps the module's top-level import block untouched.
    from functools import wraps

    @wraps(fn)  # FIX: preserve fn's __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except AttributeError:
            raise  # Don't modify or log attr errors.
        except Exception as err:
            # Wrap the exception in AttributeError
            logger.log_debug_stack_trace()
            raise ApeAttributeError(f"{err}", base_err=err) from err

    return wrapper
| injected_before_use |
python | sympy__sympy | sympy/physics/mechanics/tests/test_pathway.py | {
"start": 12507,
"end": 24944
} | class ____:
def test_is_pathway_base_subclass(self):
assert issubclass(WrappingPathway, PathwayBase)
@pytest.fixture(autouse=True)
def _wrapping_pathway_fixture(self):
self.pA = Point('pA')
self.pB = Point('pB')
self.r = Symbol('r', positive=True)
self.pO = Point('pO')
self.N = ReferenceFrame('N')
self.ax = self.N.z
self.sphere = WrappingSphere(self.r, self.pO)
self.cylinder = WrappingCylinder(self.r, self.pO, self.ax)
self.pathway = WrappingPathway(self.pA, self.pB, self.cylinder)
self.F = Symbol('F')
def test_valid_constructor(self):
instance = WrappingPathway(self.pA, self.pB, self.cylinder)
assert isinstance(instance, WrappingPathway)
assert hasattr(instance, 'attachments')
assert len(instance.attachments) == 2
assert isinstance(instance.attachments[0], Point)
assert instance.attachments[0] == self.pA
assert isinstance(instance.attachments[1], Point)
assert instance.attachments[1] == self.pB
assert hasattr(instance, 'geometry')
assert isinstance(instance.geometry, WrappingGeometryBase)
assert instance.geometry == self.cylinder
@pytest.mark.parametrize(
'attachments',
[
(Point('pA'), ),
(Point('pA'), Point('pB'), Point('pZ')),
]
)
def test_invalid_constructor_attachments_incorrect_number(self, attachments):
with pytest.raises(TypeError):
_ = WrappingPathway(*attachments, self.cylinder)
@staticmethod
@pytest.mark.parametrize(
'attachments',
[
(None, Point('pB')),
(Point('pA'), None),
]
)
def test_invalid_constructor_attachments_not_point(attachments):
with pytest.raises(TypeError):
_ = WrappingPathway(*attachments)
def test_invalid_constructor_geometry_is_not_supplied(self):
with pytest.raises(TypeError):
_ = WrappingPathway(self.pA, self.pB)
@pytest.mark.parametrize(
'geometry',
[
Symbol('r'),
dynamicsymbols('q'),
ReferenceFrame('N'),
ReferenceFrame('N').x,
]
)
def test_invalid_geometry_not_geometry(self, geometry):
with pytest.raises(TypeError):
_ = WrappingPathway(self.pA, self.pB, geometry)
def test_attachments_property_is_immutable(self):
with pytest.raises(TypeError):
self.pathway.attachments[0] = self.pB
with pytest.raises(TypeError):
self.pathway.attachments[1] = self.pA
def test_geometry_property_is_immutable(self):
with pytest.raises(AttributeError):
self.pathway.geometry = None
def test_repr(self):
expected = (
f'WrappingPathway(pA, pB, '
f'geometry={self.cylinder!r})'
)
assert repr(self.pathway) == expected
@staticmethod
def _expand_pos_to_vec(pos, frame):
return sum(mag*unit for (mag, unit) in zip(pos, frame))
@pytest.mark.parametrize(
'pA_vec, pB_vec, factor',
[
((1, 0, 0), (0, 1, 0), pi/2),
((0, 1, 0), (sqrt(2)/2, -sqrt(2)/2, 0), 3*pi/4),
((1, 0, 0), (Rational(1, 2), sqrt(3)/2, 0), pi/3),
]
)
def test_static_pathway_on_sphere_length(self, pA_vec, pB_vec, factor):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.sphere)
expected = factor*self.r
assert simplify(pathway.length - expected) == 0
@pytest.mark.parametrize(
'pA_vec, pB_vec, factor',
[
((1, 0, 0), (0, 1, 0), Rational(1, 2)*pi),
((1, 0, 0), (-1, 0, 0), pi),
((-1, 0, 0), (1, 0, 0), pi),
((0, 1, 0), (sqrt(2)/2, -sqrt(2)/2, 0), 5*pi/4),
((1, 0, 0), (Rational(1, 2), sqrt(3)/2, 0), pi/3),
(
(0, 1, 0),
(sqrt(2)*Rational(1, 2), -sqrt(2)*Rational(1, 2), 1),
sqrt(1 + (Rational(5, 4)*pi)**2),
),
(
(1, 0, 0),
(Rational(1, 2), sqrt(3)*Rational(1, 2), 1),
sqrt(1 + (Rational(1, 3)*pi)**2),
),
]
)
def test_static_pathway_on_cylinder_length(self, pA_vec, pB_vec, factor):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.cylinder)
expected = factor*sqrt(self.r**2)
assert simplify(pathway.length - expected) == 0
@pytest.mark.parametrize(
'pA_vec, pB_vec',
[
((1, 0, 0), (0, 1, 0)),
((0, 1, 0), (sqrt(2)*Rational(1, 2), -sqrt(2)*Rational(1, 2), 0)),
((1, 0, 0), (Rational(1, 2), sqrt(3)*Rational(1, 2), 0)),
]
)
def test_static_pathway_on_sphere_extension_velocity(self, pA_vec, pB_vec):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.sphere)
assert pathway.extension_velocity == 0
@pytest.mark.parametrize(
'pA_vec, pB_vec',
[
((1, 0, 0), (0, 1, 0)),
((1, 0, 0), (-1, 0, 0)),
((-1, 0, 0), (1, 0, 0)),
((0, 1, 0), (sqrt(2)/2, -sqrt(2)/2, 0)),
((1, 0, 0), (Rational(1, 2), sqrt(3)/2, 0)),
((0, 1, 0), (sqrt(2)*Rational(1, 2), -sqrt(2)/2, 1)),
((1, 0, 0), (Rational(1, 2), sqrt(3)/2, 1)),
]
)
def test_static_pathway_on_cylinder_extension_velocity(self, pA_vec, pB_vec):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.cylinder)
assert pathway.extension_velocity == 0
@pytest.mark.parametrize(
'pA_vec, pB_vec, pA_vec_expected, pB_vec_expected, pO_vec_expected',
(
((1, 0, 0), (0, 1, 0), (0, 1, 0), (1, 0, 0), (-1, -1, 0)),
(
(0, 1, 0),
(sqrt(2)/2, -sqrt(2)/2, 0),
(1, 0, 0),
(sqrt(2)/2, sqrt(2)/2, 0),
(-1 - sqrt(2)/2, -sqrt(2)/2, 0)
),
(
(1, 0, 0),
(Rational(1, 2), sqrt(3)/2, 0),
(0, 1, 0),
(sqrt(3)/2, -Rational(1, 2), 0),
(-sqrt(3)/2, Rational(1, 2) - 1, 0),
),
)
)
def test_static_pathway_on_sphere_to_loads(
self,
pA_vec,
pB_vec,
pA_vec_expected,
pB_vec_expected,
pO_vec_expected,
):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.sphere)
pA_vec_expected = sum(
mag*unit for (mag, unit) in zip(pA_vec_expected, self.N)
)
pB_vec_expected = sum(
mag*unit for (mag, unit) in zip(pB_vec_expected, self.N)
)
pO_vec_expected = sum(
mag*unit for (mag, unit) in zip(pO_vec_expected, self.N)
)
expected = [
Force(self.pA, self.F*(self.r**3/sqrt(self.r**6))*pA_vec_expected),
Force(self.pB, self.F*(self.r**3/sqrt(self.r**6))*pB_vec_expected),
Force(self.pO, self.F*(self.r**3/sqrt(self.r**6))*pO_vec_expected),
]
assert pathway.to_loads(self.F) == expected
@pytest.mark.parametrize(
'pA_vec, pB_vec, pA_vec_expected, pB_vec_expected, pO_vec_expected',
(
((1, 0, 0), (0, 1, 0), (0, 1, 0), (1, 0, 0), (-1, -1, 0)),
((1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, 1, 0), (0, -2, 0)),
((-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, -1, 0), (0, 2, 0)),
(
(0, 1, 0),
(sqrt(2)/2, -sqrt(2)/2, 0),
(-1, 0, 0),
(-sqrt(2)/2, -sqrt(2)/2, 0),
(1 + sqrt(2)/2, sqrt(2)/2, 0)
),
(
(1, 0, 0),
(Rational(1, 2), sqrt(3)/2, 0),
(0, 1, 0),
(sqrt(3)/2, -Rational(1, 2), 0),
(-sqrt(3)/2, Rational(1, 2) - 1, 0),
),
(
(1, 0, 0),
(sqrt(2)/2, sqrt(2)/2, 0),
(0, 1, 0),
(sqrt(2)/2, -sqrt(2)/2, 0),
(-sqrt(2)/2, sqrt(2)/2 - 1, 0),
),
((0, 1, 0), (0, 1, 1), (0, 0, 1), (0, 0, -1), (0, 0, 0)),
(
(0, 1, 0),
(sqrt(2)/2, -sqrt(2)/2, 1),
(-5*pi/sqrt(16 + 25*pi**2), 0, 4/sqrt(16 + 25*pi**2)),
(
-5*sqrt(2)*pi/(2*sqrt(16 + 25*pi**2)),
-5*sqrt(2)*pi/(2*sqrt(16 + 25*pi**2)),
-4/sqrt(16 + 25*pi**2),
),
(
5*(sqrt(2) + 2)*pi/(2*sqrt(16 + 25*pi**2)),
5*sqrt(2)*pi/(2*sqrt(16 + 25*pi**2)),
0,
),
),
)
)
def test_static_pathway_on_cylinder_to_loads(
self,
pA_vec,
pB_vec,
pA_vec_expected,
pB_vec_expected,
pO_vec_expected,
):
pA_vec = self._expand_pos_to_vec(pA_vec, self.N)
pB_vec = self._expand_pos_to_vec(pB_vec, self.N)
self.pA.set_pos(self.pO, self.r*pA_vec)
self.pB.set_pos(self.pO, self.r*pB_vec)
pathway = WrappingPathway(self.pA, self.pB, self.cylinder)
pA_force_expected = self.F*self._expand_pos_to_vec(pA_vec_expected,
self.N)
pB_force_expected = self.F*self._expand_pos_to_vec(pB_vec_expected,
self.N)
pO_force_expected = self.F*self._expand_pos_to_vec(pO_vec_expected,
self.N)
expected = [
Force(self.pA, pA_force_expected),
Force(self.pB, pB_force_expected),
Force(self.pO, pO_force_expected),
]
assert _simplify_loads(pathway.to_loads(self.F)) == expected
def test_2D_pathway_on_cylinder_length(self):
q = dynamicsymbols('q')
pA_pos = self.r*self.N.x
pB_pos = self.r*(cos(q)*self.N.x + sin(q)*self.N.y)
self.pA.set_pos(self.pO, pA_pos)
self.pB.set_pos(self.pO, pB_pos)
expected = self.r*sqrt(q**2)
assert simplify(self.pathway.length - expected) == 0
def test_2D_pathway_on_cylinder_extension_velocity(self):
q = dynamicsymbols('q')
qd = dynamicsymbols('q', 1)
pA_pos = self.r*self.N.x
pB_pos = self.r*(cos(q)*self.N.x + sin(q)*self.N.y)
self.pA.set_pos(self.pO, pA_pos)
self.pB.set_pos(self.pO, pB_pos)
expected = self.r*(sqrt(q**2)/q)*qd
assert simplify(self.pathway.extension_velocity - expected) == 0
def test_2D_pathway_on_cylinder_to_loads(self):
q = dynamicsymbols('q')
pA_pos = self.r*self.N.x
pB_pos = self.r*(cos(q)*self.N.x + sin(q)*self.N.y)
self.pA.set_pos(self.pO, pA_pos)
self.pB.set_pos(self.pO, pB_pos)
pA_force = self.F*self.N.y
pB_force = self.F*(sin(q)*self.N.x - cos(q)*self.N.y)
pO_force = self.F*(-sin(q)*self.N.x + (cos(q) - 1)*self.N.y)
expected = [
Force(self.pA, pA_force),
Force(self.pB, pB_force),
Force(self.pO, pO_force),
]
loads = _simplify_loads(self.pathway.to_loads(self.F))
assert loads == expected
| TestWrappingPathway |
python | huggingface__transformers | tests/models/vit_mae/test_modeling_vit_mae.py | {
"start": 12426,
"end": 15752
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
@cached_property
def default_model(self):
return ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
@slow
def test_inference_for_pretraining(self):
np.random.seed(2)
model = self.default_model
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
noise = torch.from_numpy(np.random.uniform(size=(1, num_patches))).to(device=torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, noise=noise)
# verify the logits
expected_shape = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice.to(torch_device), rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
# ViTMAE models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
np.random.seed(2)
model = self.default_model
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt", do_resize=False).to(torch_device)
vit_mae_config = ViTMAEConfig()
num_patches = (image.height // vit_mae_config.patch_size) * (image.width // vit_mae_config.patch_size)
noise = torch.from_numpy(np.random.uniform(size=(1, num_patches))).to(device=torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, noise=noise, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 1200, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
@slow
def test_inference_interpolate_pos_encoding_custom_sizes(self):
# Ensure custom sizes are correctly handled when interpolating the position embeddings
np.random.seed(2)
model = self.default_model
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt", size={"height": 256, "width": 256}).to(
torch_device
)
# forward pass
with torch.no_grad():
outputs = model(
**inputs,
interpolate_pos_encoding=True,
)
# verify the logits
expected_shape = torch.Size((1, 256, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
| ViTMAEModelIntegrationTest |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 2236,
"end": 2639
} | class ____(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
| EPipeSocket |
python | sqlalchemy__sqlalchemy | tools/format_docs_code.py | {
"start": 1134,
"end": 14285
} | class ____(NamedTuple):
line: str
line_no: int
code: str
padding: str | None = None # relevant only on first line of block
sql_marker: str | None = None
_Block = list[BlockLine]
def _format_block(
input_block: _Block,
exit_on_error: bool,
errors: list[tuple[int, str, Exception]],
is_doctest: bool,
file: str,
is_python_file: bool,
) -> list[str]:
if not is_doctest:
# The first line may have additional padding. Remove then restore later
add_padding = start_space.match(input_block[0].code).groups()[0]
skip = len(add_padding)
code = "\n".join(
l.code[skip:] if l.code.startswith(add_padding) else l.code
for l in input_block
)
else:
add_padding = None
code = "\n".join(l.code for l in input_block)
mode = PYTHON_BLACK_MODE if is_python_file else RST_BLACK_MODE
custom_target = CUSTOM_TARGET_VERSIONS.get(Path(file).name)
if custom_target:
mode = dataclasses.replace(
mode, target_versions={TargetVersion[custom_target]}
)
try:
formatted = format_str(code, mode=mode)
except Exception as e:
start_line = input_block[0].line_no
first_error = not errors
if not REPORT_ONLY_DOCTEST or is_doctest:
type_ = "doctest" if is_doctest else "plain"
errors.append((start_line, code, e))
if first_error:
print() # add newline
print(
f"--- {file}:{start_line} Could not format {type_} code "
f"block:\n{code}\n---Error: {e}"
)
if exit_on_error:
print("Exiting since --exit-on-error was passed")
raise
else:
print("Ignoring error")
return [l.line for l in input_block]
else:
formatted_code_lines = formatted.splitlines()
padding = input_block[0].padding
sql_prefix = input_block[0].sql_marker or ""
if is_doctest:
formatted_lines = [
f"{padding}{sql_prefix}>>> {formatted_code_lines[0]}",
*(
f"{padding}...{' ' if fcl else ''}{fcl}"
for fcl in formatted_code_lines[1:]
),
]
else:
formatted_lines = [
f"{padding}{add_padding}{sql_prefix}{formatted_code_lines[0]}",
*(
f"{padding}{add_padding}{fcl}" if fcl else fcl
for fcl in formatted_code_lines[1:]
),
]
if not input_block[-1].line and formatted_lines[-1]:
# last line was empty and black removed it. restore it
formatted_lines.append("")
return formatted_lines
format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$")
doctest_code_start = re.compile(
r"^(\s+)({(?:opensql|execsql|printsql|sql|stop)})?>>>\s?(.+)"
)
doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)")
sql_code_start = re.compile(r"^(\s+)({(?:open|print|exec)?sql})")
sql_code_stop = re.compile(r"^(\s+){stop}")
start_code_section = re.compile(
r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$"
)
start_space = re.compile(r"^(\s*)[^ ]?")
not_python_line = re.compile(r"^\s+[$:]")
def format_file(
file: Path, exit_on_error: bool, check: bool
) -> tuple[bool, int]:
buffer = []
if not check:
print(f"Running file {file} ..", end="")
original = file.read_text("utf-8")
doctest_block: _Block | None = None
plain_block: _Block | None = None
is_python_file = file.suffix == ".py"
plain_code_section = False
plain_padding = None
plain_padding_len = None
sql_section = False
errors = []
do_doctest_format = partial(
_format_block,
exit_on_error=exit_on_error,
errors=errors,
is_doctest=True,
file=str(file),
is_python_file=is_python_file,
)
def doctest_format():
nonlocal doctest_block
if doctest_block:
buffer.extend(do_doctest_format(doctest_block))
doctest_block = None
do_plain_format = partial(
_format_block,
exit_on_error=exit_on_error,
errors=errors,
is_doctest=False,
file=str(file),
is_python_file=is_python_file,
)
def plain_format():
nonlocal plain_block
if plain_block:
buffer.extend(do_plain_format(plain_block))
plain_block = None
disable_format = False
for line_no, line in enumerate(original.splitlines(), 1):
if (
line
and not disable_format
and start_code_section.match(line.strip())
):
# start_code_section regexp requires no spaces at the start
plain_format()
plain_code_section = True
assert not sql_section
plain_padding = start_space.match(line).groups()[0]
plain_padding_len = len(plain_padding)
buffer.append(line)
continue
elif (
plain_code_section
and line.strip()
and not line.startswith(" " * (plain_padding_len + 1))
):
plain_code_section = sql_section = False
elif match := format_directive.match(line):
assert not plain_code_section
disable_format = match.groups()[0] == "off"
if doctest_block:
assert not plain_block
if match := doctest_code_continue.match(line):
doctest_block.append(
BlockLine(line, line_no, match.groups()[0])
)
continue
else:
doctest_format()
elif plain_block:
if (
plain_code_section
and not doctest_code_start.match(line)
and not sql_code_start.match(line)
):
plain_block.append(
BlockLine(line, line_no, line[plain_padding_len:])
)
continue
else:
plain_format()
if line and (match := doctest_code_start.match(line)):
# the line is in a doctest
plain_code_section = sql_section = False
plain_format()
padding, sql_marker, code = match.groups()
doctest_block = [
BlockLine(line, line_no, code, padding, sql_marker)
]
elif line and plain_code_section:
assert not disable_format
assert not doctest_block
if match := sql_code_start.match(line):
plain_format()
sql_section = True
buffer.append(line)
elif sql_section:
if match := sql_code_stop.match(line):
sql_section = False
no_stop_line = line.replace("{stop}", "")
# start of a plain block
if no_stop_line.strip():
assert not plain_block
plain_block = [
BlockLine(
line,
line_no,
no_stop_line[plain_padding_len:],
plain_padding,
"{stop}",
)
]
continue
buffer.append(line)
elif (
is_python_file
and not plain_block
and not_python_line.match(line)
):
# not a python block. ignore it
plain_code_section = False
buffer.append(line)
else:
# start of a plain block
assert not doctest_block
plain_block = [
BlockLine(
line,
line_no,
line[plain_padding_len:],
plain_padding,
)
]
else:
buffer.append(line)
doctest_format()
plain_format()
if buffer:
buffer.append("")
updated = "\n".join(buffer)
equal = original == updated
if not check:
print(
f"..done. {len(errors)} error(s).",
"No changes" if equal else "Changes detected",
)
if not equal:
# write only if there are changes to write
file.write_text(updated, "utf-8", newline="\n")
else:
# if there is nothing in the buffer something strange happened so
# don't do anything
if not check:
print(".. Nothing to write")
equal = bool(original) is False
if check:
if not equal:
print(f"File {file} would be formatted")
return equal, len(errors)
def iter_files(directory: str) -> Iterator[Path]:
dir_path = home / directory
for file in chain(dir_path.glob("./**/*.rst"), dir_path.glob("./**/*.py")):
local = file.relative_to(home).as_posix()
if any(pattern.match(local) for pattern in include_paths) and not any(
pattern.match(local) for pattern in ignore_paths
):
yield file
def main(
file: list[str] | None, directory: str, exit_on_error: bool, check: bool
):
if file is not None:
result = [format_file(Path(f), exit_on_error, check) for f in file]
else:
result = [
format_file(doc, exit_on_error, check)
for doc in iter_files(directory)
]
if check:
formatting_error_counts = [e for _, e in result if e]
to_reformat = len([b for b, _ in result if not b])
if not to_reformat and not formatting_error_counts:
print("All files are correctly formatted")
exit(0)
else:
print(
f"{to_reformat} file(s) would be reformatted;",
(
(
f"{sum(formatting_error_counts)} formatting errors "
f"reported in {len(formatting_error_counts)} files"
)
if formatting_error_counts
else "no formatting errors reported"
),
)
exit(1)
if __name__ == "__main__":
parser = ArgumentParser(
description="""Formats code inside docs using black. Supports \
doctest code blocks and plain code block identified as indented sections \
that are preceded by ``::`` or ``.. sourcecode:: py``.
To disable formatting on a file section the comment ``.. format: off`` \
disables formatting until ``.. format: on`` is encountered or the file ends.
Use --report-doctest to ignore errors on plain code blocks.
""",
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument(
"-f",
"--file",
help="Format only this file instead of all docs",
nargs="+",
)
parser.add_argument(
"-d",
"--directory",
help="Find documents in this directory and its sub dirs",
default=".",
)
parser.add_argument(
"-c",
"--check",
help="Don't write the files back, just return the "
"status. Return code 0 means nothing would change. "
"Return code 1 means some files would be reformatted",
action="store_true",
)
parser.add_argument(
"-e",
"--exit-on-error",
help="Exit in case of black format error instead of ignoring it",
action="store_true",
)
parser.add_argument(
"-l",
"--project-line-length",
help="Configure the line length to the project value instead "
"of using the black default of 88. Python files always use the"
"project line length",
action="store_true",
)
parser.add_argument(
"-rd",
"--report-doctest",
help="Report errors only when running doctest blocks. When active "
"exit-on-error will be valid only on doctest blocks",
action="store_true",
)
args = parser.parse_args()
config = parse_pyproject_toml(home / "pyproject.toml")
target_versions = {
TargetVersion[val.upper()]
for val in config.get("target_version", [])
if val != "py27"
}
RST_BLACK_MODE = Mode(
target_versions=target_versions,
line_length=(
config.get("line_length", DEFAULT_LINE_LENGTH)
if args.project_line_length
else DEFAULT_LINE_LENGTH
),
)
PYTHON_BLACK_MODE = Mode(
target_versions=target_versions,
# Remove a few char to account for normal indent
line_length=(config.get("line_length", 4) - 4 or DEFAULT_LINE_LENGTH),
)
REPORT_ONLY_DOCTEST = args.report_doctest
main(args.file, args.directory, args.exit_on_error, args.check)
| BlockLine |
python | scipy__scipy | scipy/integrate/_rules/_gauss_legendre.py | {
"start": 174,
"end": 1733
} | class ____(FixedRule):
"""
Gauss-Legendre quadrature.
Parameters
----------
npoints : int
Number of nodes for the higher-order rule.
xp : array_namespace, optional
The namespace for the node and weight arrays. Default is None, where NumPy is
used.
Examples
--------
Evaluate a 1D integral. Note in this example that ``f`` returns an array, so the
estimates will also be arrays.
>>> import numpy as np
>>> from scipy.integrate import cubature
>>> from scipy.integrate._rules import GaussLegendreQuadrature
>>> def f(x):
... return np.cos(x)
>>> rule = GaussLegendreQuadrature(21) # Use 21-point GaussLegendre
>>> a, b = np.array([0]), np.array([1])
>>> rule.estimate(f, a, b) # True value sin(1), approximately 0.84147
array([0.84147098])
>>> rule.estimate_error(f, a, b)
array([1.11022302e-16])
"""
def __init__(self, npoints, xp=None):
if npoints < 2:
raise ValueError(
"At least 2 nodes required for Gauss-Legendre cubature"
)
self.npoints = npoints
if xp is None:
xp = np_compat
self.xp = array_namespace(xp.empty(0))
@cached_property
def nodes_and_weights(self):
# TODO: current converting to/from numpy
nodes, weights = roots_legendre(self.npoints)
return (
self.xp.asarray(nodes, dtype=self.xp.float64),
self.xp.asarray(weights, dtype=self.xp.float64)
)
| GaussLegendreQuadrature |
python | apache__airflow | task-sdk/tests/task_sdk/api/test_client.py | {
"start": 28339,
"end": 36898
} | class ____:
"""
Test that the XComOperations class works as expected. While the operations are simple, it
still catches the basic functionality of the client for xcoms including endpoint and
response parsing.
"""
@pytest.mark.parametrize(
"value",
[
pytest.param("value1", id="string-value"),
pytest.param({"key1": "value1"}, id="dict-value"),
pytest.param('{"key1": "value1"}', id="dict-str-value"),
pytest.param(["value1", "value2"], id="list-value"),
pytest.param({"key": "test_key", "value": {"key2": "value2"}}, id="nested-dict-value"),
],
)
def test_xcom_get_success(self, value):
with time_machine.travel("2023-01-01T00:00:00Z", tick=False):
# Simulate a successful response from the server when getting an xcom
# ...including a validation that retry really works
call_count = 0
def handle_request(request: httpx.Request) -> httpx.Response:
nonlocal call_count
call_count += 1
if call_count < 3:
return httpx.Response(status_code=500, json={"detail": "Internal Server Error"})
if request.url.path == "/xcoms/dag_id/run_id/task_id/key":
return httpx.Response(
status_code=201,
json={"key": "test_key", "value": value},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.get(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
)
assert isinstance(result, XComResponse)
assert result.key == "test_key"
assert result.value == value
assert call_count == 3
def test_xcom_get_success_with_map_index(self):
# Simulate a successful response from the server when getting an xcom with map_index passed
def handle_request(request: httpx.Request) -> httpx.Response:
if (
request.url.path == "/xcoms/dag_id/run_id/task_id/key"
and request.url.params.get("map_index") == "2"
):
return httpx.Response(
status_code=201,
json={"key": "test_key", "value": "test_value"},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.get(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
map_index=2,
)
assert isinstance(result, XComResponse)
assert result.key == "test_key"
assert result.value == "test_value"
def test_xcom_get_success_with_include_prior_dates(self):
# Simulate a successful response from the server when getting an xcom with include_prior_dates passed
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/xcoms/dag_id/run_id/task_id/key" and request.url.params.get(
"include_prior_dates"
):
return httpx.Response(
status_code=201,
json={"key": "test_key", "value": "test_value"},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.get(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
include_prior_dates=True,
)
assert isinstance(result, XComResponse)
assert result.key == "test_key"
assert result.value == "test_value"
def test_xcom_get_500_error(self):
with time_machine.travel("2023-01-01T00:00:00Z", tick=False):
# Simulate a successful response from the server returning a 500 error
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/xcoms/dag_id/run_id/task_id/key":
return httpx.Response(
status_code=500,
headers=[("content-Type", "application/json")],
json={
"reason": "invalid_format",
"message": "XCom value is not a valid JSON",
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
with pytest.raises(ServerResponseError):
client.xcoms.get(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
)
@pytest.mark.parametrize(
"values",
[
pytest.param("value1", id="string-value"),
pytest.param({"key1": "value1"}, id="dict-value"),
pytest.param(["value1", "value2"], id="list-value"),
pytest.param({"key": "test_key", "value": {"key2": "value2"}}, id="nested-dict-value"),
],
)
def test_xcom_set_success(self, values):
# Simulate a successful response from the server when setting an xcom
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/xcoms/dag_id/run_id/task_id/key":
assert json.loads(request.read()) == values
return httpx.Response(
status_code=201,
json={"message": "XCom successfully set"},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.set(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
value=values,
)
assert result == OKResponse(ok=True)
def test_xcom_set_with_map_index(self):
# Simulate a successful response from the server when setting an xcom with map_index passed
def handle_request(request: httpx.Request) -> httpx.Response:
if (
request.url.path == "/xcoms/dag_id/run_id/task_id/key"
and request.url.params.get("map_index") == "2"
):
assert json.loads(request.read()) == "value1"
return httpx.Response(
status_code=201,
json={"message": "XCom successfully set"},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.set(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
value="value1",
map_index=2,
)
assert result == OKResponse(ok=True)
def test_xcom_set_with_mapped_length(self):
# Simulate a successful response from the server when setting an xcom with mapped_length
def handle_request(request: httpx.Request) -> httpx.Response:
if (
request.url.path == "/xcoms/dag_id/run_id/task_id/key"
and request.url.params.get("map_index") == "2"
and request.url.params.get("mapped_length") == "3"
):
assert json.loads(request.read()) == "value1"
return httpx.Response(
status_code=201,
json={"message": "XCom successfully set"},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.xcoms.set(
dag_id="dag_id",
run_id="run_id",
task_id="task_id",
key="key",
value="value1",
map_index=2,
mapped_length=3,
)
assert result == OKResponse(ok=True)
| TestXCOMOperations |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 25440,
"end": 25561
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Gentoo'
strategy_class = OpenRCStrategy
| GentooHostname |
python | run-llama__llama_index | llama-index-core/tests/indices/knowledge_graph/test_retrievers.py | {
"start": 545,
"end": 6606
} | class ____(BaseEmbedding):
@classmethod
def class_name(cls) -> str:
return "MockEmbedding"
async def _aget_query_embedding(self, query: str) -> List[float]:
del query
return [0, 0, 1, 0, 0]
async def _aget_text_embedding(self, text: str) -> List[float]:
# assume dimensions are 4
if text == "('foo', 'is', 'bar')":
return [1, 0, 0, 0]
elif text == "('hello', 'is not', 'world')":
return [0, 1, 0, 0]
elif text == "('Jane', 'is mother of', 'Bob')":
return [0, 0, 1, 0]
elif text == "foo":
return [0, 0, 0, 1]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def _get_text_embedding(self, text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 4
if text == "('foo', 'is', 'bar')":
return [1, 0, 0, 0]
elif text == "('hello', 'is not', 'world')":
return [0, 1, 0, 0]
elif text == "('Jane', 'is mother of', 'Bob')":
return [0, 0, 1, 0]
elif text == "foo":
return [0, 0, 0, 1]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def _get_query_embedding(self, query: str) -> List[float]:
"""Mock get query embedding."""
del query
return [0, 0, 1, 0, 0]
def mock_extract_triplets(text: str) -> List[Tuple[str, str, str]]:
"""Mock extract triplets."""
lines = text.split("\n")
triplets: List[Tuple[str, str, str]] = []
for line in lines:
tokens = line[1:-1].split(",")
tokens = [t.strip() for t in tokens]
subj, pred, obj = tokens
triplets.append((subj, pred, obj))
return triplets
@patch.object(
KnowledgeGraphIndex, "_extract_triplets", side_effect=mock_extract_triplets
)
def test_as_retriever(_patch_extract_triplets: Any, documents: List[Document]) -> None:
"""Test query."""
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents, storage_context=storage_context
)
retriever: KGTableRetriever = index.as_retriever() # type: ignore
nodes = retriever.retrieve(QueryBundle("foo"))
# when include_text is True, the first node is the raw text
# the second node is the query
rel_initial_text = (
f"The following are knowledge sequence in max depth"
f" {retriever.graph_store_query_depth} "
f"in the form of directed graph like:\n"
f"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
f" object_next_hop ...`"
)
raw_text = "['foo', 'is', 'bar']"
query = rel_initial_text + "\n" + raw_text
assert len(nodes) == 2
assert nodes[1].node.get_content() == query
@patch.object(
KnowledgeGraphIndex, "_extract_triplets", side_effect=mock_extract_triplets
)
def test_retrievers(_patch_extract_triplets: Any, documents: List[Document]) -> None:
# test specific retriever class
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents, storage_context=storage_context
)
retriever = KGTableRetriever(
index,
query_keyword_extract_template=MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
graph_store=graph_store,
)
query_bundle = QueryBundle(query_str="foo", custom_embedding_strs=["foo"])
nodes = retriever.retrieve(query_bundle)
assert (
nodes[1].node.get_content()
== "The following are knowledge sequence in max depth 2"
" in the form of directed graph like:\n"
"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
" object_next_hop ...`"
"\n['foo', 'is', 'bar']"
)
@patch.object(
KnowledgeGraphIndex, "_extract_triplets", side_effect=mock_extract_triplets
)
def test_retriever_no_text(
_patch_extract_triplets: Any, documents: List[Document]
) -> None:
# test specific retriever class
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents, storage_context=storage_context
)
retriever = KGTableRetriever(
index,
query_keyword_extract_template=MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
include_text=False,
graph_store=graph_store,
)
query_bundle = QueryBundle(query_str="foo", custom_embedding_strs=["foo"])
nodes = retriever.retrieve(query_bundle)
assert (
nodes[0].node.get_content()
== "The following are knowledge sequence in max depth 2"
" in the form of directed graph like:\n"
"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
" object_next_hop ...`"
"\n['foo', 'is', 'bar']"
)
@patch.object(
KnowledgeGraphIndex, "_extract_triplets", side_effect=mock_extract_triplets
)
def test_retrieve_similarity(
_patch_extract_triplets: Any, documents: List[Document]
) -> None:
"""Test query."""
graph_store = SimpleGraphStore()
storage_context = StorageContext.from_defaults(graph_store=graph_store)
index = KnowledgeGraphIndex.from_documents(
documents,
include_embeddings=True,
storage_context=storage_context,
embed_model=MockEmbedding(),
)
retriever = KGTableRetriever(index, similarity_top_k=2, graph_store=graph_store)
# returns only two rel texts to use for generating response
# uses hyrbid query by default
nodes = retriever.retrieve(QueryBundle("foo"))
assert (
nodes[1].node.get_content()
== "The following are knowledge sequence in max depth 2"
" in the form of directed graph like:\n"
"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
" object_next_hop ...`"
"\n['foo', 'is', 'bar']"
)
| MockEmbedding |
python | numpy__numpy | numpy/distutils/fcompiler/sun.py | {
"start": 138,
"end": 1577
} | class ____(FCompiler):
compiler_type = 'sun'
description = 'Sun or Forte Fortran 95 Compiler'
# ex:
# f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
version_match = simple_version_match(
start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
executables = {
'version_cmd' : ["<F90>", "-V"],
'compiler_f77' : ["f90"],
'compiler_fix' : ["f90", "-fixed"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>", "-Bdynamic", "-G"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-moddir='
module_include_switch = '-M'
pic_flags = ['-xcode=pic32']
def get_flags_f77(self):
ret = ["-ftrap=%none"]
if (self.get_version() or '') >= '7':
ret.append("-f77")
else:
ret.append("-fixed")
return ret
def get_opt(self):
return ['-fast', '-dalign']
def get_arch(self):
return ['-xtarget=generic']
def get_libraries(self):
opt = []
opt.extend(['fsu', 'sunmath', 'mvec'])
return opt
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='sun').get_version())
| SunFCompiler |
python | numba__numba | numba/tests/test_exceptions.py | {
"start": 718,
"end": 1192
} | class ____(Exception):
def __init__(self, arg, value0):
super(UDEArgsToSuper, self).__init__(arg)
self.value0 = value0
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
same = True
same |= self.args == other.args
same |= self.value0 == other.value0
return same
def __hash__(self):
return hash((super(UDEArgsToSuper).__hash__(), self.value0))
| UDEArgsToSuper |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 112928,
"end": 113517
} | class ____(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
| SendmsgConnectionlessTests |
python | scipy__scipy | scipy/sparse/tests/test_arithmetic1d.py | {
"start": 786,
"end": 11984
} | class ____:
def test_empty_arithmetic(self, spcreator):
shape = (5,)
for mytype in [
np.dtype('int32'),
np.dtype('float32'),
np.dtype('float64'),
np.dtype('complex64'),
np.dtype('complex128'),
]:
a = spcreator(shape, dtype=mytype)
b = a + a
c = 2 * a
assert isinstance(a @ a.tocsr(), np.ndarray)
assert isinstance(a @ a.tocoo(), np.ndarray)
for m in [a, b, c]:
assert m @ m == a.toarray() @ a.toarray()
assert m.dtype == mytype
assert toarray(m).dtype == mytype
def test_abs(self, spcreator):
A = np.array([-1, 0, 17, 0, -5, 0, 1, -4, 0, 0, 0, 0], 'd')
assert_equal(abs(A), abs(spcreator(A)).toarray())
def test_round(self, spcreator):
A = np.array([-1.35, 0.56, 17.25, -5.98], 'd')
Asp = spcreator(A)
assert_equal(np.around(A, decimals=1), round(Asp, ndigits=1).toarray())
def test_elementwise_power(self, spcreator):
A = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4], 'd')
Asp = spcreator(A)
assert_equal(np.power(A, 2), Asp.power(2).toarray())
# element-wise power function needs a scalar power
with pytest.raises(NotImplementedError, match='input is not scalar'):
spcreator(A).power(A)
def test_real(self, spcreator):
D = np.array([1 + 3j, 2 - 4j])
A = spcreator(D)
assert_equal(A.real.toarray(), D.real)
def test_imag(self, spcreator):
D = np.array([1 + 3j, 2 - 4j])
A = spcreator(D)
assert_equal(A.imag.toarray(), D.imag)
def test_mul_scalar(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
assert_equal(dat * 2, (datsp * 2).toarray())
assert_equal(dat * 17.3, (datsp * 17.3).toarray())
def test_rmul_scalar(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
assert_equal(2 * dat, (2 * datsp).toarray())
assert_equal(17.3 * dat, (17.3 * datsp).toarray())
def test_sub(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
assert_equal((datsp - datsp).toarray(), np.zeros(4))
assert_equal((datsp - 0).toarray(), dat)
A = spcreator([1, -4, 0, 2], dtype='d')
assert_equal((datsp - A).toarray(), dat - A.toarray())
assert_equal((A - datsp).toarray(), A.toarray() - dat)
# test broadcasting
assert_equal(datsp.toarray() - dat[0], dat - dat[0])
def test_add0(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
# Adding 0 to a sparse matrix
assert_equal((datsp + 0).toarray(), dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * datsp for k in range(1, 3)])
sumD = sum([k * dat for k in range(1, 3)])
assert_allclose(sumS.toarray(), sumD)
def test_elementwise_multiply(self, spcreator):
# real/real
A = np.array([4, 0, 9])
B = np.array([0, 7, -1])
Asp = spcreator(A)
Bsp = spcreator(B)
assert_allclose(Asp.multiply(Bsp).toarray(), A * B) # sparse/sparse
assert_allclose(Asp.multiply(B).toarray(), A * B) # sparse/dense
# complex/complex
C = np.array([1 - 2j, 0 + 5j, -1 + 0j])
D = np.array([5 + 2j, 7 - 3j, -2 + 1j])
Csp = spcreator(C)
Dsp = spcreator(D)
assert_allclose(Csp.multiply(Dsp).toarray(), C * D) # sparse/sparse
assert_allclose(Csp.multiply(D).toarray(), C * D) # sparse/dense
# real/complex
assert_allclose(Asp.multiply(Dsp).toarray(), A * D) # sparse/sparse
assert_allclose(Asp.multiply(D).toarray(), A * D) # sparse/dense
def test_elementwise_multiply_broadcast(self, spcreator):
A = np.array([4])
B = np.array([[-9]])
C = np.array([1, -1, 0])
D = np.array([[7, 9, -9]])
E = np.array([[3], [2], [1]])
F = np.array([[8, 6, 3], [-4, 3, 2], [6, 6, 6]])
G = [1, 2, 3]
H = np.ones((3, 4))
J = H.T
K = np.array([[0]])
L = np.array([[[1, 2], [0, 1]]])
# Some arrays can't be cast as spmatrices (A, C, L) so leave
# them out.
Asp = spcreator(A)
Csp = spcreator(C)
Gsp = spcreator(G)
# 2d arrays
Bsp = spcreator(B)
Dsp = spcreator(D)
Esp = spcreator(E)
Fsp = spcreator(F)
Hsp = spcreator(H)
Hspp = spcreator(H[0, None])
Jsp = spcreator(J)
Jspp = spcreator(J[:, 0, None])
Ksp = spcreator(K)
matrices = [A, B, C, D, E, F, G, H, J, K, L]
spmatrices = [Asp, Bsp, Csp, Dsp, Esp, Fsp, Gsp, Hsp, Hspp, Jsp, Jspp, Ksp]
sp1dmatrices = [Asp, Csp, Gsp]
# sparse/sparse
for i in sp1dmatrices:
for j in spmatrices:
try:
dense_mult = i.toarray() * j.toarray()
except ValueError:
with pytest.raises(ValueError, match='inconsistent shapes'):
i.multiply(j)
continue
sp_mult = i.multiply(j)
assert_allclose(sp_mult.toarray(), dense_mult)
# sparse/dense
for i in sp1dmatrices:
for j in matrices:
try:
dense_mult = i.toarray() * j
except TypeError:
continue
except ValueError:
matchme = 'broadcast together|inconsistent shapes'
with pytest.raises(ValueError, match=matchme):
i.multiply(j)
continue
try:
sp_mult = i.multiply(j)
except ValueError:
continue
assert_allclose(toarray(sp_mult), dense_mult)
def test_elementwise_divide(self, spcreator, dat1d):
datsp = spcreator(dat1d)
expected = np.array([1, np.nan, 1, np.nan])
actual = datsp / datsp
# need assert_array_equal to handle nan values
np.testing.assert_array_equal(actual, expected)
denom = spcreator([1, 0, 0, 4], dtype='d')
expected = [3, np.nan, np.inf, 0]
np.testing.assert_array_equal(datsp / denom, expected)
# complex
A = np.array([1 - 2j, 0 + 5j, -1 + 0j])
B = np.array([5 + 2j, 7 - 3j, -2 + 1j])
Asp = spcreator(A)
Bsp = spcreator(B)
assert_allclose(Asp / Bsp, A / B)
# integer
A = np.array([1, 2, 3])
B = np.array([0, 1, 2])
Asp = spcreator(A)
Bsp = spcreator(B)
with np.errstate(divide='ignore'):
assert_equal(Asp / Bsp, A / B)
# mismatching sparsity patterns
A = np.array([0, 1])
B = np.array([1, 0])
Asp = spcreator(A)
Bsp = spcreator(B)
with np.errstate(divide='ignore', invalid='ignore'):
assert_equal(Asp / Bsp, A / B)
def test_pow(self, spcreator):
A = np.array([1, 0, 2, 0])
B = spcreator(A)
# unusual exponents
with pytest.raises(ValueError, match='negative integer powers'):
B**-1
with pytest.raises(NotImplementedError, match='zero power'):
B**0
for exponent in [1, 2, 3, 2.2]:
ret_sp = B**exponent
ret_np = A**exponent
assert_equal(ret_sp.toarray(), ret_np)
assert_equal(ret_sp.dtype, ret_np.dtype)
def test_dot_scalar(self, spcreator, dat1d):
A = spcreator(dat1d)
scalar = 10
actual = A.dot(scalar)
expected = A * scalar
assert_allclose(actual.toarray(), expected.toarray())
def test_matmul(self, spcreator):
Msp = spcreator([2, 0, 3.0])
B = spcreator(np.array([[0, 1], [1, 0], [0, 2]], 'd'))
col = np.array([[1, 2, 3]]).T
# check sparse @ dense 2d column
assert_allclose(Msp @ col, Msp.toarray() @ col)
# check sparse1d @ sparse2d, sparse1d @ dense2d, dense1d @ sparse2d
assert_allclose((Msp @ B).toarray(), (Msp @ B).toarray())
assert_allclose(Msp.toarray() @ B, (Msp @ B).toarray())
assert_allclose(Msp @ B.toarray(), (Msp @ B).toarray())
# check sparse1d @ dense1d, sparse1d @ sparse1d
V = np.array([0, 0, 1])
assert_allclose(Msp @ V, Msp.toarray() @ V)
Vsp = spcreator(V)
Msp_Vsp = Msp @ Vsp
assert isinstance(Msp_Vsp, np.ndarray)
assert Msp_Vsp.shape == ()
# output is 0-dim ndarray
assert_allclose(np.array(3), Msp_Vsp)
assert_allclose(np.array(3), Msp.toarray() @ Vsp)
assert_allclose(np.array(3), Msp @ Vsp.toarray())
assert_allclose(np.array(3), Msp.toarray() @ Vsp.toarray())
# check error on matrix-scalar
with pytest.raises(ValueError, match='Scalar operands are not allowed'):
Msp @ 1
with pytest.raises(ValueError, match='Scalar operands are not allowed'):
1 @ Msp
def test_sub_dense(self, spcreator, datsp_math_dtypes):
# subtracting a dense matrix to/from a sparse matrix
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
# Manually add to avoid upcasting from scalar
# multiplication.
sum1 = (dat + dat + dat) - datsp
assert_equal(sum1, dat + dat)
sum2 = (datsp + datsp + datsp) - dat
assert_equal(sum2, dat + dat)
def test_size_zero_matrix_arithmetic(self, spcreator):
# Test basic matrix arithmetic with shapes like 0, (1, 0), (0, 3), etc.
mat = np.array([])
a = mat.reshape(0)
d = mat.reshape((1, 0))
f = np.ones([5, 5])
asp = spcreator(a)
dsp = spcreator(d)
# bad shape for addition
with pytest.raises(ValueError, match='inconsistent shapes'):
asp.__add__(dsp)
# matrix product.
assert_equal(asp.dot(asp), np.dot(a, a))
# bad matrix products
with pytest.raises(ValueError, match='dimension mismatch|shapes.*not aligned'):
asp.dot(f)
# elemente-wise multiplication
assert_equal(asp.multiply(asp).toarray(), np.multiply(a, a))
assert_equal(asp.multiply(a).toarray(), np.multiply(a, a))
assert_equal(asp.multiply(6).toarray(), np.multiply(a, 6))
# bad element-wise multiplication
with pytest.raises(ValueError, match='inconsistent shapes'):
asp.multiply(f)
# Addition
assert_equal(asp.__add__(asp).toarray(), a.__add__(a))
| TestArithmetic1D |
python | openai__openai-python | src/openai/types/beta/threads/image_url_delta.py | {
"start": 219,
"end": 582
} | class ____(BaseModel):
detail: Optional[Literal["auto", "low", "high"]] = None
"""Specifies the detail level of the image.
`low` uses fewer tokens, you can opt in to high resolution using `high`.
"""
url: Optional[str] = None
"""
The URL of the image, must be a supported image types: jpeg, jpg, png, gif,
webp.
"""
| ImageURLDelta |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 1700,
"end": 1784
} | class ____(ABCMeta): # safe
def method(self):
foo()
| non_keyword_abcmeta_1 |
python | apache__airflow | providers/standard/tests/unit/standard/triggers/test_file.py | {
"start": 2381,
"end": 3697
} | class ____:
FILE_PATH = "/files/dags/example_async_file.py"
def test_serialization(self):
"""Asserts that the trigger correctly serializes its arguments and classpath."""
trigger = FileDeleteTrigger(filepath=self.FILE_PATH, poll_interval=5)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.standard.triggers.file.FileDeleteTrigger"
assert kwargs == {
"filepath": self.FILE_PATH,
"poke_interval": 5,
}
@pytest.mark.asyncio
async def test_file_delete_trigger(self, tmp_path):
"""Asserts that the trigger goes off on or after file is found and that the files gets deleted."""
tmp_dir = tmp_path / "test_dir"
tmp_dir.mkdir()
p = tmp_dir / "hello.txt"
trigger = FileDeleteTrigger(
filepath=str(p.resolve()),
poke_interval=0.2,
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# It should not have produced a result
assert task.done() is False
p.touch()
await asyncio.sleep(0.5)
assert p.exists() is False
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
| TestFileDeleteTrigger |
python | neetcode-gh__leetcode | python/0004-median-of-two-sorted-arrays.py | {
"start": 25,
"end": 1036
} | class ____:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
A, B = nums1, nums2
total = len(nums1) + len(nums2)
half = total // 2
if len(B) < len(A):
A, B = B, A
l, r = 0, len(A) - 1
while True:
i = (l + r) // 2 # A
j = half - i - 2 # B
Aleft = A[i] if i >= 0 else float("-infinity")
Aright = A[i + 1] if (i + 1) < len(A) else float("infinity")
Bleft = B[j] if j >= 0 else float("-infinity")
Bright = B[j + 1] if (j + 1) < len(B) else float("infinity")
# partition is correct
if Aleft <= Bright and Bleft <= Aright:
# odd
if total % 2:
return min(Aright, Bright)
# even
return (max(Aleft, Bleft) + min(Aright, Bright)) / 2
elif Aleft > Bright:
r = i - 1
else:
l = i + 1
| Solution |
python | PyCQA__pylint | tests/reporters/unittest_reporting.py | {
"start": 5344,
"end": 12985
} | class ____(BaseReporter):
name = "nop-reporter"
extension = ""
def __init__(self, output: TextIO | None = None) -> None:
super().__init__(output)
print("A NopReporter was initialized.", file=self.out)
def writeln(self, string: str = "") -> None:
pass
def _display(self, layout: Section) -> None:
pass
def test_multi_format_output(tmp_path: Path) -> None:
text = StringIO(newline=None)
json = tmp_path / "somefile.json"
source_file = tmp_path / "somemodule.py"
source_file.write_text('NOT_EMPTY = "This module is not empty"\n')
dumps(str(source_file))
nop_format = NopReporter.__module__ + "." + NopReporter.__name__
formats = ",".join(["json2:" + str(json), "text", nop_format])
with redirect_stdout(text):
linter = PyLinter()
linter.load_default_plugins()
linter.set_option("persistent", False)
linter.set_option("reports", True)
linter.set_option("score", True)
linter.set_option("score", True)
linter.set_option("output-format", formats)
assert linter.reporter.linter is linter
with pytest.raises(NotImplementedError):
linter.reporter.out = text
linter.open()
linter.check_single_file_item(
FileItem("somemodule", str(source_file), "somemodule")
)
linter.add_message("line-too-long", line=1, args=(1, 2))
linter.generate_reports()
linter.reporter.writeln("direct output")
# Ensure the output files are flushed and closed
assert isinstance(linter.reporter, MultiReporter)
linter.reporter.close_output_files()
del linter.reporter
with open(json, encoding="utf-8") as f:
assert '"messageId": "C0114"' in f.read()
assert (
text.getvalue() == "A NopReporter was initialized.\n"
"************* Module somemodule\n"
f"{source_file}:1:0: C0114: Missing module docstring (missing-module-docstring)\n"
f"{source_file}:1:0: C0301: Line too long (1/2) (line-too-long)\n"
"\n"
"\n"
"Report\n"
"======\n"
"1 statements analysed.\n"
"\n"
"Statistics by type\n"
"------------------\n"
"\n"
"+---------+-------+-----------+-----------+------------+---------+\n"
"|type |number |old number |difference |%documented |%badname |\n"
"+=========+=======+===========+===========+============+=========+\n"
"|module |1 |NC |NC |0.00 |0.00 |\n"
"+---------+-------+-----------+-----------+------------+---------+\n"
"|class |0 |NC |NC |0 |0 |\n"
"+---------+-------+-----------+-----------+------------+---------+\n"
"|method |0 |NC |NC |0 |0 |\n"
"+---------+-------+-----------+-----------+------------+---------+\n"
"|function |0 |NC |NC |0 |0 |\n"
"+---------+-------+-----------+-----------+------------+---------+\n"
"\n"
"\n"
"\n"
"3 lines have been analyzed\n"
"\n"
"Raw metrics\n"
"-----------\n"
"\n"
"+----------+-------+------+---------+-----------+\n"
"|type |number |% |previous |difference |\n"
"+==========+=======+======+=========+===========+\n"
"|code |2 |66.67 |NC |NC |\n"
"+----------+-------+------+---------+-----------+\n"
"|docstring |0 |0.00 |NC |NC |\n"
"+----------+-------+------+---------+-----------+\n"
"|comment |0 |0.00 |NC |NC |\n"
"+----------+-------+------+---------+-----------+\n"
"|empty |1 |33.33 |NC |NC |\n"
"+----------+-------+------+---------+-----------+\n"
"\n"
"\n"
"\n"
"Duplication\n"
"-----------\n"
"\n"
"+-------------------------+------+---------+-----------+\n"
"| |now |previous |difference |\n"
"+=========================+======+=========+===========+\n"
"|nb duplicated lines |0 |NC |NC |\n"
"+-------------------------+------+---------+-----------+\n"
"|percent duplicated lines |0.000 |NC |NC |\n"
"+-------------------------+------+---------+-----------+\n"
"\n"
"\n"
"\n"
"Messages by category\n"
"--------------------\n"
"\n"
"+-----------+-------+---------+-----------+\n"
"|type |number |previous |difference |\n"
"+===========+=======+=========+===========+\n"
"|convention |2 |NC |NC |\n"
"+-----------+-------+---------+-----------+\n"
"|refactor |0 |NC |NC |\n"
"+-----------+-------+---------+-----------+\n"
"|warning |0 |NC |NC |\n"
"+-----------+-------+---------+-----------+\n"
"|error |0 |NC |NC |\n"
"+-----------+-------+---------+-----------+\n"
"\n"
"\n"
"\n"
"Messages\n"
"--------\n"
"\n"
"+-------------------------+------------+\n"
"|message id |occurrences |\n"
"+=========================+============+\n"
"|missing-module-docstring |1 |\n"
"+-------------------------+------------+\n"
"|line-too-long |1 |\n"
"+-------------------------+------------+\n"
"\n"
"\n"
"\n"
"\n"
"-----------------------------------\n"
"Your code has been rated at 0.00/10\n"
"\n"
"direct output\n"
)
def test_multi_reporter_independant_messages() -> None:
"""Messages should not be modified by multiple reporters."""
check_message = "Not modified"
class ReporterModify(BaseReporter):
def handle_message(self, msg: Message) -> None:
msg.msg = "Modified message"
def writeln(self, string: str = "") -> None:
pass
def _display(self, layout: Section) -> None:
pass
class ReporterCheck(BaseReporter):
def handle_message(self, msg: Message) -> None:
assert (
msg.msg == check_message
), "Message object should not be changed by other reporters."
def writeln(self, string: str = "") -> None:
pass
def _display(self, layout: Section) -> None:
pass
multi_reporter = MultiReporter([ReporterModify(), ReporterCheck()], lambda: None)
message = Message(
symbol="missing-docstring",
msg_id="C0123",
location=MessageLocationTuple("abspath", "path", "module", "obj", 1, 2, 1, 3),
msg=check_message,
confidence=HIGH,
)
multi_reporter.handle_message(message)
assert (
message.msg == check_message
), "Message object should not be changed by reporters."
def test_display_results_is_renamed() -> None:
class CustomReporter(TextReporter):
def _display(self, layout: Section) -> None:
return None
reporter = CustomReporter()
with pytest.raises(AttributeError) as exc:
# pylint: disable=no-member
reporter.display_results() # type: ignore[attr-defined]
assert "no attribute 'display_results'" in str(exc)
| NopReporter |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 89734,
"end": 91299
} | class ____(BasePostProgressGroupMixin):
def test_set_has_flags(
self, mock_dist: MagicMock, mock_incr: MagicMock, mock_record: MagicMock
) -> None:
project = self.create_project(platform="other")
event_id = "a" * 32
event = self.create_event(
data={
"event_id": event_id,
"contexts": {
"flags": {
"values": [
{
"flag": "test-flag-1",
"result": False,
},
{
"flag": "test-flag-2",
"result": True,
},
]
}
},
},
project_id=project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
)
project.refresh_from_db()
assert project.flags.has_flags
mock_incr.assert_any_call("feature_flags.event_has_flags_context")
mock_dist.assert_any_call("feature_flags.num_flags_sent", 2)
assert_last_analytics_event(
mock_record,
FirstFlagSentEvent(
organization_id=self.organization.id,
project_id=project.id,
platform=project.platform,
),
)
| CheckIfFlagsSentTestMixin |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/dagster_run.py | {
"start": 1923,
"end": 4184
} | class ____(Enum):
"""The status of run execution."""
# Runs waiting to be launched by the Dagster Daemon.
QUEUED = "QUEUED"
# Runs in the brief window between creating the run and launching or enqueueing it.
NOT_STARTED = "NOT_STARTED"
# Runs that are managed outside of the Dagster control plane.
MANAGED = "MANAGED"
# Runs that have been launched, but execution has not yet started.
STARTING = "STARTING"
# Runs that have been launched and execution has started.
STARTED = "STARTED"
# Runs that have successfully completed.
SUCCESS = "SUCCESS"
# Runs that have failed to complete.
FAILURE = "FAILURE"
# Runs that are in-progress and pending to be canceled.
CANCELING = "CANCELING"
# Runs that have been canceled before completion.
CANCELED = "CANCELED"
# These statuses that indicate a run may be using compute resources
IN_PROGRESS_RUN_STATUSES = [
DagsterRunStatus.STARTING,
DagsterRunStatus.STARTED,
DagsterRunStatus.CANCELING,
]
# This serves as an explicit list of run statuses that indicate that the run is not using compute
# resources. This and the enum above should cover all run statuses.
NON_IN_PROGRESS_RUN_STATUSES = [
DagsterRunStatus.QUEUED,
DagsterRunStatus.NOT_STARTED,
DagsterRunStatus.SUCCESS,
DagsterRunStatus.FAILURE,
DagsterRunStatus.MANAGED,
DagsterRunStatus.CANCELED,
]
FINISHED_STATUSES = [
DagsterRunStatus.SUCCESS,
DagsterRunStatus.FAILURE,
DagsterRunStatus.CANCELED,
]
NOT_FINISHED_STATUSES = [
DagsterRunStatus.STARTING,
DagsterRunStatus.STARTED,
DagsterRunStatus.CANCELING,
DagsterRunStatus.QUEUED,
DagsterRunStatus.NOT_STARTED,
]
# Run statuses for runs that can be safely canceled.
# Does not include the other unfinished statuses for the following reasons:
# STARTING: Control has been ceded to the run worker, which will eventually move the run to a STARTED.
# NOT_STARTED: Mostly replaced with STARTING. Runs are only here in the brief window between
# creating the run and launching or enqueueing it.
CANCELABLE_RUN_STATUSES = [DagsterRunStatus.STARTED, DagsterRunStatus.QUEUED]
@whitelist_for_serdes(storage_name="PipelineRunStatsSnapshot")
@record
| DagsterRunStatus |
python | rapidsai__cudf | python/custreamz/custreamz/kafka.py | {
"start": 276,
"end": 2096
} | class ____:
def __init__(self, kafka_configs):
"""
Base object for any client that wants to interact with a Kafka broker.
This object creates the underlying KafkaDatasource connection which
is used to read data from Kafka and create cudf Dataframes.
This class should not be directly instantiated.
Parameters
----------
kafka_configs : dict,
Dict of Key/Value pairs of librdkafka
configuration values. Full list of valid configuration
options can be found at
https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
"""
self.kafka_configs = kafka_configs
self.kafka_meta_client = KafkaDatasource(kafka_configs)
self.ck_consumer = ck.Consumer(kafka_configs)
def list_topics(self, specific_topic=None):
"""
List the topics associated with the underlying Kafka Broker connection.
Parameters
----------
specific_topic : str,
If specified this is the only topic that metadata information will
be retrieved for. Otherwise metadata for all topics in the
broker will be retrieved.
"""
return self.kafka_meta_client.list_topics(
b"" if specific_topic is None else specific_topic.encode()
)
def unsubscribe(self):
"""
Stop all active consumption and remove consumer subscriptions
to topic/partition instances
"""
self.kafka_meta_client.unsubscribe()
def close(self, timeout=10000):
"""
Close the underlying socket connection to Kafka and
clean up system resources
"""
self.kafka_meta_client.close(timeout)
# Apache Kafka Consumer implementation
| CudfKafkaClient |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 10117,
"end": 10757
} | class ____:
param_names = ["shapes", "binary_op"]
params = [
get_benchmark_shapes("TimeBinaryOpSeries"),
["mul"],
]
def setup(self, shapes, binary_op):
df1 = generate_dataframe("int", *shapes[0], RAND_LOW, RAND_HIGH)
df2 = generate_dataframe("int", *shapes[1], RAND_LOW, RAND_HIGH)
self.series1 = df1[df1.columns[0]]
self.series2 = df2[df2.columns[0]]
self.op = getattr(self.series1, binary_op)
execute(self.series1)
execute(self.series2)
def time_binary_op_series(self, shapes, binary_op):
execute(self.op(self.series2))
| TimeBinaryOpSeries |
python | pytorch__pytorch | torch/package/_directory_reader.py | {
"start": 390,
"end": 1915
} | class ____:
"""
Class to allow PackageImporter to operate on unzipped packages. Methods
copy the behavior of the internal PyTorchFileReader class (which is used for
accessing packages in all other cases).
N.B.: ScriptObjects are not depickleable or accessible via this DirectoryReader
class due to ScriptObjects requiring an actual PyTorchFileReader instance.
"""
def __init__(self, directory):
self.directory = directory
def get_record(self, name):
filename = f"{self.directory}/{name}"
with open(filename, "rb") as f:
return f.read()
def get_storage_from_record(self, name, numel, dtype):
filename = f"{self.directory}/{name}"
nbytes = torch._utils._element_size(dtype) * numel
storage = cast(Storage, torch.UntypedStorage)
return _HasStorage(storage.from_file(filename=filename, nbytes=nbytes))
def has_record(self, path):
full_path = os.path.join(self.directory, path)
return os.path.isfile(full_path)
def get_all_records(
self,
):
files = [
filename[len(self.directory) + 1 :]
for filename in glob(f"{self.directory}/**", recursive=True)
if not os.path.isdir(filename)
]
return files
def serialization_id(
self,
):
if self.has_record(__serialization_id_record_name__):
return self.get_record(__serialization_id_record_name__)
else:
return ""
| DirectoryReader |
python | geekcomputers__Python | linear-algebra-python/src/tests.py | {
"start": 234,
"end": 5124
} | class ____(unittest.TestCase):
def test_component(self):
"""
test for method component
"""
x = Vector([1, 2, 3])
self.assertEqual(x.component(0), 1)
self.assertEqual(x.component(2), 3)
try:
y = Vector()
self.assertTrue(False)
except:
self.assertTrue(True)
def test_str(self):
"""
test for toString() method
"""
x = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(x.__str__(), "(0,0,0,0,0,1)")
def test_size(self):
"""
test for size()-method
"""
x = Vector([1, 2, 3, 4])
self.assertEqual(x.size(), 4)
def test_euclidLength(self):
"""
test for the eulidean length
"""
x = Vector([1, 2])
self.assertAlmostEqual(x.eulidLength(), 2.236, 3)
def test_add(self):
"""
test for + operator
"""
x = Vector([1, 2, 3])
y = Vector([1, 1, 1])
self.assertEqual((x + y).component(0), 2)
self.assertEqual((x + y).component(1), 3)
self.assertEqual((x + y).component(2), 4)
def test_sub(self):
"""
test for - operator
"""
x = Vector([1, 2, 3])
y = Vector([1, 1, 1])
self.assertEqual((x - y).component(0), 0)
self.assertEqual((x - y).component(1), 1)
self.assertEqual((x - y).component(2), 2)
def test_mul(self):
"""
test for * operator
"""
x = Vector([1, 2, 3])
a = Vector([2, -1, 4]) # for test of dot-product
b = Vector([1, -2, -1])
self.assertEqual((x * 3.0).__str__(), "(3.0,6.0,9.0)")
self.assertEqual((a * b), 0)
def test_zeroVector(self):
"""
test for the global function zeroVector(...)
"""
self.assertTrue(zeroVector(10).__str__().count("0") == 10)
def test_unitBasisVector(self):
"""
test for the global function unitBasisVector(...)
"""
self.assertEqual(unitBasisVector(3, 1).__str__(), "(0,1,0)")
def test_axpy(self):
"""
test for the global function axpy(...) (operation)
"""
x = Vector([1, 2, 3])
y = Vector([1, 0, 1])
self.assertEqual(axpy(2, x, y).__str__(), "(3,4,7)")
def test_copy(self):
"""
test for the copy()-method
"""
x = Vector([1, 0, 0, 0, 0, 0])
y = x.copy()
self.assertEqual(x.__str__(), y.__str__())
def test_changeComponent(self):
"""
test for the changeComponent(...)-method
"""
x = Vector([1, 0, 0])
x.changeComponent(0, 0)
x.changeComponent(1, 1)
self.assertEqual(x.__str__(), "(0,1,0)")
def test_str_matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", A.__str__())
def test__mul__matrix(self):
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
x = Vector([1, 2, 3])
self.assertEqual("(14,32,50)", (A * x).__str__())
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", (A * 2).__str__())
def test_changeComponent_matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
A.changeComponent(0, 2, 5)
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", A.__str__())
def test_component_matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
self.assertEqual(7, A.component(2, 1), 0.01)
def test__add__matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", (A + B).__str__())
def test__sub__matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", (A - B).__str__())
def test_squareZeroMatrix(self):
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|" + "\n|0,0,0,0,0|\n",
squareZeroMatrix(5).__str__(),
)
def test_norm_vector(self):
x = Vector([1, 2, 3])
self.assertAlmostEqual(x.norm().component(0), (1 / math.sqrt(14)), 0.001)
self.assertAlmostEqual(x.norm().component(1), math.sqrt((2.0 / 7)), 0.001)
def test__eq__vector(self):
x = Vector([1, 2, 3])
y = Vector([1, 0, 1])
self.assertTrue(x == x)
self.assertFalse(x == y)
def test__eq__matrix(self):
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
self.assertTrue(A == A)
self.assertFalse(A == B)
if __name__ == "__main__":
unittest.main()
| Test |
python | getsentry__sentry | tests/sentry/middleware/test_customer_domain.py | {
"start": 7212,
"end": 7961
} | class ____(Endpoint):
permission_classes = (AllowAny,)
def get(self, request, organization_id_or_slug):
return Response(
{
"organization_id_or_slug": organization_id_or_slug,
"subdomain": request.subdomain,
"activeorg": request.session.get("activeorg", None),
}
)
def post(self, request, organization_id_or_slug):
request.session["activeorg"] = organization_id_or_slug
return Response(
{
"organization_id_or_slug": organization_id_or_slug,
"subdomain": request.subdomain,
"activeorg": request.session.get("activeorg", None),
}
)
| OrganizationTestEndpoint |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 12720,
"end": 14257
} | class ____:
__slots__ = ("_role_class", "name", "_use_inspection")
def _literal_coercion(self, element, **kw):
raise NotImplementedError()
_post_coercion: Any = None
_resolve_literal_only = False
_skip_clauseelement_for_target_match = False
def __init__(self, role_class):
self._role_class = role_class
self.name = role_class._role_name
self._use_inspection = issubclass(role_class, roles.UsesInspection)
def _implicit_coercions(
self,
element: Any,
resolved: Any,
argname: Optional[str] = None,
**kw: Any,
) -> Any:
self._raise_for_expected(element, argname, resolved)
def _raise_for_expected(
self,
element: Any,
argname: Optional[str] = None,
resolved: Optional[Any] = None,
*,
advice: Optional[str] = None,
code: Optional[str] = None,
err: Optional[Exception] = None,
**kw: Any,
) -> NoReturn:
if resolved is not None and resolved is not element:
got = "%r object resolved from %r object" % (resolved, element)
else:
got = repr(element)
if argname:
msg = "%s expected for argument %r; got %s." % (
self.name,
argname,
got,
)
else:
msg = "%s expected, got %s." % (self.name, got)
if advice:
msg += " " + advice
raise exc.ArgumentError(msg, code=code) from err
| RoleImpl |
python | eventlet__eventlet | eventlet/green/http/client.py | {
"start": 58888,
"end": 59137
} | class ____(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
| RemoteDisconnected |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_cairo.py | {
"start": 2432,
"end": 10819
} | class ____(RendererBase):
def __init__(self, dpi):
self.dpi = dpi
self.gc = GraphicsContextCairo(renderer=self)
self.width = None
self.height = None
self.text_ctx = cairo.Context(
cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
super().__init__()
def set_context(self, ctx):
surface = ctx.get_target()
if hasattr(surface, "get_width") and hasattr(surface, "get_height"):
size = surface.get_width(), surface.get_height()
elif hasattr(surface, "get_extents"): # GTK4 RecordingSurface.
ext = surface.get_extents()
size = ext.width, ext.height
else: # vector surfaces.
ctx.save()
ctx.reset_clip()
rect, *rest = ctx.copy_clip_rectangle_list()
if rest:
raise TypeError("Cannot infer surface size")
_, _, *size = rect
ctx.restore()
self.gc.ctx = ctx
self.width, self.height = size
@staticmethod
def _fill_and_stroke(ctx, fill_c, alpha, alpha_overrides):
if fill_c is not None:
ctx.save()
_set_rgba(ctx, fill_c, alpha, alpha_overrides)
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
ctx = gc.ctx
# Clip the path to the actual rendering extents if it isn't filled.
clip = (ctx.clip_extents()
if rgbFace is None and gc.get_hatch() is None
else None)
transform = (transform
+ Affine2D().scale(1, -1).translate(0, self.height))
ctx.new_path()
_append_path(ctx, path, transform, clip)
if rgbFace is not None:
ctx.save()
_set_rgba(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
ctx.fill_preserve()
ctx.restore()
hatch_path = gc.get_hatch_path()
if hatch_path:
dpi = int(self.dpi)
hatch_surface = ctx.get_target().create_similar(
cairo.Content.COLOR_ALPHA, dpi, dpi)
hatch_ctx = cairo.Context(hatch_surface)
_append_path(hatch_ctx, hatch_path,
Affine2D().scale(dpi, -dpi).translate(0, dpi),
None)
hatch_ctx.set_line_width(self.points_to_pixels(gc.get_hatch_linewidth()))
hatch_ctx.set_source_rgba(*gc.get_hatch_color())
hatch_ctx.fill_preserve()
hatch_ctx.stroke()
hatch_pattern = cairo.SurfacePattern(hatch_surface)
hatch_pattern.set_extend(cairo.Extend.REPEAT)
ctx.save()
ctx.set_source(hatch_pattern)
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
def draw_markers(self, gc, marker_path, marker_trans, path, transform,
rgbFace=None):
# docstring inherited
ctx = gc.ctx
ctx.new_path()
# Create the path for the marker; it needs to be flipped here already!
_append_path(ctx, marker_path, marker_trans + Affine2D().scale(1, -1))
marker_path = ctx.copy_path_flat()
# Figure out whether the path has a fill
x1, y1, x2, y2 = ctx.fill_extents()
if x1 == 0 and y1 == 0 and x2 == 0 and y2 == 0:
filled = False
# No fill, just unset this (so we don't try to fill it later on)
rgbFace = None
else:
filled = True
transform = (transform
+ Affine2D().scale(1, -1).translate(0, self.height))
ctx.new_path()
for i, (vertices, codes) in enumerate(
path.iter_segments(transform, simplify=False)):
if len(vertices):
x, y = vertices[-2:]
ctx.save()
# Translate and apply path
ctx.translate(x, y)
ctx.append_path(marker_path)
ctx.restore()
# Slower code path if there is a fill; we need to draw
# the fill and stroke for each marker at the same time.
# Also flush out the drawing every once in a while to
# prevent the paths from getting way too long.
if filled or i % 1000 == 0:
self._fill_and_stroke(
ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
# Fast path, if there is no fill, draw everything in one step
if not filled:
self._fill_and_stroke(
ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
def draw_image(self, gc, x, y, im):
im = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(im[::-1])
surface = cairo.ImageSurface.create_for_data(
im.ravel().data, cairo.FORMAT_ARGB32,
im.shape[1], im.shape[0], im.shape[1] * 4)
ctx = gc.ctx
y = self.height - y - im.shape[0]
ctx.save()
ctx.set_source_surface(surface, float(x), float(y))
ctx.paint()
ctx.restore()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
# Note: (x, y) are device/display coords, not user-coords, unlike other
# draw_* methods
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to(x, y)
ctx.save()
ctx.select_font_face(*_cairo_font_args_from_font_prop(prop))
ctx.set_font_size(self.points_to_pixels(prop.get_size_in_points()))
opts = cairo.FontOptions()
opts.set_antialias(gc.get_antialiased())
ctx.set_font_options(opts)
if angle:
ctx.rotate(np.deg2rad(-angle))
ctx.show_text(s)
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ctx = gc.ctx
width, height, descent, glyphs, rects = \
self._text2path.mathtext_parser.parse(s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate(np.deg2rad(-angle))
for font, fontsize, idx, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, -oy)
ctx.select_font_face(
*_cairo_font_args_from_font_prop(ttfFontProperty(font)))
ctx.set_font_size(self.points_to_pixels(fontsize))
ctx.show_text(chr(idx))
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle(ox, -oy, w, -h)
ctx.set_source_rgb(0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath == 'TeX':
return super().get_text_width_height_descent(s, prop, ismath)
if ismath:
width, height, descent, *_ = \
self._text2path.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.save()
ctx.select_font_face(*_cairo_font_args_from_font_prop(prop))
ctx.set_font_size(self.points_to_pixels(prop.get_size_in_points()))
y_bearing, w, h = ctx.text_extents(s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
# docstring inherited
self.gc.ctx.save()
# FIXME: The following doesn't properly implement a stack-like behavior
# and relies instead on the (non-guaranteed) fact that artists never
# rely on nesting gc states, so directly resetting the attributes (IOW
# a single-level stack) is enough.
self.gc._alpha = 1
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
self.gc._hatch = None
return self.gc
def points_to_pixels(self, points):
# docstring inherited
return points / 72 * self.dpi
| RendererCairo |
python | google__pytype | pytype/abstract/_special_classes.py | {
"start": 3310,
"end": 4411
} | class ____(_Builder):
"""Build a typed dict."""
# TODO: b/350643999 - Should rather be a ClassVar[Sequence[str]]
CLASSES: Sequence[str] = ("typing.TypedDict", "typing_extensions.TypedDict")
def matches_class(self, c: "_classes.PyTDClass") -> bool:
return c.name in self.CLASSES
def matches_base(self, c: "_classes.PyTDClass") -> bool:
return any(
isinstance(b, pytd.ClassType) and self.matches_class(b) for b in c.bases # pytype: disable=attribute-error
)
def matches_mro(self, c: "_classes.PyTDClass") -> bool:
# Check if we have typed dicts in the MRO by seeing if we have already
# created a TypedDictClass for one of the ancestor classes.
return any(
isinstance(b, class_mixin.Class) and b.is_typed_dict_class
for b in c.mro
)
def make_base_class(self) -> "typed_dict.TypedDictBuilder":
return self.convert.make_typed_dict_builder()
def make_derived_class(
self, name: str, pytd_cls: "_classes.PyTDClass"
) -> "typed_dict.TypedDictClass":
return self.convert.make_typed_dict(name, pytd_cls)
| _TypedDictBuilder |
python | PyCQA__pylint | doc/data/messages/i/init-is-generator/good.py | {
"start": 0,
"end": 211
} | class ____:
def __init__(self, worms):
self.__worms = worms
def worms(self):
yield from self.__worms
apple = Fruit(["Fahad", "Anisha", "Tabatha"])
for worm in apple.worms():
pass
| Fruit |
python | django__django | tests/admin_views/models.py | {
"start": 16205,
"end": 16328
} | class ____(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
| Topping |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 11403,
"end": 11843
} | class ____:
"""Used in AssetEventResult."""
dag_id: str
task_id: str
run_id: str
map_index: int
def xcom_pull(
self,
*,
key: str = "return_value",
default: Any = None,
) -> Any:
from airflow.sdk.execution_time.xcom import XCom
if (value := XCom.get_value(ti_key=self, key=key)) is None:
return default
return value
| AssetEventSourceTaskInstance |
python | getsentry__sentry | src/sentry/integrations/on_call/metrics.py | {
"start": 458,
"end": 1155
} | class ____(Enum):
"""
A way in which a user can interact with Sentry through an on-call app.
"""
# General interactions
ADD_KEY = "ADD_KEY"
POST_INSTALL = "POST_INSTALL"
# Interacting with external alerts
CREATE = "CREATE" # create an alert in Opsgenie/Pagerduty
RESOLVE = "RESOLVE" # resolve an alert in Opsgenie/Pagerduty
# Opsgenie only
VERIFY_KEYS = "VERIFY_KEYS"
VERIFY_TEAM = "VERIFY_TEAM"
MIGRATE_PLUGIN = "MIGRATE_PLUGIN"
# PagerDuty only
VALIDATE_SERVICE = "VALIDATE_SERVICE"
INSTALLATION_REDIRECT = "INSTALLATION_REDIRECT"
def __str__(self) -> str:
return self.value.lower()
@dataclass
| OnCallInteractionType |
python | walkccc__LeetCode | solutions/2471. Minimum Number of Operations to Sort a Binary Tree by Level/2471.py | {
"start": 0,
"end": 857
} | class ____:
def minimumOperations(self, root: TreeNode | None) -> int:
ans = 0
q = collections.deque([root])
# e.g. vals = [7, 6, 8, 5]
# [2, 1, 3, 0]: Initialize the ids based on the order of vals.
# [3, 1, 2, 0]: Swap 2 with 3, so 2 is in the right place (i == ids[i]).
# [0, 1, 2, 3]: Swap 3 with 0, so 3 is in the right place.
while q:
vals = []
for _ in range(len(q)):
node = q.popleft()
vals.append(node.val)
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
# O(n^2logn), which is not great and leads to TLE.
ids = [sorted(vals).index(val) for val in vals]
for i in range(len(ids)):
while ids[i] != i:
j = ids[i]
ids[i] = ids[j]
ids[j] = j
ans += 1
return ans
| Solution |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_ignored_modules.py | {
"start": 1099,
"end": 1965
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(3, 5)
layer1_modules = [
torch.nn.Linear(5, 4),
torch.nn.Linear(4, 4),
torch.nn.Linear(4, 4),
]
self.layer1 = torch.nn.Sequential(*layer1_modules)
self.layer2 = torch.nn.Linear(4, 2)
self.layer3 = torch.nn.Linear(2, 2)
self.relu = torch.nn.ReLU()
def forward(self, x):
z = self.relu(self.layer0(x))
z = self.relu(self.layer1(z))
z = self.relu(self.layer2(z))
z = self.relu(self.layer3(z))
return z
def get_input(self, device):
return (torch.randn((8, 3)).to(device),)
def get_loss(self, input, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
| Model |
python | justquick__django-activity-stream | runtests/testapp/apps.py | {
"start": 42,
"end": 398
} | class ____(AppConfig):
name = 'testapp'
def ready(self):
from actstream.registry import register
register(apps.get_model('auth', 'group'))
register(apps.get_model('sites', 'site'))
register(self.get_model('player'))
myuser = self.get_model('myuser')
if myuser:
register(myuser)
| TestappConfig |
python | GoogleCloudPlatform__python-docs-samples | recaptcha_enterprise/demosite/app/urls.py | {
"start": 1487,
"end": 1662
} | class ____(enum.Enum):
INVALID_TOKEN = "Invalid token"
ACTION_MISMATCH = "Action mismatch"
SCORE_LESS_THAN_THRESHOLD = "Returned score less than threshold set"
| Error |
python | wandb__wandb | wandb/sdk/artifacts/_generated/registry_versions.py | {
"start": 377,
"end": 531
} | class ____(GQLResult):
org_entity: Optional[RegistryVersionsOrganizationOrgEntity] = Field(
alias="orgEntity"
)
| RegistryVersionsOrganization |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 957027,
"end": 960420
} | class ____(Predicate):
"""
FieldLTPredicate schema wrapper.
Parameters
----------
field : str, :class:`FieldName`
Field to be tested.
lt : str, dict, float, :class:`ExprRef`, :class:`DateTime`
The value that the field should be less than.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit for the field to be tested.
"""
_schema = {"$ref": "#/definitions/FieldLTPredicate"}
def __init__(
self,
field: Optional[str | SchemaBase] = Undefined,
lt: Optional[str | float | Temporal | Parameter | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
**kwds,
):
super().__init__(field=field, lt=lt, timeUnit=timeUnit, **kwds)
| FieldLTPredicate |
python | pytorch__pytorch | torch/cuda/memory.py | {
"start": 48828,
"end": 51180
} | class ____(_CUDAAllocator):
r"""CUDA memory allocator loaded from a so file."""
def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str):
r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes.
To change the active allocator use the :func:`torch.memory.cuda.change_current_allocator` function.
Args:
path_to_so_file(str): Path in the filesystem to the `.so` file containing
the allocator functions
alloc_fn_name(str): Name of the function to perform the memory allocation
in the so file. The signature must be:
void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream);
free_fn_name(str): Name of the function to perform the memory release
in the so file. The signature must be:
void free_fn_name(void* ptr, size_t size, cudaStream_t stream);
.. warning::
This is currently supported only in unix OSs
.. note::
See :ref:`cuda-memory-management` for details on creating and using a custom allocator
"""
allocator = ctypes.CDLL(path_to_so_file)
alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value
free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value
assert alloc_fn is not None
assert free_fn is not None
self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn)
def change_current_allocator(allocator: _CUDAAllocator) -> None:
r"""Change the currently used memory allocator to be the one provided.
If the current allocator has already been used/initialized, this function will error.
Args:
allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one.
.. note::
See :ref:`cuda-memory-management` for details on creating and using a custom allocator
"""
torch._C._cuda_changeCurrentAllocator(allocator.allocator())
def _get_current_allocator() -> _CUDAAllocator:
r"""Return the allocator being currently used.
.. note::
See :ref:`cuda-memory-management` for details on creating and using a custom allocator
"""
return _CUDAAllocator(torch._C._cuda_getAllocator())
| CUDAPluggableAllocator |
python | fluentpython__example-code-2e | 06-obj-ref/haunted_bus.py | {
"start": 717,
"end": 1039
} | class ____:
"""A bus model haunted by ghost passengers"""
def __init__(self, passengers=[]): # <1>
self.passengers = passengers # <2>
def pick(self, name):
self.passengers.append(name) # <3>
def drop(self, name):
self.passengers.remove(name)
# end::HAUNTED_BUS_CLASS[]
| HauntedBus |
python | huggingface__transformers | tests/models/speech_to_text/test_modeling_speech_to_text.py | {
"start": 2138,
"end": 10055
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
num_conv_layers=2,
conv_kernel_sizes=(5, 5),
conv_channels=32,
input_feat_per_channel=24,
input_channels=1,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
max_source_positions=20,
max_target_positions=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.num_conv_layers = num_conv_layers
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_channels = conv_channels
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_features = floats_tensor(
[self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size
)
attention_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2)
config = self.get_config()
inputs_dict = prepare_speech_to_text_inputs_dict(
config,
input_features=input_features,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
)
return config, inputs_dict
def get_config(self):
return Speech2TextConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
num_conv_layers=self.num_conv_layers,
conv_kernel_sizes=self.conv_kernel_sizes,
conv_channels=self.conv_channels,
input_feat_per_channel=self.input_feat_per_channel,
input_channels=self.input_channels,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_source_positions=self.max_source_positions,
max_target_positions=self.max_target_positions,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
def create_and_check_model_forward(self, config, inputs_dict):
model = Speech2TextModel(config=config).to(torch_device).eval()
input_features = inputs_dict["input_features"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
# first forward pass
last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16))
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["decoder_input_ids"]
attention_mask = inputs_dict["decoder_attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = Speech2TextModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(
inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"]
)[0]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device)
encoder_attention_mask = encoder._get_feature_vector_attention_mask(
encoder_last_hidden_state.shape[1], inputs_dict["attention_mask"]
)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=encoder_attention_mask,
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| Speech2TextModelTester |
python | graphql-python__graphene | graphene/types/tests/test_inputobjecttype.py | {
"start": 366,
"end": 4892
} | class ____(UnmountedType):
def get_type(self):
return MyType
def test_generate_inputobjecttype():
class MyInputObjectType(InputObjectType):
"""Documentation"""
assert MyInputObjectType._meta.name == "MyInputObjectType"
assert MyInputObjectType._meta.description == "Documentation"
assert MyInputObjectType._meta.fields == {}
def test_generate_inputobjecttype_with_meta():
class MyInputObjectType(InputObjectType):
class Meta:
name = "MyOtherInputObjectType"
description = "Documentation"
assert MyInputObjectType._meta.name == "MyOtherInputObjectType"
assert MyInputObjectType._meta.description == "Documentation"
def test_generate_inputobjecttype_with_fields():
class MyInputObjectType(InputObjectType):
field = Field(MyType)
assert "field" in MyInputObjectType._meta.fields
def test_ordered_fields_in_inputobjecttype():
class MyInputObjectType(InputObjectType):
b = InputField(MyType)
a = InputField(MyType)
field = MyScalar()
asa = InputField(MyType)
assert list(MyInputObjectType._meta.fields) == ["b", "a", "field", "asa"]
def test_generate_inputobjecttype_unmountedtype():
class MyInputObjectType(InputObjectType):
field = MyScalar(MyType)
assert "field" in MyInputObjectType._meta.fields
assert isinstance(MyInputObjectType._meta.fields["field"], InputField)
def test_generate_inputobjecttype_as_argument():
class MyInputObjectType(InputObjectType):
field = MyScalar()
class MyObjectType(ObjectType):
field = Field(MyType, input=MyInputObjectType())
assert "field" in MyObjectType._meta.fields
field = MyObjectType._meta.fields["field"]
assert isinstance(field, Field)
assert field.type == MyType
assert "input" in field.args
assert isinstance(field.args["input"], Argument)
assert field.args["input"].type == MyInputObjectType
def test_generate_inputobjecttype_inherit_abstracttype():
class MyAbstractType:
field1 = MyScalar(MyType)
class MyInputObjectType(InputObjectType, MyAbstractType):
field2 = MyScalar(MyType)
assert list(MyInputObjectType._meta.fields) == ["field1", "field2"]
assert [type(x) for x in MyInputObjectType._meta.fields.values()] == [
InputField,
InputField,
]
def test_generate_inputobjecttype_inherit_abstracttype_reversed():
class MyAbstractType:
field1 = MyScalar(MyType)
class MyInputObjectType(MyAbstractType, InputObjectType):
field2 = MyScalar(MyType)
assert list(MyInputObjectType._meta.fields) == ["field1", "field2"]
assert [type(x) for x in MyInputObjectType._meta.fields.values()] == [
InputField,
InputField,
]
def test_inputobjecttype_of_input():
class Child(InputObjectType):
first_name = String()
last_name = String()
@property
def full_name(self):
return f"{self.first_name} {self.last_name}"
class Parent(InputObjectType):
child = InputField(Child)
class Query(ObjectType):
is_child = Boolean(parent=Parent())
def resolve_is_child(self, info, parent):
return (
isinstance(parent.child, Child)
and parent.child.full_name == "Peter Griffin"
)
schema = Schema(query=Query)
result = schema.execute(
"""query basequery {
isChild(parent: {child: {firstName: "Peter", lastName: "Griffin"}})
}
"""
)
assert not result.errors
assert result.data == {"isChild": True}
def test_inputobjecttype_default_input_as_undefined(
set_default_input_object_type_to_undefined,
):
class TestUndefinedInput(InputObjectType):
required_field = String(required=True)
optional_field = String()
class Query(ObjectType):
undefined_optionals_work = Field(NonNull(Boolean), input=TestUndefinedInput())
def resolve_undefined_optionals_work(self, info, input: TestUndefinedInput):
# Confirm that optional_field comes as Undefined
return (
input.required_field == "required" and input.optional_field is Undefined
)
schema = Schema(query=Query)
result = schema.execute(
"""query basequery {
undefinedOptionalsWork(input: {requiredField: "required"})
}
"""
)
assert not result.errors
assert result.data == {"undefinedOptionalsWork": True}
| MyScalar |
python | ipython__ipython | IPython/core/displaypub.py | {
"start": 1092,
"end": 6229
} | class ____(Configurable):
"""A traited class that publishes display data to frontends.
Instances of this class are created by the main IPython object and should
be accessed there.
"""
def __init__(self, shell=None, *args, **kwargs):
self.shell = shell
self._is_publishing = False
self._in_post_execute = False
if self.shell:
self._setup_execution_tracking()
super().__init__(*args, **kwargs)
def _validate_data(self, data, metadata=None):
"""Validate the display data.
Parameters
----------
data : dict
The formata data dictionary.
metadata : dict
Any metadata for the data.
"""
if not isinstance(data, dict):
raise TypeError("data must be a dict, got: %r" % data)
if metadata is not None:
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict, got: %r" % data)
def _setup_execution_tracking(self):
"""Set up hooks to track execution state"""
self.shell.events.register("post_execute", self._on_post_execute)
self.shell.events.register("pre_execute", self._on_pre_execute)
def _on_post_execute(self):
"""Called at start of post_execute phase"""
self._in_post_execute = True
def _on_pre_execute(self):
"""Called at start of pre_execute phase"""
self._in_post_execute = False
# use * to indicate transient, update are keyword-only
def publish(
self,
data,
metadata=None,
source=_sentinel,
*,
transient=None,
update=False,
**kwargs,
) -> None:
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. Metadata specific to each mime-type can be specified
in the metadata dict with the same mime-type keys as
the data itself.
source : str, deprecated
Unused.
transient : dict, keyword-only
A dictionary for transient data.
Data in this dictionary should not be persisted as part of saving this output.
Examples include 'display_id'.
update : bool, keyword-only, default: False
If True, only update existing outputs with the same display_id,
rather than creating a new output.
"""
if source is not _sentinel:
import warnings
warnings.warn(
"The 'source' parameter is deprecated since IPython 3.0 and will be ignored "
"(this warning is present since 9.0). `source` parameter will be removed in the future.",
DeprecationWarning,
stacklevel=2,
)
handlers: t.Dict = {}
if self.shell is not None:
handlers = getattr(self.shell, "mime_renderers", {})
outputs = self.shell.history_manager.outputs
target_execution_count = self.shell.execution_count
if self._in_post_execute:
# We're in post_execute, so this is likely a matplotlib flush
# Use execution_count - 1 to associate with the cell that created the plot
target_execution_count = self.shell.execution_count - 1
outputs[target_execution_count].append(
HistoryOutput(output_type="display_data", bundle=data)
)
for mime, handler in handlers.items():
if mime in data:
handler(data[mime], metadata.get(mime, None))
return
self._is_publishing = True
if "text/plain" in data:
print(data["text/plain"])
self._is_publishing = False
@property
def is_publishing(self):
return self._is_publishing
def clear_output(self, wait=False):
"""Clear the output of the cell receiving output."""
print("\033[2K\r", end="")
sys.stdout.flush()
print("\033[2K\r", end="")
sys.stderr.flush()
| DisplayPublisher |
python | PyCQA__pylint | tests/functional/g/generic_alias/generic_alias_collections.py | {
"start": 2277,
"end": 2384
} | class ____(list[collections.abc.Iterable[int]]):
pass
# Multiple generic base classes
| DerivedListIterable |
python | django__django | tests/delete/models.py | {
"start": 7649,
"end": 7803
} | class ____(models.Model):
generic_delete_bottom = models.ForeignKey(
GenericDeleteBottom, on_delete=models.CASCADE
)
| GenericDeleteBottomParent |
python | django__django | tests/migrations/migrations_test_apps/lookuperror_a/migrations/0001_initial.py | {
"start": 43,
"end": 525
} | class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="A1",
fields=[
(
"id",
models.AutoField(
serialize=False,
verbose_name="ID",
auto_created=True,
primary_key=True,
),
),
],
),
]
| Migration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.