language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numpy__numpy | numpy/distutils/tests/test_misc_util.py | {
"start": 2060,
"end": 2413
} | class ____:
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
ls = gpaths('command/*.py', local_path)
assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
| TestGpaths |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0044_alter_version_documentation_type.py | {
"start": 149,
"end": 1045
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0043_add_cancelled_state"),
]
operations = [
migrations.AlterField(
model_name="version",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
("mkdocs_html", "Mkdocs Html Pages"),
("generic", "Generic"),
],
default="sphinx",
help_text="Type of documentation the version was built with.",
max_length=20,
verbose_name="Documentation type",
),
),
]
| Migration |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/gdrive_to_gcs.py | {
"start": 1170,
"end": 4308
} | class ____(BaseOperator):
"""
Writes a Google Drive file into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDriveToGCSOperator`
:param bucket_name: The destination Google cloud storage bucket where the
file should be written to
:param object_name: The Google Cloud Storage object name for the object created by the operator.
For example: ``path/to/my/file/file.txt``.
:param folder_id: The folder id of the folder in which the Google Drive file resides
:param file_name: The name of the file residing in Google Drive
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:param gcp_conn_id: The GCP connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket_name",
"object_name",
"folder_id",
"file_name",
"drive_id",
"impersonation_chain",
)
def __init__(
self,
*,
bucket_name: str,
object_name: str | None = None,
file_name: str,
folder_id: str,
drive_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_name = object_name
self.folder_id = folder_id
self.drive_id = drive_id
self.file_name = file_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
gdrive_hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
file_metadata = gdrive_hook.get_file_id(
folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
)
with gcs_hook.provide_file_and_upload(
bucket_name=self.bucket_name, object_name=self.object_name
) as file:
gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
def dry_run(self):
"""Perform a dry run of the operator."""
return None
| GoogleDriveToGCSOperator |
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 8995,
"end": 9735
} | class ____(nn.Module):
def __init__(self, config: CpmAntConfig):
super().__init__()
self.w_0 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
self.w_1 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
self.act = torch.nn.GELU()
def forward(self, hidden_states: torch.Tensor):
"""Transform an input tensor from one feature space to another via a nonlinear operation
Args:
hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
"""
gate_score = self.act(self.w_0(hidden_states))
hidden_states = self.w_1(hidden_states)
hidden_states = gate_score * hidden_states
return hidden_states
| CpmAntDenseGatedACT |
python | gevent__gevent | src/gevent/tests/test__timeout.py | {
"start": 720,
"end": 5243
} | class ____(greentest.TestCase):
def _test(self, timeout, close):
try:
get_hub().switch()
self.fail('Must raise Timeout')
except gevent.Timeout as ex:
if ex is not timeout:
raise
if close:
ex.close()
return ex
def _check_expires(self, timeout):
timeout.start()
self._test(timeout, False)
# Restart
timeout.start()
return self._test(timeout, True)
def test_expires(self):
timeout = gevent.Timeout(SHOULD_EXPIRE)
self._check_expires(timeout)
def test_expires_false(self):
# A False exception value only matters to a
# context manager
timeout = gevent.Timeout(SHOULD_EXPIRE, False)
self._check_expires(timeout)
def test_expires_str(self):
# str values are accepted but not documented; they change
# the message
timeout = gevent.Timeout(SHOULD_EXPIRE, 'XXX')
ex = self._check_expires(timeout)
self.assertTrue(str(ex).endswith('XXX'))
def assert_type_err(self, ex):
# PyPy3 uses 'exceptions must derive', everyone else uses "exceptions must be"
self.assertTrue("exceptions must be" in str(ex) or "exceptions must derive" in str(ex), str(ex))
def test_expires_non_exception(self):
timeout = gevent.Timeout(SHOULD_EXPIRE, object())
timeout.start()
try:
get_hub().switch()
self.fail("Most raise TypeError")
except TypeError as ex:
self.assert_type_err(ex)
timeout.close()
class OldStyle:
pass
timeout = gevent.Timeout(SHOULD_EXPIRE, OldStyle) # Type
timeout.start()
try:
get_hub().switch()
self.fail("Must raise OldStyle")
except TypeError as ex:
self.assertTrue(greentest.PY3, "Py3 raises a TypeError for non-BaseExceptions")
self.assert_type_err(ex)
except: # pylint:disable=bare-except
self.assertTrue(greentest.PY2, "Old style classes can only be raised on Py2")
t = sys.exc_info()[0]
self.assertEqual(t, OldStyle)
timeout.close()
timeout = gevent.Timeout(SHOULD_EXPIRE, OldStyle()) # instance
timeout.start()
try:
get_hub().switch()
self.fail("Must raise OldStyle")
except TypeError as ex:
self.assertTrue(greentest.PY3, "Py3 raises a TypeError for non-BaseExceptions")
self.assert_type_err(ex)
except: # pylint:disable=bare-except
self.assertTrue(greentest.PY2, "Old style classes can only be raised on Py2")
t = sys.exc_info()[0]
self.assertEqual(t, OldStyle)
timeout.close()
def _check_context_manager_expires(self, timeout, raises=True):
try:
with timeout:
get_hub().switch()
except gevent.Timeout as ex:
if ex is not timeout:
raise
return ex
if raises:
self.fail("Must raise Timeout")
def test_context_manager(self):
timeout = gevent.Timeout(SHOULD_EXPIRE)
self._check_context_manager_expires(timeout)
def test_context_manager_false(self):
# Suppress the exception
timeout = gevent.Timeout(SHOULD_EXPIRE, False)
self._check_context_manager_expires(timeout, raises=False)
self.assertTrue(str(timeout).endswith('(silent)'), str(timeout))
def test_context_manager_str(self):
timeout = gevent.Timeout(SHOULD_EXPIRE, 'XXX')
ex = self._check_context_manager_expires(timeout)
self.assertTrue(str(ex).endswith('XXX'), str(ex))
def test_cancel(self):
timeout = gevent.Timeout(SHOULD_EXPIRE)
timeout.start()
timeout.cancel()
gevent.sleep(SHOULD_NOT_EXPIRE)
self.assertFalse(timeout.pending, timeout)
timeout.close()
@greentest.ignores_leakcheck
def test_with_timeout(self):
with self.assertRaises(gevent.Timeout):
gevent.with_timeout(SHOULD_EXPIRE, gevent.sleep, SHOULD_NOT_EXPIRE)
X = object()
r = gevent.with_timeout(SHOULD_EXPIRE, gevent.sleep, SHOULD_NOT_EXPIRE, timeout_value=X)
self.assertIs(r, X)
r = gevent.with_timeout(SHOULD_NOT_EXPIRE, gevent.sleep, SHOULD_EXPIRE, timeout_value=X)
self.assertIsNone(r)
if __name__ == '__main__':
greentest.main()
| Test |
python | run-llama__llama_index | llama-index-core/tests/prompts/test_guidance_utils.py | {
"start": 656,
"end": 1330
} | class ____(BaseModel):
__test__ = False
attr2: List[TestSimpleModel]
EXPECTED_NESTED_STR = """\
{
"attr2": [{{#geneach 'attr2' stop=']'}}{{#unless @first}}, {{/unless}}{
"attr0": "{{gen 'attr0' stop='"'}}",
"attr1": "{{gen 'attr1' stop='"'}}",
}{{/geneach}}],
}\
"""
def test_convert_pydantic_to_guidance_output_template_simple() -> None:
output_str = pydantic_to_guidance_output_template(TestSimpleModel)
assert output_str == EXPECTED_SIMPLE_STR
def test_convert_pydantic_to_guidance_output_template_nested() -> None:
output_str = pydantic_to_guidance_output_template(TestNestedModel)
assert output_str == EXPECTED_NESTED_STR
| TestNestedModel |
python | doocs__leetcode | solution/1500-1599/1504.Count Submatrices With All Ones/Solution.py | {
"start": 0,
"end": 535
} | class ____:
def numSubmat(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
g = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if mat[i][j]:
g[i][j] = 1 if j == 0 else 1 + g[i][j - 1]
ans = 0
for i in range(m):
for j in range(n):
col = inf
for k in range(i, -1, -1):
col = min(col, g[k][j])
ans += col
return ans
| Solution |
python | numpy__numpy | numpy/_core/tests/test_simd.py | {
"start": 11790,
"end": 12383
} | class ____(_Test_Utility):
"""
To only test double precision
"""
def test_conversions(self):
"""
Round to nearest even integer, assume CPU control register is set to rounding.
Test intrinsics:
npyv_round_s32_##SFX
"""
vdata_a = self.load(self._data())
vdata_a = self.sub(vdata_a, self.setall(0.5))
vdata_b = self.mul(vdata_a, self.setall(-1.5))
data_round = [round(x) for x in list(vdata_a) + list(vdata_b)]
vround = self.round_s32(vdata_a, vdata_b)
assert vround == data_round
| _SIMD_FP64 |
python | geekcomputers__Python | thired-party-haarcascade-mustache-on-face/utils.py | {
"start": 955,
"end": 2507
} | class ____(object):
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"360p": (480, 360),
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# Video Encoding, might require additional installs
# Types of Codes: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
"avi": cv2.VideoWriter_fourcc(*"XVID"),
# 'mp4': cv2.VideoWriter_fourcc(*'H264'),
"mp4": cv2.VideoWriter_fourcc(*"XVID"),
}
width = 640
height = 480
dims = (640, 480)
capture = None
video_type = None
def __init__(self, capture, filepath, res="480p", *args, **kwargs):
self.capture = capture
self.filepath = filepath
self.width, self.height = self.get_dims(res=res)
self.video_type = self.get_video_type()
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(self, width, height):
self.capture.set(3, width)
self.capture.set(4, height)
def get_dims(self, res="480p"):
width, height = self.STD_DIMENSIONS["480p"]
if res in self.STD_DIMENSIONS:
width, height = self.STD_DIMENSIONS[res]
self.change_res(width, height)
self.dims = (width, height)
return width, height
def get_video_type(self):
filename, ext = os.path.splitext(self.filepath)
if ext in self.VIDEO_TYPE:
return self.VIDEO_TYPE[ext]
return self.VIDEO_TYPE["avi"]
| CFEVideoConf |
python | astropy__astropy | astropy/units/tests/test_quantity.py | {
"start": 64278,
"end": 66375
} | class ____:
def setup_method(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.0) * u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0))
l2 = self.Length2(np.arange(5.0))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.0))
def test_view(self):
l = (np.arange(5.0) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.0) * u.s).view(self.Length)
v = np.arange(5.0).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.0) * u.cm)
sum1 = l + 1.0 * u.m
assert type(sum1) is self.Length
sum2 = 1.0 * u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.0 * u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1.0, my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1.0, my_unit, subok=True)
assert type(q2) is MyQuantity
| TestSpecificTypeQuantity |
python | spack__spack | lib/spack/spack/vendor/archspec/cpu/detect.py | {
"start": 4010,
"end": 16649
} | class ____:
"""Collects the information we need on the host CPU from cpuid"""
# pylint: disable=too-few-public-methods
def __init__(self):
self.cpuid = CPUID()
registers = self.cpuid.registers_for(**CPUID_JSON["vendor"]["input"])
self.highest_basic_support = registers.eax
self.vendor = struct.pack("III", registers.ebx, registers.edx, registers.ecx).decode(
"utf-8"
)
registers = self.cpuid.registers_for(**CPUID_JSON["highest_extension_support"]["input"])
self.highest_extension_support = registers.eax
self.features = self._features()
def _features(self):
result = set()
def check_features(data):
registers = self.cpuid.registers_for(**data["input"])
for feature_check in data["bits"]:
current = getattr(registers, feature_check["register"])
if self._is_bit_set(current, feature_check["bit"]):
result.add(feature_check["name"])
for call_data in CPUID_JSON["flags"]:
if call_data["input"]["eax"] > self.highest_basic_support:
continue
check_features(call_data)
for call_data in CPUID_JSON["extension-flags"]:
if call_data["input"]["eax"] > self.highest_extension_support:
continue
check_features(call_data)
return result
def _is_bit_set(self, register: int, bit: int) -> bool:
mask = 1 << bit
return register & mask > 0
def brand_string(self) -> Optional[str]:
"""Returns the brand string, if available."""
if self.highest_extension_support < 0x80000004:
return None
r1 = self.cpuid.registers_for(eax=0x80000002, ecx=0)
r2 = self.cpuid.registers_for(eax=0x80000003, ecx=0)
r3 = self.cpuid.registers_for(eax=0x80000004, ecx=0)
result = struct.pack(
"IIIIIIIIIIII",
r1.eax,
r1.ebx,
r1.ecx,
r1.edx,
r2.eax,
r2.ebx,
r2.ecx,
r2.edx,
r3.eax,
r3.ebx,
r3.ecx,
r3.edx,
).decode("utf-8")
return result.strip("\x00")
@detection(operating_system="Windows")
def cpuid_info():
"""Returns a partial Microarchitecture, obtained from running the cpuid instruction"""
architecture = _machine()
if architecture == X86_64:
data = CpuidInfoCollector()
return partial_uarch(vendor=data.vendor, features=data.features)
return generic_microarchitecture(architecture)
def _check_output(args, env):
with subprocess.Popen(args, stdout=subprocess.PIPE, env=env) as proc:
output = proc.communicate()[0]
return str(output.decode("utf-8"))
WINDOWS_MAPPING = {
"AMD64": X86_64,
"ARM64": AARCH64,
}
def _machine() -> str:
"""Return the machine architecture we are on"""
operating_system = platform.system()
# If we are not on Darwin or Windows, trust what Python tells us
if operating_system not in ("Darwin", "Windows"):
return platform.machine()
# Normalize windows specific names
if operating_system == "Windows":
platform_machine = platform.machine()
return WINDOWS_MAPPING.get(platform_machine, platform_machine)
# On Darwin it might happen that we are on M1, but using an interpreter
# built for x86_64. In that case "platform.machine() == 'x86_64'", so we
# need to fix that.
#
# See: https://bugs.python.org/issue42704
output = _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if "Apple" in output:
# Note that a native Python interpreter on Apple M1 would return
# "arm64" instead of "aarch64". Here we normalize to the latter.
return AARCH64
return X86_64
@detection(operating_system="Darwin")
def sysctl_info() -> Microarchitecture:
"""Returns a raw info dictionary parsing the output of sysctl."""
child_environment = _ensure_bin_usrbin_in_path()
def sysctl(*args: str) -> str:
return _check_output(["sysctl", *args], env=child_environment).strip()
if _machine() == X86_64:
raw_features = sysctl(
"-n",
"machdep.cpu.features",
"machdep.cpu.leaf7_features",
"machdep.cpu.extfeatures",
)
features = set(raw_features.lower().split())
# Flags detected on Darwin turned to their linux counterpart
for darwin_flags, linux_flags in TARGETS_JSON["conversions"]["darwin_flags"].items():
if all(x in features for x in darwin_flags.split()):
features.update(linux_flags.split())
return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features)
model = "unknown"
model_str = sysctl("-n", "machdep.cpu.brand_string").lower()
if "m4" in model_str:
model = "m4"
elif "m3" in model_str:
model = "m3"
elif "m2" in model_str:
model = "m2"
elif "m1" in model_str:
model = "m1"
elif "apple" in model_str:
model = "m1"
return partial_uarch(name=model, vendor="Apple")
def _ensure_bin_usrbin_in_path():
# Make sure that /sbin and /usr/sbin are in PATH as sysctl is usually found there
child_environment = dict(os.environ.items())
search_paths = child_environment.get("PATH", "").split(os.pathsep)
for additional_path in ("/sbin", "/usr/sbin"):
if additional_path not in search_paths:
search_paths.append(additional_path)
child_environment["PATH"] = os.pathsep.join(search_paths)
return child_environment
def _canonicalize_aarch64_vendor(data: Dict[str, str]) -> str:
"""Adjust the vendor field to make it human-readable"""
if "CPU implementer" not in data:
return "generic"
# Mapping numeric codes to vendor (ARM). This list is a merge from
# different sources:
#
# https://github.com/karelzak/util-linux/blob/master/sys-utils/lscpu-arm.c
# https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile
# https://github.com/gcc-mirror/gcc/blob/master/gcc/config/aarch64/aarch64-cores.def
# https://patchwork.kernel.org/patch/10524949/
arm_vendors = TARGETS_JSON["conversions"]["arm_vendors"]
arm_code = data["CPU implementer"]
return arm_vendors.get(arm_code, arm_code)
def _feature_set(data: Dict[str, str], key: str) -> Set[str]:
return set(data.get(key, "").split())
def detected_info() -> Microarchitecture:
"""Returns a partial Microarchitecture with information on the CPU of the current host.
This function calls all the viable factories one after the other until there's one that is
able to produce the requested information. Falls-back to a generic microarchitecture, if none
of the calls succeed.
"""
# pylint: disable=broad-except
for factory in INFO_FACTORY[platform.system()]:
try:
return factory()
except Exception as exc:
warnings.warn(str(exc))
return generic_microarchitecture(_machine())
def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitecture]:
"""Returns an unordered list of known micro-architectures that are compatible with the
partial Microarchitecture passed as input.
"""
architecture_family = _machine()
# If a tester is not registered, assume no known target is compatible with the host
tester = COMPATIBILITY_CHECKS.get(architecture_family, lambda x, y: False)
return [x for x in TARGETS.values() if tester(info, x)] or [
generic_microarchitecture(architecture_family)
]
def host() -> Microarchitecture:
"""Detects the host micro-architecture and returns it."""
# Retrieve information on the host's cpu
info = detected_info()
# Get a list of possible candidates for this micro-architecture
candidates = compatible_microarchitectures(info)
# Sorting criteria for candidates
def sorting_fn(item):
return len(item.ancestors), len(item.features)
# Get the best generic micro-architecture
generic_candidates = [c for c in candidates if c.vendor == "generic"]
best_generic = max(generic_candidates, key=sorting_fn)
# Relevant for AArch64. Filter on "cpu_part" if we have any match
if info.cpu_part != "" and any(c for c in candidates if info.cpu_part == c.cpu_part):
candidates = [c for c in candidates if info.cpu_part == c.cpu_part]
# Filter the candidates to be descendant of the best generic candidate.
# This is to avoid that the lack of a niche feature that can be disabled
# from e.g. BIOS prevents detection of a reasonably performant architecture
candidates = [c for c in candidates if c > best_generic]
# If we don't have candidates, return the best generic micro-architecture
if not candidates:
return best_generic
# Reverse sort of the depth for the inheritance tree among only targets we
# can use. This gets the newest target we satisfy.
return max(candidates, key=sorting_fn)
def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]):
"""Decorator to register a function as a proper compatibility check.
A compatibility check function takes a partial Microarchitecture object as a first argument,
and an arbitrary target Microarchitecture as the second argument. It returns True if the
target is compatible with the first argument, False otherwise.
Args:
architecture_family: architecture family for which this test can be used
"""
# Turn the argument into something iterable
if isinstance(architecture_family, str):
architecture_family = (architecture_family,)
def decorator(func):
COMPATIBILITY_CHECKS.update({family: func for family in architecture_family})
return func
return decorator
@compatibility_check(architecture_family=(PPC64LE, PPC64))
def compatibility_check_for_power(info, target):
"""Compatibility check for PPC64 and PPC64LE architectures."""
# We can use a target if it descends from our machine type, and our
# generation (9 for POWER9, etc.) is at least its generation.
arch_root = TARGETS[_machine()]
return (
target == arch_root or arch_root in target.ancestors
) and target.generation <= info.generation
@compatibility_check(architecture_family=X86_64)
def compatibility_check_for_x86_64(info, target):
"""Compatibility check for x86_64 architectures."""
# We can use a target if it descends from our machine type, is from our
# vendor, and we have all of its features
arch_root = TARGETS[X86_64]
return (
(target == arch_root or arch_root in target.ancestors)
and target.vendor in (info.vendor, "generic")
and target.features.issubset(info.features)
)
@compatibility_check(architecture_family=AARCH64)
def compatibility_check_for_aarch64(info, target):
"""Compatibility check for AARCH64 architectures."""
# At the moment, it's not clear how to detect compatibility with
# a specific version of the architecture
if target.vendor == "generic" and target.name != AARCH64:
return False
arch_root = TARGETS[AARCH64]
arch_root_and_vendor = arch_root == target.family and target.vendor in (
info.vendor,
"generic",
)
# On macOS it seems impossible to get all the CPU features
# with syctl info, but for ARM we can get the exact model
if platform.system() == "Darwin":
model = TARGETS[info.name]
return arch_root_and_vendor and (target == model or target in model.ancestors)
return arch_root_and_vendor and target.features.issubset(info.features)
@compatibility_check(architecture_family=RISCV64)
def compatibility_check_for_riscv64(info, target):
"""Compatibility check for riscv64 architectures."""
arch_root = TARGETS[RISCV64]
return (target == arch_root or arch_root in target.ancestors) and (
target.name == info.name or target.vendor == "generic"
)
def brand_string() -> Optional[str]:
"""Returns the brand string of the host, if detected, or None."""
if platform.system() == "Darwin":
return _check_output(
["sysctl", "-n", "machdep.cpu.brand_string"], env=_ensure_bin_usrbin_in_path()
).strip()
if host().family == X86_64:
return CpuidInfoCollector().brand_string()
return None
| CpuidInfoCollector |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/operators/sql.py | {
"start": 30984,
"end": 33978
} | class ____(BaseSQLOperator):
"""
Performs checks against a db.
The ``SQLCheckOperator`` expects a sql query that will return a single row.
Each value on that first row is evaluated using python ``bool`` casting.
If any of the values return ``False`` the check is failed and errors out.
If a Python dict is returned, and any values in the Python dict are ``False``,
the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
* Dictionary with value = ``False`` (``{'DUPLICATE_ID_CHECK': False}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
stop the critical path, preventing from
publishing dubious data, or on the side and receive email alerts
without stopping the progress of the DAG.
:param sql: the sql to be executed. (templated)
:param conn_id: the connection ID used to connect to the database.
:param database: name of database which overwrite the defined one in connection
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql", *BaseSQLOperator.template_fields)
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers: ClassVar[dict] = {"sql": "sql"}
ui_color = "#fff7e6"
def __init__(
self,
*,
sql: str,
conn_id: str | None = None,
database: str | None = None,
parameters: Iterable | Mapping[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.sql = sql
self.parameters = parameters
def execute(self, context: Context):
self.log.info("Executing SQL check: %s", self.sql)
records = self.get_db_hook().get_first(self.sql, self.parameters)
self.log.info("Record: %s", records)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
elif isinstance(records, dict) and not all(records.values()):
self._raise_exception(f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}")
elif not all(records):
self._raise_exception(f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}")
self.log.info("Success.")
| SQLCheckOperator |
python | joke2k__faker | faker/providers/bank/fr_CH/__init__.py | {
"start": 51,
"end": 219
} | class ____(DeChBankProvider):
"""Implement bank provider for ``fr_CH`` locale.
There is no difference from the ``de_CH`` implementation.
"""
pass
| Provider |
python | huggingface__transformers | src/transformers/models/xglm/tokenization_xglm.py | {
"start": 1035,
"end": 5346
} | class ____(TokenizersBackend):
"""
Construct a XGLM tokenizer (backed by HuggingFace's tokenizers library). Based on BPE.
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
tokenizer_file (`str`, *optional*):
Path to a tokenizers JSON file containing the serialization of a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, a minimal vocabulary is created.
merges (`list[tuple[str, str]]`, *optional*):
Custom merge rules for BPE. If not provided, merges are generated from the vocabulary.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether to add a prefix space before encoding.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
bos_token: str = "<s>",
eos_token: str = "</s>",
sep_token: str = "</s>",
cls_token: str = "<s>",
unk_token: str = "<unk>",
pad_token: str = "<pad>",
vocab: Optional[dict] = None,
merges: Optional[list[tuple[str, str]]] = None,
add_prefix_space: bool = True,
**kwargs,
):
self.num_madeup_words = 7
madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
self.add_prefix_space = add_prefix_space
if vocab is not None:
self._vocab = vocab
else:
self._vocab = [
(str(bos_token), 0.0),
(str(pad_token), 0.0),
(str(eos_token), 0.0),
(str(unk_token), 0.0),
]
self._tokenizer = Tokenizer(Unigram(vocab=self._vocab, unk_id=3, byte_fallback=False))
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace(Regex(r"[\n\r\t]"), " "),
normalizers.NFKC(),
normalizers.Replace(Regex(r" {2,}"), " "),
]
)
prepend_scheme = "always" if add_prefix_space else "never"
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{self.eos_token} $A {self.eos_token}",
pair=f"{self.eos_token} $A {self.eos_token} {self.eos_token} $B {self.eos_token}",
special_tokens=[
(self.bos_token, self.bos_token_id),
(self.eos_token, self.eos_token_id),
],
)
__all__ = ["XGLMTokenizer"]
| XGLMTokenizer |
python | sqlalchemy__sqlalchemy | test/orm/test_joins.py | {
"start": 101887,
"end": 105327
} | class ____(fixtures.MappedTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"nodes",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"node_to_nodes",
metadata,
Column(
"left_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
Column(
"right_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
)
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
pass
@classmethod
def insert_data(cls, connection):
Node, nodes, node_to_nodes = (
cls.classes.Node,
cls.tables.nodes,
cls.tables.node_to_nodes,
)
cls.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node,
lazy="select",
secondary=node_to_nodes,
primaryjoin=nodes.c.id == node_to_nodes.c.left_node_id,
secondaryjoin=nodes.c.id == node_to_nodes.c.right_node_id,
)
},
)
sess = Session(connection)
n1 = Node(data="n1")
n2 = Node(data="n2")
n3 = Node(data="n3")
n4 = Node(data="n4")
n5 = Node(data="n5")
n6 = Node(data="n6")
n7 = Node(data="n7")
n1.children = [n2, n3, n4]
n2.children = [n3, n6, n7]
n3.children = [n5, n4]
sess.add(n1)
sess.add(n2)
sess.add(n3)
sess.add(n4)
sess.flush()
sess.close()
def test_any(self):
Node = self.classes.Node
sess = fixture_session()
eq_(
sess.query(Node)
.filter(Node.children.any(Node.data == "n3"))
.order_by(Node.data)
.all(),
[Node(data="n1"), Node(data="n2")],
)
def test_contains(self):
Node = self.classes.Node
sess = fixture_session()
n4 = sess.query(Node).filter_by(data="n4").one()
eq_(
sess.query(Node)
.filter(Node.children.contains(n4))
.order_by(Node.data)
.all(),
[Node(data="n1"), Node(data="n3")],
)
eq_(
sess.query(Node)
.filter(not_(Node.children.contains(n4)))
.order_by(Node.data)
.all(),
[
Node(data="n2"),
Node(data="n4"),
Node(data="n5"),
Node(data="n6"),
Node(data="n7"),
],
)
def test_explicit_join(self):
Node = self.classes.Node
sess = fixture_session()
n1 = aliased(Node)
eq_(
sess.query(Node)
.select_from(join(Node, n1, Node.children))
.filter(n1.data.in_(["n3", "n7"]))
.order_by(Node.id)
.all(),
[Node(data="n1"), Node(data="n2")],
)
| SelfReferentialM2MTest |
python | getsentry__sentry | src/sentry/taskworker/router.py | {
"start": 254,
"end": 339
} | class ____(Protocol):
def route_namespace(self, name: str) -> Topic: ...
| TaskRouter |
python | PrefectHQ__prefect | tests/server/models/test_concurrency_limits.py | {
"start": 1571,
"end": 4296
} | class ____:
async def test_resetting_concurrency_limit(self, session):
concurrency_limit = await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="this bad boy", concurrency_limit=100
),
)
concurrency_limit.active_slots = [str(uuid4()) for _ in range(50)]
limit_before_reset = (
await models.concurrency_limits.read_concurrency_limit_by_tag(
session, "this bad boy"
)
)
assert len(limit_before_reset.active_slots) == 50
await models.concurrency_limits.reset_concurrency_limit_by_tag(
session, "this bad boy"
)
(
await models.concurrency_limits.read_concurrency_limit_by_tag(
session, "this bad boy"
)
)
assert len(limit_before_reset.active_slots) == 0
async def test_resetting_concurrency_limit_with_override(self, session):
concurrency_limit = await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="this bad boy", concurrency_limit=100
),
)
concurrency_limit.active_slots = [str(uuid4()) for _ in range(50)]
limit_before_reset = (
await models.concurrency_limits.read_concurrency_limit_by_tag(
session, "this bad boy"
)
)
assert len(limit_before_reset.active_slots) == 50
await models.concurrency_limits.reset_concurrency_limit_by_tag(
session, "this bad boy", slot_override=[uuid4() for _ in range(42)]
)
(
await models.concurrency_limits.read_concurrency_limit_by_tag(
session, "this bad boy"
)
)
assert len(limit_before_reset.active_slots) == 42
async def test_resetting_limit_returns_limit(self, session):
await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="this bad boy", concurrency_limit=100
),
)
result = await models.concurrency_limits.reset_concurrency_limit_by_tag(
session, "this bad boy"
)
assert len(result.active_slots) == 0
async def test_resetting_limit_returns_none_when_missing_limit(self, session):
result = await models.concurrency_limits.reset_concurrency_limit_by_tag(
session, "this missing_limit"
)
assert result is None
| TestResettingConcurrencyLimits |
python | eventlet__eventlet | tests/greenpool_test.py | {
"start": 11452,
"end": 11808
} | class ____(Exception):
pass
r = random.Random(0)
def pressure(arg):
while r.random() < 0.5:
eventlet.sleep(r.random() * 0.001)
if r.random() < 0.8:
return arg
else:
raise StressException(arg)
def passthru(arg):
while r.random() < 0.5:
eventlet.sleep(r.random() * 0.001)
return arg
| StressException |
python | realpython__materials | python-callable-instances/gui.py | {
"start": 0,
"end": 191
} | class ____:
def show(self):
print("Showing the app's main window...")
def __call__(self):
self.show()
window = MainWindow()
window.show() # Or just window()
| MainWindow |
python | pola-rs__polars | py-polars/tests/unit/io/test_skip_batch_predicate.py | {
"start": 457,
"end": 6256
} | class ____(TypedDict):
"""A test case for Skip Batch Predicate."""
min: Any | None
max: Any | None
null_count: int | None
len: int | None
can_skip: bool
def assert_skp_series(
name: str,
dtype: pl.DataType,
expr: pl.Expr,
cases: Sequence[Case],
) -> None:
sbp = expr._skip_batch_predicate({name: dtype})
df = pl.DataFrame(
[
pl.Series(f"{name}_min", [i["min"] for i in cases], dtype),
pl.Series(f"{name}_max", [i["max"] for i in cases], dtype),
pl.Series(f"{name}_nc", [i["null_count"] for i in cases], get_index_type()),
pl.Series("len", [i["len"] for i in cases], get_index_type()),
]
)
mask = pl.Series("can_skip", [i["can_skip"] for i in cases], pl.Boolean)
out = df.select(can_skip=sbp).to_series()
out = out.replace(None, False)
try:
assert_series_equal(out, mask)
except AssertionError:
print(sbp)
raise
def test_true_false_predicate() -> None:
true_sbp = pl.lit(True)._skip_batch_predicate({})
false_sbp = pl.lit(False)._skip_batch_predicate({})
null_sbp = pl.lit(None)._skip_batch_predicate({})
df = pl.DataFrame({"len": [1]})
out = df.select(
true=true_sbp,
false=false_sbp,
null=null_sbp,
)
assert_frame_equal(
out,
pl.DataFrame(
{
"true": [False],
"false": [True],
"null": [True],
}
),
)
def test_equality() -> None:
assert_skp_series(
"a",
pl.Int64(),
pl.col("a") == 5,
[
{"min": 1, "max": 2, "null_count": 0, "len": 42, "can_skip": True},
{"min": 6, "max": 7, "null_count": 0, "len": 42, "can_skip": True},
{"min": 1, "max": 7, "null_count": 0, "len": 42, "can_skip": False},
{"min": None, "max": None, "null_count": 42, "len": 42, "can_skip": True},
],
)
assert_skp_series(
"a",
pl.Int64(),
pl.col("a") != 0,
[
{"min": 0, "max": 0, "null_count": 6, "len": 7, "can_skip": False},
],
)
def test_datetimes() -> None:
d = datetime.datetime(2023, 4, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
td = datetime.timedelta
assert_skp_series(
"a",
pl.Datetime(time_zone=datetime.timezone.utc),
pl.col("a") == d,
[
{
"min": d - td(days=2),
"max": d - td(days=1),
"null_count": 0,
"len": 42,
"can_skip": True,
},
{
"min": d + td(days=1),
"max": d - td(days=2),
"null_count": 0,
"len": 42,
"can_skip": True,
},
{"min": d, "max": d, "null_count": 42, "len": 42, "can_skip": True},
{"min": d, "max": d, "null_count": 0, "len": 42, "can_skip": False},
{
"min": d - td(days=2),
"max": d + td(days=2),
"null_count": 0,
"len": 42,
"can_skip": False,
},
{
"min": d + td(days=1),
"max": None,
"null_count": None,
"len": None,
"can_skip": True,
},
],
)
@given(
s=series(
name="x",
min_size=1,
excluded_dtypes=pl.Extension, # literals with structs containing extensions are not supported yet
),
)
@settings(
report_multiple_bugs=False,
phases=(Phase.explicit, Phase.reuse, Phase.generate, Phase.target, Phase.explain),
)
def test_skip_batch_predicate_parametric(s: pl.Series) -> None:
name = "x"
dtype = s.dtype
value_a = s.slice(0, 1)
lit_a = pl.lit(value_a[0], dtype)
exprs = [
pl.col.x == lit_a,
pl.col.x != lit_a,
pl.col.x.eq_missing(lit_a),
pl.col.x.ne_missing(lit_a),
pl.col.x.is_null(),
pl.col.x.is_not_null(),
]
try:
_ = s > value_a
exprs += [
pl.col.x > lit_a,
pl.col.x >= lit_a,
pl.col.x < lit_a,
pl.col.x <= lit_a,
pl.col.x.is_in(pl.Series([None, value_a[0]], dtype=dtype)),
]
if s.len() > 1:
value_b = s.slice(1, 1)
lit_b = pl.lit(value_b[0], dtype)
exprs += [
pl.col.x.is_between(lit_a, lit_b),
pl.col.x.is_in(pl.Series([value_a[0], value_b[0]], dtype=dtype)),
]
except Exception as _:
pass
for expr in exprs:
sbp = expr._skip_batch_predicate({name: dtype})
if sbp is None:
continue
mins: list[PythonLiteral | None] = [None]
with contextlib.suppress(Exception):
mins = [s.min()]
maxs: list[PythonLiteral | None] = [None]
with contextlib.suppress(Exception):
maxs = [s.max()]
null_counts = [s.null_count()]
lengths = [s.len()]
df = pl.DataFrame(
[
pl.Series(f"{name}_min", mins, dtype),
pl.Series(f"{name}_max", maxs, dtype),
pl.Series(f"{name}_nc", null_counts, get_index_type()),
pl.Series("len", lengths, get_index_type()),
]
)
can_skip = df.select(can_skip=sbp).fill_null(False).to_series()[0]
if can_skip:
try:
assert s.to_frame().filter(expr).height == 0
except Exception as _:
print(expr)
print(sbp)
print(df)
print(s.to_frame().filter(expr))
raise
| Case |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 1873,
"end": 1960
} | class ____:
def m2(self, x):
self.m3(x)
def m3(self, x):
pass
| A4 |
python | django__django | tests/indexes/models.py | {
"start": 526,
"end": 862
} | class ____(models.Model):
article = models.ForeignKey("indexes.Article", models.CASCADE)
article_no_constraint = models.ForeignKey(
"indexes.Article", models.CASCADE, db_constraint=False, related_name="+"
)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
| ArticleTranslation |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_errorbars11.py | {
"start": 315,
"end": 2045
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_errorbars10.xlsx")
# Test for issue #115. We don't add plus_data and minus_data, like in
# test_chart_errorbars10.py, as would be done from user API. Instead
# we ignore the point data in the comparison test.
self.ignore_elements = {
"xl/charts/chart1.xml": ["<c:ptCount", "<c:pt", "<c:v", "</c:pt>"]
}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with error bars."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [69198976, 69200896]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"y_error_bars": {
"type": "custom",
"plus_values": "=Sheet1!$A$1",
"minus_values": "=Sheet1!$B$1:$B$3",
},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__flatbuffers | tests/namespace_test/NamespaceA/TableInFirstNS.py | {
"start": 3464,
"end": 5376
} | class ____(object):
# TableInFirstNST
def __init__(self):
self.fooTable = None # type: Optional[TableInNestedNST]
self.fooEnum = 0 # type: int
self.fooUnionType = 0 # type: int
self.fooUnion = None # type: Union[None, TableInNestedNST]
self.fooStruct = None # type: Optional[StructInNestedNST]
@classmethod
def InitFromBuf(cls, buf, pos):
tableInFirstNS = TableInFirstNS()
tableInFirstNS.Init(buf, pos)
return cls.InitFromObj(tableInFirstNS)
@classmethod
def InitFromObj(cls, tableInFirstNS):
x = TableInFirstNST()
x._UnPack(tableInFirstNS)
return x
# TableInFirstNST
def _UnPack(self, tableInFirstNS):
if tableInFirstNS is None:
return
if tableInFirstNS.FooTable() is not None:
self.fooTable = TableInNestedNST.InitFromObj(tableInFirstNS.FooTable())
self.fooEnum = tableInFirstNS.FooEnum()
self.fooUnionType = tableInFirstNS.FooUnionType()
self.fooUnion = UnionInNestedNSCreator(
self.fooUnionType, tableInFirstNS.FooUnion()
)
if tableInFirstNS.FooStruct() is not None:
self.fooStruct = StructInNestedNST.InitFromObj(tableInFirstNS.FooStruct())
# TableInFirstNST
def Pack(self, builder):
if self.fooTable is not None:
fooTable = self.fooTable.Pack(builder)
if self.fooUnion is not None:
fooUnion = self.fooUnion.Pack(builder)
TableInFirstNSStart(builder)
if self.fooTable is not None:
TableInFirstNSAddFooTable(builder, fooTable)
TableInFirstNSAddFooEnum(builder, self.fooEnum)
TableInFirstNSAddFooUnionType(builder, self.fooUnionType)
if self.fooUnion is not None:
TableInFirstNSAddFooUnion(builder, fooUnion)
if self.fooStruct is not None:
fooStruct = self.fooStruct.Pack(builder)
TableInFirstNSAddFooStruct(builder, fooStruct)
tableInFirstNS = TableInFirstNSEnd(builder)
return tableInFirstNS
| TableInFirstNST |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform2.py | {
"start": 577,
"end": 623
} | class ____(type):
not_a_field: str
| ModelMeta |
python | MongoEngine__mongoengine | mongoengine/base/datastructures.py | {
"start": 14563,
"end": 15813
} | class ____(DBRef):
__slots__ = ("_cached_doc", "passthrough", "document_type")
def fetch(self, force=False):
if not self._cached_doc or force:
self._cached_doc = self.document_type.objects.get(pk=self.pk)
if not self._cached_doc:
raise DoesNotExist("Trying to dereference unknown document %s" % (self))
return self._cached_doc
@property
def pk(self):
return self.id
def __init__(self, document_type, pk, cached_doc=None, passthrough=False):
self.document_type = document_type
self._cached_doc = cached_doc
self.passthrough = passthrough
super().__init__(self.document_type._get_collection_name(), pk)
def __getitem__(self, name):
if not self.passthrough:
raise KeyError()
document = self.fetch()
return document[name]
def __getattr__(self, name):
if not object.__getattribute__(self, "passthrough"):
raise AttributeError()
document = self.fetch()
try:
return document[name]
except KeyError:
raise AttributeError()
def __repr__(self):
return f"<LazyReference({self.document_type}, {self.pk!r})>"
| LazyReference |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 259925,
"end": 280241
} | class ____(Request):
"""
Get all the company's tasks and all public tasks
:param id: List of IDs to filter by
:type id: Sequence[str]
:param name: Get only tasks whose name matches this pattern (python regular
expression syntax)
:type name: str
:param user: List of user IDs used to filter results by the task's creating
user
:type user: Sequence[str]
:param project: List of project IDs
:type project: Sequence[str]
:param page: Page number, returns a specific page out of the resulting list of
tasks
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page. If the first order field is a hyper parameter or metric then
string values are ordered according to numeric ordering rules where applicable
:type order_by: Sequence[str]
:param type: List of task types. One or more of: 'import', 'annotation',
'training' or 'testing' (case insensitive)
:type type: Sequence[str]
:param tags: List of task user-defined tags. Use '-' prefix to exclude tags
:type tags: Sequence[str]
:param system_tags: List of task system tags. Use '-' prefix to exclude system
tags
:type system_tags: Sequence[str]
:param status: List of task status.
:type status: Sequence[TaskStatusEnum]
:param only_fields: List of task field names (nesting is supported using '.',
e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
:param parent: Parent ID
:type parent: str
:param status_changed: List of status changed constraint strings (utcformat,
epoch) with an optional prefix modifier (\\>,\\>=, \\<, \\<=)
:type status_changed: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param _all_: Multi-field pattern condition (all fields match pattern)
:type _all_: MultiFieldPatternData
:param _any_: Multi-field pattern condition (any field matches pattern)
:type _any_: MultiFieldPatternData
:param search_hidden: If set to 'true' then hidden tasks are included in the
search results
:type search_hidden: bool
:param scroll_id: Scroll ID returned from the previos calls to get_all
:type scroll_id: str
:param refresh_scroll: If set then all the data received with this scroll will
be requeried
:type refresh_scroll: bool
:param size: The number of tasks to retrieve
:type size: int
"""
_service = "tasks"
_action = "get_all"
_version = "2.20"
_schema = {
"definitions": {
"multi_field_pattern_data": {
"properties": {
"fields": {
"description": "List of field names",
"items": {"type": "string"},
"type": ["array", "null"],
},
"pattern": {
"description": "Pattern string (regex)",
"type": ["string", "null"],
},
},
"type": "object",
},
"task_status_enum": {
"enum": [
"created",
"queued",
"in_progress",
"stopped",
"published",
"publishing",
"closed",
"failed",
"completed",
"unknown",
],
"type": "string",
},
},
"dependencies": {"page": ["page_size"]},
"properties": {
"_all_": {
"description": "Multi-field pattern condition (all fields match pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"_any_": {
"description": "Multi-field pattern condition (any field matches pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"id": {
"description": "List of IDs to filter by",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Get only tasks whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": "List of task field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the resulting list of tasks",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)",
"minimum": 1,
"type": ["integer", "null"],
},
"parent": {"description": "Parent ID", "type": ["string", "null"]},
"project": {
"description": "List of project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"refresh_scroll": {
"description": "If set then all the data received with this scroll will be requeried",
"type": ["boolean", "null"],
},
"scroll_id": {
"description": "Scroll ID returned from the previos calls to get_all",
"type": ["string", "null"],
},
"search_hidden": {
"default": False,
"description": "If set to 'true' then hidden tasks are included in the search results",
"type": ["boolean", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"size": {
"description": "The number of tasks to retrieve",
"minimum": 1,
"type": ["integer", "null"],
},
"status": {
"description": "List of task status.",
"items": {"$ref": "#/definitions/task_status_enum"},
"type": ["array", "null"],
},
"status_changed": {
"description": "List of status changed constraint strings (utcformat, epoch) with an optional prefix modifier (\\>, \\>=, \\<, \\<=)",
"items": {"pattern": "^(>=|>|<=|<)?.*$", "type": "string"},
"type": ["array", "null"],
},
"system_tags": {
"description": "List of task system tags. Use '-' prefix to exclude system tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of task user-defined tags. Use '-' prefix to exclude tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"type": {
"description": "List of task types. One or more of: 'training', 'testing', 'inference', 'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc' or 'custom' (case insensitive)",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "List of user IDs used to filter results by the task's creating user",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[List[str]] = None,
name: Optional[str] = None,
user: Optional[List[str]] = None,
project: Optional[List[str]] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
order_by: Optional[List[str]] = None,
type: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
status: Optional[List[Any]] = None,
only_fields: Optional[List[str]] = None,
parent: Optional[str] = None,
status_changed: Optional[List[str]] = None,
search_text: Optional[str] = None,
_all_: Any = None,
_any_: Any = None,
search_hidden: Optional[bool] = False,
scroll_id: Optional[str] = None,
refresh_scroll: Optional[bool] = None,
size: Optional[int] = None,
**kwargs: Any
) -> None:
super(GetAllRequest, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.project = project
self.page = page
self.page_size = page_size
self.order_by = order_by
self.type = type
self.tags = tags
self.system_tags = system_tags
self.status = status
self.only_fields = only_fields
self.parent = parent
self.status_changed = status_changed
self.search_text = search_text
self._all_ = _all_
self._any_ = _any_
self.search_hidden = search_hidden
self.scroll_id = scroll_id
self.refresh_scroll = refresh_scroll
self.size = size
@schema_property("id")
def id(self) -> Optional[List[str]]:
return self._property_id
@id.setter
def id(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[List[str]]:
return self._property_user
@user.setter
def user(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", (list, tuple))
self.assert_isinstance(value, "user", six.string_types, is_array=True)
self._property_user = value
@schema_property("project")
def project(self) -> Optional[List[str]]:
return self._property_project
@project.setter
def project(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", (list, tuple))
self.assert_isinstance(value, "project", six.string_types, is_array=True)
self._property_project = value
@schema_property("page")
def page(self) -> Optional[int]:
return self._property_page
@page.setter
def page(self, value: Optional[int]) -> None:
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self) -> Optional[int]:
return self._property_page_size
@page_size.setter
def page_size(self, value: Optional[int]) -> None:
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("order_by")
def order_by(self) -> Optional[List[str]]:
return self._property_order_by
@order_by.setter
def order_by(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("type")
def type(self) -> Optional[List[str]]:
return self._property_type
@type.setter
def type(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", (list, tuple))
self.assert_isinstance(value, "type", six.string_types, is_array=True)
self._property_type = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("status")
def status(self) -> Optional[List[Any]]:
return self._property_status
@status.setter
def status(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_status = None
return
self.assert_isinstance(value, "status", (list, tuple))
if any((isinstance(v, six.string_types) for v in value)):
value = [TaskStatusEnum(v) if isinstance(v, six.string_types) else v for v in value]
else:
self.assert_isinstance(value, "status", TaskStatusEnum, is_array=True)
self._property_status = value
@schema_property("only_fields")
def only_fields(self) -> Optional[List[str]]:
return self._property_only_fields
@only_fields.setter
def only_fields(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("status_changed")
def status_changed(self) -> Optional[List[str]]:
return self._property_status_changed
@status_changed.setter
def status_changed(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_status_changed = None
return
self.assert_isinstance(value, "status_changed", (list, tuple))
self.assert_isinstance(value, "status_changed", six.string_types, is_array=True)
self._property_status_changed = value
@schema_property("search_text")
def search_text(self) -> Optional[str]:
return self._property_search_text
@search_text.setter
def search_text(self, value: Optional[str]) -> None:
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("_all_")
def _all_(self) -> Any:
return self._property__all_
@_all_.setter
def _all_(self, value: Any) -> None:
if value is None:
self._property__all_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_all_", MultiFieldPatternData)
self._property__all_ = value
@schema_property("_any_")
def _any_(self) -> Any:
return self._property__any_
@_any_.setter
def _any_(self, value: Any) -> None:
if value is None:
self._property__any_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_any_", MultiFieldPatternData)
self._property__any_ = value
@schema_property("search_hidden")
def search_hidden(self) -> Optional[bool]:
return self._property_search_hidden
@search_hidden.setter
def search_hidden(self, value: Optional[bool]) -> None:
if value is None:
self._property_search_hidden = None
return
self.assert_isinstance(value, "search_hidden", (bool,))
self._property_search_hidden = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("refresh_scroll")
def refresh_scroll(self) -> Optional[bool]:
return self._property_refresh_scroll
@refresh_scroll.setter
def refresh_scroll(self, value: Optional[bool]) -> None:
if value is None:
self._property_refresh_scroll = None
return
self.assert_isinstance(value, "refresh_scroll", (bool,))
self._property_refresh_scroll = value
@schema_property("size")
def size(self) -> Optional[int]:
return self._property_size
@size.setter
def size(self, value: Optional[int]) -> None:
if value is None:
self._property_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "size", six.integer_types)
self._property_size = value
| GetAllRequest |
python | celery__celery | celery/backends/base.py | {
"start": 47738,
"end": 47870
} | class ____(BaseKeyValueStoreBackend, SyncBackendMixin):
"""Result backend base class for key/value stores."""
| KeyValueStoreBackend |
python | python-openxml__python-docx | tests/text/test_parfmt.py | {
"start": 374,
"end": 19674
} | class ____:
def it_knows_its_alignment_value(self, alignment_get_fixture):
paragraph_format, expected_value = alignment_get_fixture
assert paragraph_format.alignment == expected_value
def it_can_change_its_alignment_value(self, alignment_set_fixture):
paragraph_format, value, expected_xml = alignment_set_fixture
paragraph_format.alignment = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_space_before(self, space_before_get_fixture):
paragraph_format, expected_value = space_before_get_fixture
assert paragraph_format.space_before == expected_value
def it_can_change_its_space_before(self, space_before_set_fixture):
paragraph_format, value, expected_xml = space_before_set_fixture
paragraph_format.space_before = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_space_after(self, space_after_get_fixture):
paragraph_format, expected_value = space_after_get_fixture
assert paragraph_format.space_after == expected_value
def it_can_change_its_space_after(self, space_after_set_fixture):
paragraph_format, value, expected_xml = space_after_set_fixture
paragraph_format.space_after = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_line_spacing(self, line_spacing_get_fixture):
paragraph_format, expected_value = line_spacing_get_fixture
assert paragraph_format.line_spacing == expected_value
def it_can_change_its_line_spacing(self, line_spacing_set_fixture):
paragraph_format, value, expected_xml = line_spacing_set_fixture
paragraph_format.line_spacing = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_line_spacing_rule(self, line_spacing_rule_get_fixture):
paragraph_format, expected_value = line_spacing_rule_get_fixture
assert paragraph_format.line_spacing_rule == expected_value
def it_can_change_its_line_spacing_rule(self, line_spacing_rule_set_fixture):
paragraph_format, value, expected_xml = line_spacing_rule_set_fixture
paragraph_format.line_spacing_rule = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_first_line_indent(self, first_indent_get_fixture):
paragraph_format, expected_value = first_indent_get_fixture
assert paragraph_format.first_line_indent == expected_value
def it_can_change_its_first_line_indent(self, first_indent_set_fixture):
paragraph_format, value, expected_xml = first_indent_set_fixture
paragraph_format.first_line_indent = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_left_indent(self, left_indent_get_fixture):
paragraph_format, expected_value = left_indent_get_fixture
assert paragraph_format.left_indent == expected_value
def it_can_change_its_left_indent(self, left_indent_set_fixture):
paragraph_format, value, expected_xml = left_indent_set_fixture
paragraph_format.left_indent = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_right_indent(self, right_indent_get_fixture):
paragraph_format, expected_value = right_indent_get_fixture
assert paragraph_format.right_indent == expected_value
def it_can_change_its_right_indent(self, right_indent_set_fixture):
paragraph_format, value, expected_xml = right_indent_set_fixture
paragraph_format.right_indent = value
assert paragraph_format._element.xml == expected_xml
def it_knows_its_on_off_prop_values(self, on_off_get_fixture):
paragraph_format, prop_name, expected_value = on_off_get_fixture
assert getattr(paragraph_format, prop_name) == expected_value
def it_can_change_its_on_off_props(self, on_off_set_fixture):
paragraph_format, prop_name, value, expected_xml = on_off_set_fixture
setattr(paragraph_format, prop_name, value)
assert paragraph_format._element.xml == expected_xml
def it_provides_access_to_its_tab_stops(self, tab_stops_fixture):
paragraph_format, TabStops_, pPr, tab_stops_ = tab_stops_fixture
tab_stops = paragraph_format.tab_stops
TabStops_.assert_called_once_with(pPr)
assert tab_stops is tab_stops_
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:jc{w:val=center}", WD_ALIGN_PARAGRAPH.CENTER),
]
)
def alignment_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", WD_ALIGN_PARAGRAPH.LEFT, "w:p/w:pPr/w:jc{w:val=left}"),
("w:p/w:pPr", WD_ALIGN_PARAGRAPH.CENTER, "w:p/w:pPr/w:jc{w:val=center}"),
(
"w:p/w:pPr/w:jc{w:val=center}",
WD_ALIGN_PARAGRAPH.RIGHT,
"w:p/w:pPr/w:jc{w:val=right}",
),
("w:p/w:pPr/w:jc{w:val=right}", None, "w:p/w:pPr"),
("w:p", None, "w:p/w:pPr"),
]
)
def alignment_set_fixture(self, request):
p_cxml, value, expected_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:ind", None),
("w:p/w:pPr/w:ind{w:firstLine=240}", Pt(12)),
("w:p/w:pPr/w:ind{w:hanging=240}", Pt(-12)),
]
)
def first_indent_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", Pt(36), "w:p/w:pPr/w:ind{w:firstLine=720}"),
("w:p", Pt(-36), "w:p/w:pPr/w:ind{w:hanging=720}"),
("w:p", 0, "w:p/w:pPr/w:ind{w:firstLine=0}"),
("w:p", None, "w:p/w:pPr"),
("w:p/w:pPr/w:ind{w:firstLine=240}", None, "w:p/w:pPr/w:ind"),
(
"w:p/w:pPr/w:ind{w:firstLine=240}",
Pt(-18),
"w:p/w:pPr/w:ind{w:hanging=360}",
),
(
"w:p/w:pPr/w:ind{w:hanging=240}",
Pt(18),
"w:p/w:pPr/w:ind{w:firstLine=360}",
),
]
)
def first_indent_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:ind", None),
("w:p/w:pPr/w:ind{w:left=120}", Pt(6)),
("w:p/w:pPr/w:ind{w:left=-06.3pt}", Pt(-6.3)),
]
)
def left_indent_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", Pt(36), "w:p/w:pPr/w:ind{w:left=720}"),
("w:p", Pt(-3), "w:p/w:pPr/w:ind{w:left=-60}"),
("w:p", 0, "w:p/w:pPr/w:ind{w:left=0}"),
("w:p", None, "w:p/w:pPr"),
("w:p/w:pPr/w:ind{w:left=240}", None, "w:p/w:pPr/w:ind"),
]
)
def left_indent_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:spacing", None),
("w:p/w:pPr/w:spacing{w:line=420}", 1.75),
("w:p/w:pPr/w:spacing{w:line=840,w:lineRule=exact}", Pt(42)),
("w:p/w:pPr/w:spacing{w:line=840,w:lineRule=atLeast}", Pt(42)),
]
)
def line_spacing_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", 1, "w:p/w:pPr/w:spacing{w:line=240,w:lineRule=auto}"),
("w:p", 2.0, "w:p/w:pPr/w:spacing{w:line=480,w:lineRule=auto}"),
("w:p", Pt(42), "w:p/w:pPr/w:spacing{w:line=840,w:lineRule=exact}"),
("w:p/w:pPr", 2, "w:p/w:pPr/w:spacing{w:line=480,w:lineRule=auto}"),
(
"w:p/w:pPr/w:spacing{w:line=360}",
1,
"w:p/w:pPr/w:spacing{w:line=240,w:lineRule=auto}",
),
(
"w:p/w:pPr/w:spacing{w:line=240,w:lineRule=exact}",
1.75,
"w:p/w:pPr/w:spacing{w:line=420,w:lineRule=auto}",
),
(
"w:p/w:pPr/w:spacing{w:line=240,w:lineRule=atLeast}",
Pt(42),
"w:p/w:pPr/w:spacing{w:line=840,w:lineRule=atLeast}",
),
(
"w:p/w:pPr/w:spacing{w:line=240,w:lineRule=exact}",
None,
"w:p/w:pPr/w:spacing",
),
("w:p/w:pPr", None, "w:p/w:pPr"),
]
)
def line_spacing_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:spacing", None),
("w:p/w:pPr/w:spacing{w:line=240}", WD_LINE_SPACING.SINGLE),
("w:p/w:pPr/w:spacing{w:line=360}", WD_LINE_SPACING.ONE_POINT_FIVE),
("w:p/w:pPr/w:spacing{w:line=480}", WD_LINE_SPACING.DOUBLE),
("w:p/w:pPr/w:spacing{w:line=420}", WD_LINE_SPACING.MULTIPLE),
("w:p/w:pPr/w:spacing{w:lineRule=auto}", WD_LINE_SPACING.MULTIPLE),
("w:p/w:pPr/w:spacing{w:lineRule=exact}", WD_LINE_SPACING.EXACTLY),
("w:p/w:pPr/w:spacing{w:lineRule=atLeast}", WD_LINE_SPACING.AT_LEAST),
]
)
def line_spacing_rule_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
(
"w:p",
WD_LINE_SPACING.SINGLE,
"w:p/w:pPr/w:spacing{w:line=240,w:lineRule=auto}",
),
(
"w:p",
WD_LINE_SPACING.ONE_POINT_FIVE,
"w:p/w:pPr/w:spacing{w:line=360,w:lineRule=auto}",
),
(
"w:p",
WD_LINE_SPACING.DOUBLE,
"w:p/w:pPr/w:spacing{w:line=480,w:lineRule=auto}",
),
("w:p", WD_LINE_SPACING.MULTIPLE, "w:p/w:pPr/w:spacing{w:lineRule=auto}"),
("w:p", WD_LINE_SPACING.EXACTLY, "w:p/w:pPr/w:spacing{w:lineRule=exact}"),
(
"w:p/w:pPr/w:spacing{w:line=280,w:lineRule=exact}",
WD_LINE_SPACING.AT_LEAST,
"w:p/w:pPr/w:spacing{w:line=280,w:lineRule=atLeast}",
),
]
)
def line_spacing_rule_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", "keep_together", None),
("w:p/w:pPr/w:keepLines{w:val=on}", "keep_together", True),
("w:p/w:pPr/w:keepLines{w:val=0}", "keep_together", False),
("w:p", "keep_with_next", None),
("w:p/w:pPr/w:keepNext{w:val=1}", "keep_with_next", True),
("w:p/w:pPr/w:keepNext{w:val=false}", "keep_with_next", False),
("w:p", "page_break_before", None),
("w:p/w:pPr/w:pageBreakBefore", "page_break_before", True),
("w:p/w:pPr/w:pageBreakBefore{w:val=0}", "page_break_before", False),
("w:p", "widow_control", None),
("w:p/w:pPr/w:widowControl{w:val=true}", "widow_control", True),
("w:p/w:pPr/w:widowControl{w:val=off}", "widow_control", False),
]
)
def on_off_get_fixture(self, request):
p_cxml, prop_name, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, prop_name, expected_value
@pytest.fixture(
params=[
("w:p", "keep_together", True, "w:p/w:pPr/w:keepLines"),
("w:p", "keep_with_next", True, "w:p/w:pPr/w:keepNext"),
("w:p", "page_break_before", True, "w:p/w:pPr/w:pageBreakBefore"),
("w:p", "widow_control", True, "w:p/w:pPr/w:widowControl"),
(
"w:p/w:pPr/w:keepLines",
"keep_together",
False,
"w:p/w:pPr/w:keepLines{w:val=0}",
),
(
"w:p/w:pPr/w:keepNext",
"keep_with_next",
False,
"w:p/w:pPr/w:keepNext{w:val=0}",
),
(
"w:p/w:pPr/w:pageBreakBefore",
"page_break_before",
False,
"w:p/w:pPr/w:pageBreakBefore{w:val=0}",
),
(
"w:p/w:pPr/w:widowControl",
"widow_control",
False,
"w:p/w:pPr/w:widowControl{w:val=0}",
),
("w:p/w:pPr/w:keepLines{w:val=0}", "keep_together", None, "w:p/w:pPr"),
("w:p/w:pPr/w:keepNext{w:val=0}", "keep_with_next", None, "w:p/w:pPr"),
(
"w:p/w:pPr/w:pageBreakBefore{w:val=0}",
"page_break_before",
None,
"w:p/w:pPr",
),
("w:p/w:pPr/w:widowControl{w:val=0}", "widow_control", None, "w:p/w:pPr"),
]
)
def on_off_set_fixture(self, request):
p_cxml, prop_name, value, expected_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_cxml)
return paragraph_format, prop_name, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:ind", None),
("w:p/w:pPr/w:ind{w:right=160}", Pt(8)),
("w:p/w:pPr/w:ind{w:right=-4.2pt}", Pt(-4.2)),
]
)
def right_indent_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", Pt(36), "w:p/w:pPr/w:ind{w:right=720}"),
("w:p", Pt(-3), "w:p/w:pPr/w:ind{w:right=-60}"),
("w:p", 0, "w:p/w:pPr/w:ind{w:right=0}"),
("w:p", None, "w:p/w:pPr"),
("w:p/w:pPr/w:ind{w:right=240}", None, "w:p/w:pPr/w:ind"),
]
)
def right_indent_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:spacing", None),
("w:p/w:pPr/w:spacing{w:after=240}", Pt(12)),
]
)
def space_after_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", Pt(12), "w:p/w:pPr/w:spacing{w:after=240}"),
("w:p", None, "w:p/w:pPr"),
("w:p/w:pPr", Pt(12), "w:p/w:pPr/w:spacing{w:after=240}"),
("w:p/w:pPr", None, "w:p/w:pPr"),
("w:p/w:pPr/w:spacing", Pt(12), "w:p/w:pPr/w:spacing{w:after=240}"),
("w:p/w:pPr/w:spacing", None, "w:p/w:pPr/w:spacing"),
(
"w:p/w:pPr/w:spacing{w:after=240}",
Pt(42),
"w:p/w:pPr/w:spacing{w:after=840}",
),
("w:p/w:pPr/w:spacing{w:after=840}", None, "w:p/w:pPr/w:spacing"),
]
)
def space_after_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture(
params=[
("w:p", None),
("w:p/w:pPr", None),
("w:p/w:pPr/w:spacing", None),
("w:p/w:pPr/w:spacing{w:before=420}", Pt(21)),
]
)
def space_before_get_fixture(self, request):
p_cxml, expected_value = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
return paragraph_format, expected_value
@pytest.fixture(
params=[
("w:p", Pt(12), "w:p/w:pPr/w:spacing{w:before=240}"),
("w:p", None, "w:p/w:pPr"),
("w:p/w:pPr", Pt(12), "w:p/w:pPr/w:spacing{w:before=240}"),
("w:p/w:pPr", None, "w:p/w:pPr"),
("w:p/w:pPr/w:spacing", Pt(12), "w:p/w:pPr/w:spacing{w:before=240}"),
("w:p/w:pPr/w:spacing", None, "w:p/w:pPr/w:spacing"),
(
"w:p/w:pPr/w:spacing{w:before=240}",
Pt(42),
"w:p/w:pPr/w:spacing{w:before=840}",
),
("w:p/w:pPr/w:spacing{w:before=840}", None, "w:p/w:pPr/w:spacing"),
]
)
def space_before_set_fixture(self, request):
p_cxml, value, expected_p_cxml = request.param
paragraph_format = ParagraphFormat(element(p_cxml))
expected_xml = xml(expected_p_cxml)
return paragraph_format, value, expected_xml
@pytest.fixture
def tab_stops_fixture(self, TabStops_, tab_stops_):
p = element("w:p/w:pPr")
pPr = p.pPr
paragraph_format = ParagraphFormat(p, None)
return paragraph_format, TabStops_, pPr, tab_stops_
# fixture components ---------------------------------------------
@pytest.fixture
def TabStops_(self, request, tab_stops_):
return class_mock(request, "docx.text.parfmt.TabStops", return_value=tab_stops_)
@pytest.fixture
def tab_stops_(self, request):
return instance_mock(request, TabStops)
| DescribeParagraphFormat |
python | networkx__networkx | networkx/algorithms/bipartite/tests/test_generators.py | {
"start": 363,
"end": 13203
} | class ____:
def test_complete_bipartite_graph(self):
G = complete_bipartite_graph(0, 0)
assert nx.is_isomorphic(G, nx.null_graph())
for i in [1, 5]:
G = complete_bipartite_graph(i, 0)
assert nx.is_isomorphic(G, nx.empty_graph(i))
G = complete_bipartite_graph(0, i)
assert nx.is_isomorphic(G, nx.empty_graph(i))
G = complete_bipartite_graph(2, 2)
assert nx.is_isomorphic(G, nx.cycle_graph(4))
G = complete_bipartite_graph(1, 5)
assert nx.is_isomorphic(G, nx.star_graph(5))
G = complete_bipartite_graph(5, 1)
assert nx.is_isomorphic(G, nx.star_graph(5))
# complete_bipartite_graph(m1,m2) is a connected graph with
# m1+m2 nodes and m1*m2 edges
for m1, m2 in [(5, 11), (7, 3)]:
G = complete_bipartite_graph(m1, m2)
assert nx.number_of_nodes(G) == m1 + m2
assert nx.number_of_edges(G) == m1 * m2
with pytest.raises(nx.NetworkXError):
complete_bipartite_graph(7, 3, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError):
complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph)
mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph)
assert mG.is_multigraph()
assert sorted(mG.edges()) == sorted(G.edges())
mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph)
assert mG.is_multigraph()
assert sorted(mG.edges()) == sorted(G.edges())
mG = complete_bipartite_graph(7, 3) # default to Graph
assert sorted(mG.edges()) == sorted(G.edges())
assert not mG.is_multigraph()
assert not mG.is_directed()
# specify nodes rather than number of nodes
for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]:
G = complete_bipartite_graph(n1, n2)
if isinstance(n1, numbers.Integral):
if isinstance(n2, numbers.Integral):
n2 = range(n1, n1 + n2)
n1 = range(n1)
elif isinstance(n2, numbers.Integral):
n2 = range(n2)
edges = {(u, v) for u in n1 for v in n2}
assert edges == set(G.edges)
assert G.size() == len(edges)
# raise when node sets are not distinct
for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]:
pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2)
def test_configuration_model(self):
aseq = []
bseq = []
G = configuration_model(aseq, bseq)
assert len(G) == 0
aseq = [0, 0]
bseq = [0, 0]
G = configuration_model(aseq, bseq)
assert len(G) == 4
assert G.number_of_edges() == 0
aseq = [3, 3, 3, 3]
bseq = [2, 2, 2, 2, 2]
pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq)
aseq = [3, 3, 3, 3]
bseq = [2, 2, 2, 2, 2, 2]
G = configuration_model(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 2, 2, 2]
bseq = [3, 3, 3, 3]
G = configuration_model(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 1, 1, 1]
bseq = [3, 3, 3]
G = configuration_model(aseq, bseq)
assert G.is_multigraph()
assert not G.is_directed()
assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
assert GU.number_of_nodes() == 6
GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
assert GD.number_of_nodes() == 3
G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
assert not G.is_multigraph()
assert not G.is_directed()
pytest.raises(
nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph()
)
pytest.raises(
nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph
)
pytest.raises(
nx.NetworkXError,
configuration_model,
aseq,
bseq,
create_using=nx.MultiDiGraph,
)
def test_havel_hakimi_graph(self):
aseq = []
bseq = []
G = havel_hakimi_graph(aseq, bseq)
assert len(G) == 0
aseq = [0, 0]
bseq = [0, 0]
G = havel_hakimi_graph(aseq, bseq)
assert len(G) == 4
assert G.number_of_edges() == 0
aseq = [3, 3, 3, 3]
bseq = [2, 2, 2, 2, 2]
pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq)
bseq = [2, 2, 2, 2, 2, 2]
G = havel_hakimi_graph(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 2, 2, 2]
bseq = [3, 3, 3, 3]
G = havel_hakimi_graph(aseq, bseq)
assert G.is_multigraph()
assert not G.is_directed()
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
assert GU.number_of_nodes() == 6
GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
assert GD.number_of_nodes() == 4
G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
assert not G.is_multigraph()
assert not G.is_directed()
pytest.raises(
nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph
)
pytest.raises(
nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph
)
pytest.raises(
nx.NetworkXError,
havel_hakimi_graph,
aseq,
bseq,
create_using=nx.MultiDiGraph,
)
def test_reverse_havel_hakimi_graph(self):
aseq = []
bseq = []
G = reverse_havel_hakimi_graph(aseq, bseq)
assert len(G) == 0
aseq = [0, 0]
bseq = [0, 0]
G = reverse_havel_hakimi_graph(aseq, bseq)
assert len(G) == 4
assert G.number_of_edges() == 0
aseq = [3, 3, 3, 3]
bseq = [2, 2, 2, 2, 2]
pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq)
bseq = [2, 2, 2, 2, 2, 2]
G = reverse_havel_hakimi_graph(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 2, 2, 2]
bseq = [3, 3, 3, 3]
G = reverse_havel_hakimi_graph(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 1, 1, 1]
bseq = [3, 3, 3]
G = reverse_havel_hakimi_graph(aseq, bseq)
assert G.is_multigraph()
assert not G.is_directed()
assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
assert GU.number_of_nodes() == 6
GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
assert GD.number_of_nodes() == 3
G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
assert not G.is_multigraph()
assert not G.is_directed()
pytest.raises(
nx.NetworkXError,
reverse_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.DiGraph,
)
pytest.raises(
nx.NetworkXError,
reverse_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.DiGraph,
)
pytest.raises(
nx.NetworkXError,
reverse_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.MultiDiGraph,
)
def test_alternating_havel_hakimi_graph(self):
aseq = []
bseq = []
G = alternating_havel_hakimi_graph(aseq, bseq)
assert len(G) == 0
aseq = [0, 0]
bseq = [0, 0]
G = alternating_havel_hakimi_graph(aseq, bseq)
assert len(G) == 4
assert G.number_of_edges() == 0
aseq = [3, 3, 3, 3]
bseq = [2, 2, 2, 2, 2]
pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq)
bseq = [2, 2, 2, 2, 2, 2]
G = alternating_havel_hakimi_graph(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 2, 2, 2]
bseq = [3, 3, 3, 3]
G = alternating_havel_hakimi_graph(aseq, bseq)
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
aseq = [2, 2, 2, 1, 1, 1]
bseq = [3, 3, 3]
G = alternating_havel_hakimi_graph(aseq, bseq)
assert G.is_multigraph()
assert not G.is_directed()
assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
assert GU.number_of_nodes() == 6
GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
assert GD.number_of_nodes() == 3
G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
assert not G.is_multigraph()
assert not G.is_directed()
pytest.raises(
nx.NetworkXError,
alternating_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.DiGraph,
)
pytest.raises(
nx.NetworkXError,
alternating_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.DiGraph,
)
pytest.raises(
nx.NetworkXError,
alternating_havel_hakimi_graph,
aseq,
bseq,
create_using=nx.MultiDiGraph,
)
def test_preferential_attachment(self):
aseq = [3, 2, 1, 1]
G = preferential_attachment_graph(aseq, 0.5)
assert G.is_multigraph()
assert not G.is_directed()
G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph)
assert not G.is_multigraph()
assert not G.is_directed()
pytest.raises(
nx.NetworkXError,
preferential_attachment_graph,
aseq,
0.5,
create_using=nx.DiGraph(),
)
pytest.raises(
nx.NetworkXError,
preferential_attachment_graph,
aseq,
0.5,
create_using=nx.DiGraph(),
)
pytest.raises(
nx.NetworkXError,
preferential_attachment_graph,
aseq,
0.5,
create_using=nx.DiGraph(),
)
def test_random_graph(self):
n = 10
m = 20
G = random_graph(n, m, 0.9)
assert len(G) == 30
assert nx.is_bipartite(G)
X, Y = nx.algorithms.bipartite.sets(G)
assert set(range(n)) == X
assert set(range(n, n + m)) == Y
def test_random_digraph(self):
n = 10
m = 20
G = random_graph(n, m, 0.9, directed=True)
assert len(G) == 30
assert nx.is_bipartite(G)
X, Y = nx.algorithms.bipartite.sets(G)
assert set(range(n)) == X
assert set(range(n, n + m)) == Y
def test_gnmk_random_graph(self):
n = 10
m = 20
edges = 100
# set seed because sometimes it is not connected
# which raises an error in bipartite.sets(G) below.
G = gnmk_random_graph(n, m, edges, seed=1234)
assert len(G) == n + m
assert nx.is_bipartite(G)
X, Y = nx.algorithms.bipartite.sets(G)
assert set(range(n)) == X
assert set(range(n, n + m)) == Y
assert edges == len(list(G.edges()))
def test_gnmk_random_graph_complete(self):
n = 10
m = 20
edges = 200
G = gnmk_random_graph(n, m, edges)
assert len(G) == n + m
assert nx.is_bipartite(G)
X, Y = nx.algorithms.bipartite.sets(G)
assert set(range(n)) == X
assert set(range(n, n + m)) == Y
assert edges == len(list(G.edges()))
@pytest.mark.parametrize("n", (4, range(4), {0, 1, 2, 3}))
@pytest.mark.parametrize("m", (range(4, 7), {4, 5, 6}))
def test_complete_bipartite_graph_str(self, n, m):
"""Ensure G.name is consistent for all inputs accepted by nodes_or_number.
See gh-7396"""
G = nx.complete_bipartite_graph(n, m)
ans = "Graph named 'complete_bipartite_graph(4, 3)' with 7 nodes and 12 edges"
assert str(G) == ans
| TestGeneratorsBipartite |
python | doocs__leetcode | solution/1200-1299/1291.Sequential Digits/Solution.py | {
"start": 0,
"end": 315
} | class ____:
def sequentialDigits(self, low: int, high: int) -> List[int]:
ans = []
for i in range(1, 9):
x = i
for j in range(i + 1, 10):
x = x * 10 + j
if low <= x <= high:
ans.append(x)
return sorted(ans)
| Solution |
python | coleifer__peewee | tests/schema.py | {
"start": 30787,
"end": 31659
} | class ____(ModelTestCase):
requires = [TMNamedConstraints]
def setUp(self):
super(TestNamedConstraintsIntegration, self).setUp()
if IS_SQLITE:
self.database.pragma('foreign_keys', 'on')
def test_named_constraints_integration(self):
t = TMNamedConstraints.create(k='k1', v=1) # Sanity test.
fails = [
{'fk': t.id - 1, 'k': 'k2', 'v': 1}, # Invalid fk.
{'fk': t.id, 'k': 'k3', 'v': 0}, # Invalid val.
{'fk': t.id, 'k': 'kx', 'v': 1}] # Invalid key.
for f in fails:
# MySQL may use OperationalError.
with self.assertRaises((IntegrityError, OperationalError)):
with self.database.atomic() as tx:
TMNamedConstraints.create(**f)
self.assertEqual(len(TMNamedConstraints), 1)
| TestNamedConstraintsIntegration |
python | wandb__wandb | wandb/vendor/pygments/lexers/sas.py | {
"start": 401,
"end": 9449
} | class ____(RegexLexer):
"""
For `SAS <http://www.sas.com/>`_ files.
.. versionadded:: 2.2
"""
# Syntax from syntax/sas.vim by James Kidd <james.kidd@covance.com>
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
flags = re.IGNORECASE | re.MULTILINE
builtins_macros = (
"bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
"display", "do", "else", "end", "eval", "global", "goto", "if",
"index", "input", "keydef", "label", "left", "length", "let",
"local", "lowcase", "macro", "mend", "nrquote",
"nrstr", "put", "qleft", "qlowcase", "qscan",
"qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
"str", "substr", "superq", "syscall", "sysevalf", "sysexec",
"sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
"then", "to", "trim", "unquote", "until", "upcase", "verify",
"while", "window"
)
builtins_conditionals = (
"do", "if", "then", "else", "end", "until", "while"
)
builtins_statements = (
"abort", "array", "attrib", "by", "call", "cards", "cards4",
"catname", "continue", "datalines", "datalines4", "delete", "delim",
"delimiter", "display", "dm", "drop", "endsas", "error", "file",
"filename", "footnote", "format", "goto", "in", "infile", "informat",
"input", "keep", "label", "leave", "length", "libname", "link",
"list", "lostcard", "merge", "missing", "modify", "options", "output",
"out", "page", "put", "redirect", "remove", "rename", "replace",
"retain", "return", "select", "set", "skip", "startsas", "stop",
"title", "update", "waitsas", "where", "window", "x", "systask"
)
builtins_sql = (
"add", "and", "alter", "as", "cascade", "check", "create",
"delete", "describe", "distinct", "drop", "foreign", "from",
"group", "having", "index", "insert", "into", "in", "key", "like",
"message", "modify", "msgtype", "not", "null", "on", "or",
"order", "primary", "references", "reset", "restrict", "select",
"set", "table", "unique", "update", "validate", "view", "where"
)
builtins_functions = (
"abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
"attrn", "band", "betainv", "blshift", "bnot", "bor",
"brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
"close", "cnonct", "collate", "compbl", "compound",
"compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
"daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
"datejul", "datepart", "datetime", "day", "dclose", "depdb",
"depdbsl", "depsl", "depsyd",
"deptab", "dequote", "dhms", "dif", "digamma",
"dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
"dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
"fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
"fexist", "fget", "fileexist", "filename", "fileref",
"finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
"fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
"fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
"fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
"hbound", "hms", "hosthelp", "hour", "ibessel", "index",
"indexc", "indexw", "input", "inputc", "inputn", "int",
"intck", "intnx", "intrr", "irr", "jbessel", "juldate",
"kurtosis", "lag", "lbound", "left", "length", "lgamma",
"libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
"logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
"mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
"normal", "note", "npv", "open", "ordinal", "pathname",
"pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
"probbeta", "probbnml", "probchi", "probf", "probgam",
"probhypr", "probit", "probnegb", "probnorm", "probt",
"put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
"ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
"rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
"rewind", "right", "round", "saving", "scan", "sdf", "second",
"sign", "sin", "sinh", "skewness", "soundex", "spedis",
"sqrt", "std", "stderr", "stfips", "stname", "stnamel",
"substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
"sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
"tnonct", "today", "translate", "tranwrd", "trigamma",
"trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
"varfmt", "varinfmt", "varlabel", "varlen", "varname",
"varnum", "varray", "varrayx", "vartype", "verify", "vformat",
"vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
"vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
"vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
"vinformatw", "vinformatwx", "vinformatx", "vlabel",
"vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
"vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
"zipnamel", "zipstate"
)
tokens = {
'root': [
include('comments'),
include('proc-data'),
include('cards-datalines'),
include('logs'),
include('general'),
(r'.', Text),
],
# SAS is multi-line regardless, but * is ended by ;
'comments': [
(r'^\s*\*.*?;', Comment),
(r'/\*.*?\*/', Comment),
(r'^\s*\*(.|\n)*?;', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
# Special highlight for proc, data, quit, run
'proc-data': [
(r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
Keyword.Reserved),
],
# Special highlight cards and datalines
'cards-datalines': [
(r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
],
'data': [
(r'(.|\n)*^\s*;\s*$', Other, '#pop'),
],
# Special highlight for put NOTE|ERROR|WARNING (order matters)
'logs': [
(r'\n?^\s*%?put ', Keyword, 'log-messages'),
],
'log-messages': [
(r'NOTE(:|-).*', Generic, '#pop'),
(r'WARNING(:|-).*', Generic.Emph, '#pop'),
(r'ERROR(:|-).*', Generic.Error, '#pop'),
include('general'),
],
'general': [
include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
],
# Keywords, statements, functions, macros
'keywords': [
(words(builtins_statements,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_sql,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_conditionals,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_macros,
prefix = r'%',
suffix = r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix = r'\b',
suffix = r'\('),
Name.Builtin),
],
# Strings and user-defined variables and macros (order matters)
'vars-strings': [
(r'&[a-z_]\w{0,31}\.?', Name.Variable),
(r'%[a-z_]\w{0,31}', Name.Function),
(r'\'', String, 'string_squote'),
(r'"', String, 'string_dquote'),
],
'string_squote': [
('\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
# AFAIK, macro variables are not evaluated in single quotes
# (r'&', Name.Variable, 'validvar'),
(r'[^$\'\\]+', String),
(r'[$\'\\]', String),
],
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'&', Name.Variable, 'validvar'),
(r'[^$&"\\]+', String),
(r'[$"\\]', String),
],
'validvar': [
(r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
],
# SAS numbers and special variables
'numbers': [
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
Number),
],
'special': [
(r'(null|missing|_all_|_automatic_|_character_|_n_|'
r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
Keyword.Constant),
],
# 'operators': [
# (r'(-|=|<=|>=|<|>|<>|&|!=|'
# r'\||\*|\+|\^|/|!|~|~=)', Operator)
# ],
}
| SASLexer |
python | gawel__pyquery | tests/test_pyquery.py | {
"start": 7827,
"end": 11342
} | class ____(TestCase):
klass = pq
html = """
<html>
<body>
<div id="node1"><span>node1</span></div>
<div id="node2" class="node3">
<span>node2</span><span> booyah</span></div>
</body>
</html>
"""
html2 = """
<html>
<body>
<dl>
<dt id="term-1">term 1</dt>
<dd>definition 1-a</dd>
<dd>definition 1-b</dd>
<dd>definition 1-c</dd>
<dd>definition 1-d</dd>
<dt id="term-2">term 2</dt>
<dd>definition 2-a</dd>
<dd class="strange">definition 2-b</dd>
<dd>definition 2-c</dd>
<dt id="term-3">term 3</dt>
<dd>definition 3-a</dd>
<dd>definition 3-b</dd>
</dl>
</body>
</html>
"""
def test_filter(self):
assert len(self.klass('div', self.html).filter('.node3')) == 1
assert len(self.klass('div', self.html).filter('#node2')) == 1
assert len(self.klass('div', self.html).filter(lambda i: i == 0)) == 1
d = pq('<p>Hello <b>warming</b> world</p>')
self.assertEqual(d('strong').filter(lambda el: True), [])
def test_not(self):
assert len(self.klass('div', self.html).not_('.node3')) == 1
def test_is(self):
assert self.klass('div', self.html).is_('.node3')
assert not self.klass('div', self.html).is_('.foobazbar')
def test_find(self):
assert len(self.klass('#node1', self.html).find('span')) == 1
assert len(self.klass('#node2', self.html).find('span')) == 2
assert len(self.klass('div', self.html).find('span')) == 3
def test_each(self):
doc = self.klass(self.html)
doc('span').each(lambda: doc(this).wrap("<em></em>")) # NOQA
assert len(doc('em')) == 3
def test_map(self):
def ids_minus_one(i, elem):
return int(self.klass(elem).attr('id')[-1]) - 1
assert self.klass('div', self.html).map(ids_minus_one) == [0, 1]
d = pq('<p>Hello <b>warming</b> world</p>')
self.assertEqual(d('strong').map(lambda i, el: pq(this).text()), []) # NOQA
def test_end(self):
assert len(self.klass('div', self.html).find('span').end()) == 2
assert len(self.klass('#node2', self.html).find('span').end()) == 1
def test_closest(self):
assert len(self.klass('#node1 span', self.html).closest('body')) == 1
assert self.klass('#node2',
self.html).closest('.node3').attr('id') == 'node2'
assert self.klass('.node3', self.html).closest('form') == []
def test_next_all(self):
d = pq(self.html2)
# without filter
self.assertEqual(
len(d('#term-2').next_all()), 6)
# with filter
self.assertEqual(
len(d('#term-2').next_all('dd')), 5)
# when empty
self.assertEqual(
d('#NOTHING').next_all(), [])
def test_next_until(self):
d = pq(self.html2)
# without filter
self.assertEqual(
len(d('#term-2').next_until('dt')), 3)
# with filter
self.assertEqual(
len(d('#term-2').next_until('dt', ':not(.strange)')), 2)
# when empty
self.assertEqual(
d('#NOTHING').next_until('*'), [])
| TestTraversal |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/incident_groupopenperiod_serializer.py | {
"start": 220,
"end": 418
} | class ____(TypedDict):
incidentId: str | None
incidentIdentifier: int | None
groupId: str
openPeriodId: str
@register(IncidentGroupOpenPeriod)
| IncidentGroupOpenPeriodSerializerResponse |
python | redis__redis-py | redis/sentinel.py | {
"start": 4601,
"end": 6591
} | class ____(ConnectionPool):
"""
Sentinel backed connection pool.
If ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs["connection_class"] = kwargs.get(
"connection_class",
(
SentinelManagedSSLConnection
if kwargs.pop("ssl", False)
else SentinelManagedConnection
),
)
self.is_master = kwargs.pop("is_master", True)
self.check_connection = kwargs.pop("check_connection", False)
self.proxy = SentinelConnectionPoolProxy(
connection_pool=self,
is_master=self.is_master,
check_connection=self.check_connection,
service_name=service_name,
sentinel_manager=sentinel_manager,
)
super().__init__(**kwargs)
self.connection_kwargs["connection_pool"] = self.proxy
self.service_name = service_name
self.sentinel_manager = sentinel_manager
def __repr__(self):
role = "master" if self.is_master else "slave"
return (
f"<{type(self).__module__}.{type(self).__name__}"
f"(service={self.service_name}({role}))>"
)
def reset(self):
super().reset()
self.proxy.reset()
@property
def master_address(self):
return self.proxy.master_address
def owns_connection(self, connection):
check = not self.is_master or (
self.is_master and self.master_address == (connection.host, connection.port)
)
parent = super()
return check and parent.owns_connection(connection)
def get_master_address(self):
return self.proxy.get_master_address()
def rotate_slaves(self):
"Round-robin slave balancer"
return self.proxy.rotate_slaves()
| SentinelConnectionPool |
python | mlflow__mlflow | examples/pytorch/CaptumExample/Titanic_Captum_Interpret.py | {
"start": 2349,
"end": 14340
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(12, 12)
self.sigmoid1 = nn.Sigmoid()
self.linear2 = nn.Linear(12, 8)
self.sigmoid2 = nn.Sigmoid()
self.linear3 = nn.Linear(8, 2)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
lin1_out = self.linear1(x)
sigmoid_out1 = self.sigmoid1(lin1_out)
sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1))
return self.softmax(self.linear3(sigmoid_out2))
def prepare():
RANDOM_SEED = 42
titanic_data = get_titanic()
print(titanic_data)
labels = titanic_data["survived"].to_numpy()
titanic_data = titanic_data.drop(["survived"], axis=1)
feature_names = list(titanic_data.columns)
data = titanic_data.to_numpy()
# Separate training and test sets using
train_features, test_features, train_labels, test_labels = train_test_split(
data, labels, test_size=0.3, random_state=RANDOM_SEED, stratify=labels
)
train_features = np.vstack(train_features[:, :]).astype(np.float32)
test_features = np.vstack(test_features[:, :]).astype(np.float32)
return train_features, train_labels, test_features, test_labels, feature_names
def count_model_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad:
continue
param = parameter.nonzero(as_tuple=False).size(0)
table.add_row([name, param])
total_params += param
return table, total_params
def visualize_importances(
feature_names,
importances,
title="Average Feature Importances",
plot=True,
axis_title="Features",
):
print(title)
feature_imp = PrettyTable(["feature_name", "importances"])
feature_imp_dict = {}
for i in range(len(feature_names)):
print(feature_names[i], ": ", f"{importances[i]:.3f}")
feature_imp.add_row([feature_names[i], importances[i]])
feature_imp_dict[str(feature_names[i])] = importances[i]
x_pos = np.arange(len(feature_names))
if plot:
fig, ax = plt.subplots(figsize=(12, 6))
ax.bar(x_pos, importances, align="center")
ax.set(title=title, xlabel=axis_title)
ax.set_xticks(x_pos)
ax.set_xticklabels(feature_names, rotation="vertical")
mlflow.log_figure(fig, title + ".png")
return feature_imp, feature_imp_dict
def train(USE_PRETRAINED_MODEL=False):
net = TitanicSimpleNNModel()
train_features, train_labels, test_features, test_labels, feature_names = prepare()
USE_PRETRAINED_MODEL = dict_args["use_pretrained_model"]
if USE_PRETRAINED_MODEL:
net.load_state_dict(torch.load("models/titanic_state_dict.pt"))
net.eval()
print("Model Loaded!")
else:
criterion = nn.CrossEntropyLoss()
num_epochs = dict_args["max_epochs"]
mlflow.log_param("epochs", num_epochs)
mlflow.log_param("lr", dict_args["lr"])
optimizer = torch.optim.Adam(net.parameters(), lr=dict_args["lr"])
print(train_features.dtype)
input_tensor = torch.from_numpy(train_features).type(torch.FloatTensor)
label_tensor = torch.from_numpy(train_labels)
for epoch in range(num_epochs):
output = net(input_tensor)
loss = criterion(output, label_tensor)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 50 == 0:
print(f"Epoch {epoch + 1}/{num_epochs} => Train Loss: {loss.item():.2f}")
mlflow.log_metric(
f"Epoch {epoch + 1!s} Loss",
float(loss.item()),
step=epoch,
)
if not os.path.isdir("models"):
os.makedirs("models")
torch.save(net.state_dict(), "models/titanic_state_dict.pt")
summary, _ = count_model_parameters(net)
mlflow.log_text(str(summary), "model_summary.txt")
return (
net,
train_features,
train_labels,
test_features,
test_labels,
feature_names,
)
def compute_accuracy(net, features, labels, title=None):
input_tensor = torch.from_numpy(features).type(torch.FloatTensor)
out_probs = net(input_tensor).detach().numpy()
out_classes = np.argmax(out_probs, axis=1)
mlflow.log_metric(title, float(sum(out_classes == labels) / len(labels)))
print(title, sum(out_classes == labels) / len(labels))
return input_tensor
def feature_conductance(net, test_input_tensor):
"""
The method takes tensor(s) of input examples (matching the forward function of the model),
and returns the input attributions for the given input example.
The returned values of the attribute method are the attributions,
which match the size of the given inputs, and delta,
which approximates the error between the approximated integral and true integral.
This method saves the distribution of avg attributions of the trained features for the given target.
"""
ig = IntegratedGradients(net)
test_input_tensor.requires_grad_()
attr, _ = ig.attribute(test_input_tensor, target=1, return_convergence_delta=True)
attr = attr.detach().numpy()
# To understand these attributions, we can first average them across all the inputs and print and visualize the average attribution for each feature.
feature_imp, feature_imp_dict = visualize_importances(feature_names, np.mean(attr, axis=0))
mlflow.log_metrics(feature_imp_dict)
mlflow.log_text(str(feature_imp), "feature_imp_summary.txt")
fig, (ax1, ax2) = plt.subplots(2, 1)
fig.tight_layout(pad=3)
ax1.hist(attr[:, 1], 100)
ax1.set(title="Distribution of Sibsp Attribution Values")
# we can bucket the examples by the value of the sibsp feature and plot the average attribution for the feature.
# In the plot below, the size of the dot is proportional to the number of examples with that value.
bin_means, bin_edges, _ = stats.binned_statistic(
test_features[:, 1], attr[:, 1], statistic="mean", bins=6
)
bin_count, _, _ = stats.binned_statistic(
test_features[:, 1], attr[:, 1], statistic="count", bins=6
)
bin_width = bin_edges[1] - bin_edges[0]
bin_centers = bin_edges[1:] - bin_width / 2
ax2.scatter(bin_centers, bin_means, s=bin_count)
ax2.set(xlabel="Average Sibsp Feature Value", ylabel="Average Attribution")
mlflow.log_figure(fig, "Average_Sibsp_Feature_Value.png")
def layer_conductance(net, test_input_tensor):
"""
To use Layer Conductance, we create a LayerConductance object passing in the model as well as the module (layer) whose output we would like to understand.
In this case, we choose net.sigmoid1, the output of the first hidden layer.
Now obtain the conductance values for all the test examples by calling attribute on the LayerConductance object.
LayerConductance also requires a target index for networks with multiple outputs, defining the index of the output for which gradients are computed.
Similar to feature attributions, we provide target = 1, corresponding to survival.
LayerConductance also utilizes a baseline, but we simply use the default zero baseline as in integrated gradients.
"""
cond = LayerConductance(net, net.sigmoid1)
cond_vals = cond.attribute(test_input_tensor, target=1)
cond_vals = cond_vals.detach().numpy()
# We can begin by visualizing the average conductance for each neuron.
neuron_names = ["neuron " + str(x) for x in range(12)]
avg_neuron_imp, neuron_imp_dict = visualize_importances(
neuron_names,
np.mean(cond_vals, axis=0),
title="Average Neuron Importances",
axis_title="Neurons",
)
mlflow.log_metrics(neuron_imp_dict)
mlflow.log_text(str(avg_neuron_imp), "neuron_imp_summary.txt")
# We can also look at the distribution of each neuron's attributions. Below we look at the distributions for neurons 7 and 9,
# and we can confirm that their attribution distributions are very close to 0, suggesting they are not learning substantial features.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9, 6))
fig.tight_layout(pad=3)
ax1.hist(cond_vals[:, 9], 100)
ax1.set(title="Neuron 9 Distribution")
ax2.hist(cond_vals[:, 7], 100)
ax2.set(title="Neuron 7 Distribution")
mlflow.log_figure(fig, "Neurons_Distribution.png")
def neuron_conductance(net, test_input_tensor, neuron_selector=None):
"""
We have identified that some of the neurons are not learning important features, while others are.
Can we now understand what each of these important neurons are looking at in the input?
For instance, are they identifying different features in the input or similar ones?
To answer these questions, we can apply the third type of attributions available in Captum, **Neuron Attributions**.
This allows us to understand what parts of the input contribute to activating a particular input neuron. For this example,
we will apply Neuron Conductance, which divides the neuron's total conductance value into the contribution from each individual input feature.
To use Neuron Conductance, we create a NeuronConductance object, analogously to Conductance,
passing in the model as well as the module (layer) whose output we would like to understand, in this case, net.sigmoid1, as before.
"""
neuron_selector = 0
neuron_cond = NeuronConductance(net, net.sigmoid1)
# We can now obtain the neuron conductance values for all the test examples by calling attribute on the NeuronConductance object.
# Neuron Conductance requires the neuron index in the target layer for which attributions are requested as well as the target index for networks with multiple outputs,
# similar to layer conductance. As before, we provide target = 1, corresponding to survival, and compute neuron conductance for neurons 0 and 10, the significant neurons identified above.
# The neuron index can be provided either as a tuple or as just an integer if the layer output is 1-dimensional.
neuron_cond_vals = neuron_cond.attribute(
test_input_tensor, neuron_selector=neuron_selector, target=1
)
neuron_cond, _ = visualize_importances(
feature_names,
neuron_cond_vals.mean(dim=0).detach().numpy(),
title=f"Average Feature Importances for Neuron {neuron_selector}",
)
mlflow.log_text(
str(neuron_cond), "Avg_Feature_Importances_Neuron_" + str(neuron_selector) + ".txt"
)
if __name__ == "__main__":
parser = ArgumentParser(description="Titanic Captum Example")
parser.add_argument(
"--use_pretrained_model",
default=False,
metavar="N",
help="Use pretrained model or train from the scratch",
)
parser.add_argument(
"--max_epochs",
type=int,
default=100,
metavar="N",
help="Number of epochs to be used for training",
)
parser.add_argument(
"--lr",
type=float,
default=0.1,
metavar="LR",
help="learning rate (default: 0.1)",
)
args = parser.parse_args()
dict_args = vars(args)
with mlflow.start_run(run_name="Titanic_Captum_mlflow"):
net, train_features, train_labels, test_features, test_labels, feature_names = train()
compute_accuracy(net, train_features, train_labels, title="Train Accuracy")
test_input_tensor = compute_accuracy(net, test_features, test_labels, title="Test Accuracy")
feature_conductance(net, test_input_tensor)
layer_conductance(net, test_input_tensor)
neuron_conductance(net, test_input_tensor)
mlflow.log_param("Train Size", len(train_labels))
mlflow.log_param("Test Size", len(test_labels))
| TitanicSimpleNNModel |
python | django__django | django/test/testcases.py | {
"start": 63114,
"end": 65162
} | class ____(threading.Thread):
"""Thread for running a live HTTP server while the tests are running."""
server_class = ThreadedWSGIServer
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server(
connections_override=self.connections_override,
)
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, connections_override=None):
return self.server_class(
(self.host, self.port),
QuietWSGIRequestHandler,
allow_reuse_address=False,
connections_override=connections_override,
)
def terminate(self):
if hasattr(self, "httpd"):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
| LiveServerThread |
python | django__django | django/core/files/uploadedfile.py | {
"start": 2747,
"end": 3467
} | class ____(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(
self,
file,
field_name,
name,
content_type,
size,
charset,
content_type_extra=None,
):
super().__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
return self
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
| InMemoryUploadedFile |
python | PyCQA__pylint | tests/functional/s/singledispatch/singledispatchmethod_function.py | {
"start": 1113,
"end": 1541
} | class ____:
@singledispatchmethod
@staticmethod
def convert_position(position):
pass
@convert_position.register
@staticmethod
def _(position: str) -> tuple:
position_a, position_b = position.split(",")
return (int(position_a), int(position_b))
@convert_position.register
@staticmethod
def _(position: tuple) -> str:
return f"{position[0]},{position[1]}"
| Board2 |
python | walkccc__LeetCode | solutions/2190. Most Frequent Number Following Key In an Array/2190.py | {
"start": 0,
"end": 245
} | class ____:
def mostFrequent(self, nums: list[int], key: int) -> int:
count = collections.Counter()
for a, b in itertools.pairwise(nums):
if a == key:
count[b] += 1
return max(count, key=lambda num: count[num])
| Solution |
python | django__django | tests/auth_tests/test_models.py | {
"start": 19751,
"end": 20950
} | class ____(TestCase):
"""
Tests the behavior of the guaranteed is_active attribute
"""
def test_builtin_user_isactive(self):
user = User.objects.create(username="foo", email="foo@bar.com")
# is_active is true by default
self.assertIs(user.is_active, True)
user.is_active = False
user.save()
user_fetched = User.objects.get(pk=user.pk)
# the is_active flag is saved
self.assertFalse(user_fetched.is_active)
@override_settings(AUTH_USER_MODEL="auth_tests.IsActiveTestUser1")
def test_is_active_field_default(self):
"""
tests that the default value for is_active is provided
"""
UserModel = get_user_model()
user = UserModel(username="foo")
self.assertIs(user.is_active, True)
# you can set the attribute - but it will not save
user.is_active = False
# there should be no problem saving - but the attribute is not saved
user.save()
user_fetched = UserModel._default_manager.get(pk=user.pk)
# the attribute is always true for newly retrieved instance
self.assertIs(user_fetched.is_active, True)
| IsActiveTestCase |
python | doocs__leetcode | solution/3000-3099/3090.Maximum Length Substring With Two Occurrences/Solution.py | {
"start": 0,
"end": 312
} | class ____:
def maximumLengthSubstring(self, s: str) -> int:
cnt = Counter()
ans = i = 0
for j, c in enumerate(s):
cnt[c] += 1
while cnt[c] > 2:
cnt[s[i]] -= 1
i += 1
ans = max(ans, j - i + 1)
return ans
| Solution |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 2022,
"end": 4377
} | class ____(SyncAPIResource):
@cached_property
def client_secrets(self) -> ClientSecrets:
return ClientSecrets(self._client)
@cached_property
def calls(self) -> Calls:
from ...lib._realtime import _Calls
return _Calls(self._client)
@cached_property
def with_raw_response(self) -> RealtimeWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return RealtimeWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> RealtimeWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return RealtimeWithStreamingResponse(self)
def connect(
self,
*,
call_id: str | Omit = omit,
model: str | Omit = omit,
extra_query: Query = {},
extra_headers: Headers = {},
websocket_connection_options: WebsocketConnectionOptions = {},
) -> RealtimeConnectionManager:
"""
The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
Some notable benefits of the API include:
- Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
- Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
- Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
The Realtime API is a stateful, event-based API that communicates over a WebSocket.
"""
return RealtimeConnectionManager(
client=self._client,
extra_query=extra_query,
extra_headers=extra_headers,
websocket_connection_options=websocket_connection_options,
call_id=call_id,
model=model,
)
| Realtime |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 13026,
"end": 14281
} | class ____(Benchmark):
params = (['euclidean', 'minkowski', 'cityblock',
'seuclidean', 'sqeuclidean', 'cosine', 'correlation',
'hamming', 'jaccard', 'jensenshannon', 'chebyshev', 'canberra',
'braycurtis', 'mahalanobis', 'yule', 'dice',
'rogerstanimoto', 'russellrao', 'sokalsneath',
'minkowski-P3'])
param_names = ['metric']
def setup(self, metric):
rng = np.random.default_rng(123)
self.points = rng.random((2, 3))
self.metric = metric
if metric == 'minkowski-P3':
# p=2 is just the euclidean metric, try another p value as well
self.kwargs = {'p': 3.0}
self.metric = 'minkowski'
elif metric == 'mahalanobis':
self.kwargs = {'VI': [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]}
elif metric == 'seuclidean':
self.kwargs = {'V': [1, 0.1, 0.1]}
else:
self.kwargs = {}
def time_dist(self, metric):
"""Time distance metrics individually (without batching with
cdist or pdist).
"""
getattr(distance, self.metric)(self.points[0], self.points[1],
**self.kwargs)
| SingleDist |
python | geekcomputers__Python | Industrial_developed_hangman/tests/test_hangman/test_main.py | {
"start": 362,
"end": 2598
} | class ____(object):
def __init__(self, values_to_input: List[str]) -> None:
self.values_to_input: List[str] = values_to_input
def __call__(self) -> str:
return self.values_to_input.pop(0)
@pytest.fixture
def choice_fn() -> Callable:
return lambda array: array[0] # noqa: E731
@pytest.mark.internet_required
def test_parse_word_from_site() -> None:
assert isinstance(parse_word_from_site(), str)
def test_parse_word_from_site_no_internet() -> None:
with requests_mock.Mocker() as mock:
mock.get("https://random-word-api.herokuapp.com/word", text='["some text"]')
assert parse_word_from_site() == "some text"
def test_parse_word_from_site_err() -> None:
with pytest.raises(RuntimeError):
parse_word_from_site(url="https://www.google.com/dsfsdfds/sdfsdf/sdfds")
def test_get_word(choice_fn: Callable) -> None:
fk_print = FkPrint()
fk_input = FkInput(["none"])
main_process = MainProcess(
Source(1), pr_func=fk_print, in_func=fk_input, ch_func=choice_fn
)
assert isinstance(main_process.get_word(), str)
def test_start_game_win(choice_fn: Callable) -> None:
fk_print = FkPrint()
fk_input = FkInput(["j", "a", "m"])
main_process = MainProcess(
Source(0), pr_func=fk_print, in_func=fk_input, ch_func=choice_fn
)
main_process.start_game()
assert "YOU WON" in fk_print.container[-1]
@pytest.mark.parametrize(
"input_str", [[letter] * 10 for letter in "qwertyuiopasdfghjklzxcvbnm"]
) # noqa: WPS435
def test_start_game_loose(input_str: List[str], choice_fn: Callable) -> None:
fk_print = FkPrint()
fk_input = FkInput(input_str)
main_process = MainProcess(
Source(0), pr_func=fk_print, in_func=fk_input, ch_func=choice_fn
)
main_process.start_game()
assert "YOU LOST" in fk_print.container[-1]
def test_wow_year(freezer, choice_fn: Callable) -> None:
freezer.move_to("2135-10-17")
fk_print = FkPrint()
fk_input = FkInput(["none"] * 100) # noqa: WPS435
main_process = MainProcess(
Source(0), pr_func=fk_print, in_func=fk_input, ch_func=choice_fn
)
main_process.start_game()
assert "this program" in fk_print.container[0]
| FkInput |
python | doocs__leetcode | solution/2900-2999/2992.Number of Self-Divisible Permutations/Solution.py | {
"start": 0,
"end": 420
} | class ____:
def selfDivisiblePermutationCount(self, n: int) -> int:
@cache
def dfs(mask: int) -> int:
i = mask.bit_count() + 1
if i > n:
return 1
ans = 0
for j in range(1, n + 1):
if (mask >> j & 1) == 0 and gcd(i, j) == 1:
ans += dfs(mask | 1 << j)
return ans
return dfs(0)
| Solution |
python | pypa__setuptools | setuptools/tests/config/test_apply_pyprojecttoml.py | {
"start": 18635,
"end": 19420
} | class ____:
def test_pyproject_sets_attribute(self, tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
pyproject = Path("pyproject.toml")
toml_config = """
[project]
name = "test"
version = "42.0"
[tool.setuptools]
ext-modules = [
{name = "my.ext", sources = ["hello.c", "world.c"]}
]
"""
pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
with pytest.warns(pyprojecttoml._ExperimentalConfiguration):
dist = pyprojecttoml.apply_configuration(Distribution({}), pyproject)
assert len(dist.ext_modules) == 1
assert dist.ext_modules[0].name == "my.ext"
assert set(dist.ext_modules[0].sources) == {"hello.c", "world.c"}
| TestExtModules |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 30994,
"end": 31514
} | class ____(ChainedSource):
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from(utils.__name__, "dataclass_fields")
)
codegen(self.base)
codegen.extend_output(create_call_function(1, False))
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"___dataclass_fields({self.base.name()})"
@dataclasses.dataclass(frozen=True)
| DataclassFieldsSource |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_mlengine.py | {
"start": 1544,
"end": 29456
} | class ____:
def setup_method(self):
self.hook = hook.MLEngineHook()
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.build")
def test_mle_engine_client_creation(self, mock_build, mock_authorize):
result = self.hook.get_conn()
assert mock_build.return_value == result
mock_build.assert_called_with("ml", "v1", http=mock_authorize.return_value, cache_discovery=False)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_version(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
version_name = "test-version"
version = {"name": version_name, "labels": {"other-label": "test-value"}}
version_with_airflow_version = {
"name": "test-version",
"labels": {"other-label": "test-value", "airflow-version": hook._AIRFLOW_VERSION},
}
operation_path = f"projects/{project_id}/operations/test-operation"
model_path = f"projects/{project_id}/models/{model_name}"
operation_done = {"name": operation_path, "done": True}
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.create.return_value.execute.return_value
) = version
(
mock_get_conn.return_value.projects.return_value.operations.return_value.get.return_value.execute.return_value
) = {"name": operation_path, "done": True}
create_version_response = self.hook.create_version(
project_id=project_id, model_name=model_name, version_spec=deepcopy(version)
)
assert create_version_response == operation_done
mock_get_conn.assert_has_calls(
[
mock.call()
.projects()
.models()
.versions()
.create(body=version_with_airflow_version, parent=model_path),
mock.call().projects().models().versions().create().execute(num_retries=5),
mock.call().projects().operations().get(name=version_name),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_version_with_labels(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
version_name = "test-version"
version = {"name": version_name}
version_with_airflow_version = {
"name": "test-version",
"labels": {"airflow-version": hook._AIRFLOW_VERSION},
}
operation_path = f"projects/{project_id}/operations/test-operation"
model_path = f"projects/{project_id}/models/{model_name}"
operation_done = {"name": operation_path, "done": True}
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.create.return_value.execute.return_value
) = version
(
mock_get_conn.return_value.projects.return_value.operations.return_value.get.return_value.execute.return_value
) = {"name": operation_path, "done": True}
create_version_response = self.hook.create_version(
project_id=project_id, model_name=model_name, version_spec=deepcopy(version)
)
assert create_version_response == operation_done
mock_get_conn.assert_has_calls(
[
mock.call()
.projects()
.models()
.versions()
.create(body=version_with_airflow_version, parent=model_path),
mock.call().projects().models().versions().create().execute(num_retries=5),
mock.call().projects().operations().get(name=version_name),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_set_default_version(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
version_name = "test-version"
operation_path = f"projects/{project_id}/operations/test-operation"
version_path = f"projects/{project_id}/models/{model_name}/versions/{version_name}"
operation_done = {"name": operation_path, "done": True}
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.setDefault.return_value.execute.return_value
) = operation_done
set_default_version_response = self.hook.set_default_version(
project_id=project_id, model_name=model_name, version_name=version_name
)
assert set_default_version_response == operation_done
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().versions().setDefault(body={}, name=version_path),
mock.call().projects().models().versions().setDefault().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.time.sleep")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_list_versions(self, mock_get_conn, mock_sleep):
project_id = "test-project"
model_name = "test-model"
model_path = f"projects/{project_id}/models/{model_name}"
version_names = [f"ver_{ix}" for ix in range(3)]
response_bodies = [
{"nextPageToken": f"TOKEN-{ix}", "versions": [ver]} for ix, ver in enumerate(version_names)
]
response_bodies[-1].pop("nextPageToken")
pages_requests = [mock.Mock(**{"execute.return_value": body}) for body in response_bodies]
versions_mock = mock.Mock(
**{"list.return_value": pages_requests[0], "list_next.side_effect": pages_requests[1:] + [None]}
)
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value
) = versions_mock
list_versions_response = self.hook.list_versions(project_id=project_id, model_name=model_name)
assert list_versions_response == version_names
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().versions().list(pageSize=100, parent=model_path),
mock.call().projects().models().versions().list().execute(num_retries=5),
]
+ [
mock.call()
.projects()
.models()
.versions()
.list_next(previous_request=pages_requests[i], previous_response=response_bodies[i])
for i in range(3)
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_delete_version(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
version_name = "test-version"
operation_path = f"projects/{project_id}/operations/test-operation"
version_path = f"projects/{project_id}/models/{model_name}/versions/{version_name}"
version = {"name": operation_path}
operation_not_done = {"name": operation_path, "done": False}
operation_done = {"name": operation_path, "done": True}
(
mock_get_conn.return_value.projects.return_value.operations.return_value.get.return_value.execute.side_effect
) = [operation_not_done, operation_done]
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.delete.return_value.execute.return_value
) = version
delete_version_response = self.hook.delete_version(
project_id=project_id, model_name=model_name, version_name=version_name
)
assert delete_version_response == operation_done
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().versions().delete(name=version_path),
mock.call().projects().models().versions().delete().execute(num_retries=5),
mock.call().projects().operations().get(name=operation_path),
mock.call().projects().operations().get().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_model(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
model = {
"name": model_name,
}
model_with_airflow_version = {
"name": model_name,
"labels": {"airflow-version": hook._AIRFLOW_VERSION},
}
project_path = f"projects/{project_id}"
(
mock_get_conn.return_value.projects.return_value.models.return_value.create.return_value.execute.return_value
) = model
create_model_response = self.hook.create_model(project_id=project_id, model=deepcopy(model))
assert create_model_response == model
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().create(body=model_with_airflow_version, parent=project_path),
mock.call().projects().models().create().execute(num_retries=5),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_model_idempotency(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
model = {
"name": model_name,
}
model_with_airflow_version = {
"name": model_name,
"labels": {"airflow-version": hook._AIRFLOW_VERSION},
}
project_path = f"projects/{project_id}"
(
mock_get_conn.return_value.projects.return_value.models.return_value.create.return_value.execute.side_effect
) = [
HttpError(
resp=httplib2.Response({"status": 409}),
content=json.dumps(
{
"error": {
"code": 409,
"message": "Field: model.name Error: A model with the same name already exists.",
"status": "ALREADY_EXISTS",
"details": [
{
"@type": "type.googleapis.com/google.rpc.BadRequest",
"fieldViolations": [
{
"field": "model.name",
"description": "A model with the same name already exists.",
}
],
}
],
}
}
).encode(),
)
]
(
mock_get_conn.return_value.projects.return_value.models.return_value.get.return_value.execute.return_value
) = deepcopy(model)
create_model_response = self.hook.create_model(project_id=project_id, model=deepcopy(model))
assert create_model_response == model
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().create(body=model_with_airflow_version, parent=project_path),
mock.call().projects().models().create().execute(num_retries=5),
]
)
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().get(name="projects/test-project/models/test-model"),
mock.call().projects().models().get().execute(num_retries=5),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_model_with_labels(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
model = {"name": model_name, "labels": {"other-label": "test-value"}}
model_with_airflow_version = {
"name": model_name,
"labels": {"other-label": "test-value", "airflow-version": hook._AIRFLOW_VERSION},
}
project_path = f"projects/{project_id}"
(
mock_get_conn.return_value.projects.return_value.models.return_value.create.return_value.execute.return_value
) = model
create_model_response = self.hook.create_model(project_id=project_id, model=deepcopy(model))
assert create_model_response == model
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().create(body=model_with_airflow_version, parent=project_path),
mock.call().projects().models().create().execute(num_retries=5),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_get_model(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
model = {"model": model_name}
model_path = f"projects/{project_id}/models/{model_name}"
(
mock_get_conn.return_value.projects.return_value.models.return_value.get.return_value.execute.return_value
) = model
get_model_response = self.hook.get_model(project_id=project_id, model_name=model_name)
assert get_model_response == model
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().get(name=model_path),
mock.call().projects().models().get().execute(num_retries=5),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_delete_model(self, mock_get_conn):
project_id = "test-project"
model_name = "test-model"
model = {"model": model_name}
model_path = f"projects/{project_id}/models/{model_name}"
(
mock_get_conn.return_value.projects.return_value.models.return_value.delete.return_value.execute.return_value
) = model
self.hook.delete_model(project_id=project_id, model_name=model_name)
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().delete(name=model_path),
mock.call().projects().models().delete().execute(num_retries=5),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.log")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_delete_model_when_not_exists(self, mock_get_conn, mock_log):
project_id = "test-project"
model_name = "test-model"
model_path = f"projects/{project_id}/models/{model_name}"
http_error = HttpError(
resp=mock.MagicMock(status=404, reason="Model not found."), content=b"Model not found."
)
(
mock_get_conn.return_value.projects.return_value.models.return_value.delete.return_value.execute.side_effect
) = [http_error]
self.hook.delete_model(project_id=project_id, model_name=model_name)
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().delete(name=model_path),
mock.call().projects().models().delete().execute(num_retries=5),
]
)
mock_log.error.assert_called_once_with("Model was not found: %s", http_error)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.time.sleep")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_delete_model_with_contents(self, mock_get_conn, mock_sleep):
project_id = "test-project"
model_name = "test-model"
model_path = f"projects/{project_id}/models/{model_name}"
operation_path = f"projects/{project_id}/operations/test-operation"
operation_done = {"name": operation_path, "done": True}
version_names = ["AAA", "BBB", "CCC"]
versions = [
{
"name": f"projects/{project_id}/models/{model_name}/versions/{version_name}",
"isDefault": i == 0,
}
for i, version_name in enumerate(version_names)
]
(
mock_get_conn.return_value.projects.return_value.operations.return_value.get.return_value.execute.return_value
) = operation_done
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.list.return_value.execute.return_value
) = {"versions": versions}
(
mock_get_conn.return_value.projects.return_value.models.return_value.versions.return_value.list_next.return_value
) = None
self.hook.delete_model(project_id=project_id, model_name=model_name, delete_contents=True)
mock_get_conn.assert_has_calls(
[
mock.call().projects().models().delete(name=model_path),
mock.call().projects().models().delete().execute(num_retries=5),
]
+ [
mock.call()
.projects()
.models()
.versions()
.delete(
name=f"projects/{project_id}/models/{model_name}/versions/{version_name}",
)
for version_name in version_names
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.time.sleep")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_mlengine_job(self, mock_get_conn, mock_sleep):
project_id = "test-project"
job_id = "test-job-id"
project_path = f"projects/{project_id}"
job_path = f"projects/{project_id}/jobs/{job_id}"
new_job = {
"jobId": job_id,
"foo": 4815162342,
}
new_job_with_airflow_version = {
"jobId": job_id,
"foo": 4815162342,
"labels": {"airflow-version": hook._AIRFLOW_VERSION},
}
job_succeeded = {
"jobId": job_id,
"state": "SUCCEEDED",
}
job_queued = {
"jobId": job_id,
"state": "QUEUED",
}
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.create.return_value.execute.return_value
) = job_queued
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.get.return_value.execute.side_effect
) = [job_queued, job_succeeded]
create_job_response = self.hook.create_job(project_id=project_id, job=deepcopy(new_job))
assert create_job_response == job_succeeded
mock_get_conn.assert_has_calls(
[
mock.call().projects().jobs().create(body=new_job_with_airflow_version, parent=project_path),
mock.call().projects().jobs().get(name=job_path),
mock.call().projects().jobs().get().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.time.sleep")
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_mlengine_job_with_labels(self, mock_get_conn, mock_sleep):
project_id = "test-project"
job_id = "test-job-id"
project_path = f"projects/{project_id}"
job_path = f"projects/{project_id}/jobs/{job_id}"
new_job = {"jobId": job_id, "foo": 4815162342, "labels": {"other-label": "test-value"}}
new_job_with_airflow_version = {
"jobId": job_id,
"foo": 4815162342,
"labels": {"other-label": "test-value", "airflow-version": hook._AIRFLOW_VERSION},
}
job_succeeded = {
"jobId": job_id,
"state": "SUCCEEDED",
}
job_queued = {
"jobId": job_id,
"state": "QUEUED",
}
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.create.return_value.execute.return_value
) = job_queued
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.get.return_value.execute.side_effect
) = [job_queued, job_succeeded]
create_job_response = self.hook.create_job(project_id=project_id, job=deepcopy(new_job))
assert create_job_response == job_succeeded
mock_get_conn.assert_has_calls(
[
mock.call().projects().jobs().create(body=new_job_with_airflow_version, parent=project_path),
mock.call().projects().jobs().get(name=job_path),
mock.call().projects().jobs().get().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_mlengine_job_reuse_existing_job_by_default(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
project_path = f"projects/{project_id}"
job_path = f"projects/{project_id}/jobs/{job_id}"
job_succeeded = {
"jobId": job_id,
"foo": 4815162342,
"state": "SUCCEEDED",
}
error_job_exists = HttpError(resp=mock.MagicMock(status=409), content=b"Job already exists")
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.create.return_value.execute.side_effect
) = error_job_exists
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.get.return_value.execute.return_value
) = job_succeeded
create_job_response = self.hook.create_job(project_id=project_id, job=job_succeeded)
assert create_job_response == job_succeeded
mock_get_conn.assert_has_calls(
[
mock.call().projects().jobs().create(body=job_succeeded, parent=project_path),
mock.call().projects().jobs().create().execute(num_retries=5),
mock.call().projects().jobs().get(name=job_path),
mock.call().projects().jobs().get().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_mlengine_job_check_existing_job_failed(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
my_job = {
"jobId": job_id,
"foo": 4815162342,
"state": "SUCCEEDED",
"someInput": {"input": "someInput"},
}
different_job = {
"jobId": job_id,
"foo": 4815162342,
"state": "SUCCEEDED",
"someInput": {"input": "someDifferentInput"},
}
error_job_exists = HttpError(resp=mock.MagicMock(status=409), content=b"Job already exists")
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.create.return_value.execute.side_effect
) = error_job_exists
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.get.return_value.execute.return_value
) = different_job
def check_input(existing_job):
return existing_job.get("someInput") == my_job["someInput"]
with pytest.raises(HttpError):
self.hook.create_job(project_id=project_id, job=my_job, use_existing_job_fn=check_input)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_create_mlengine_job_check_existing_job_success(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
my_job = {
"jobId": job_id,
"foo": 4815162342,
"state": "SUCCEEDED",
"someInput": {"input": "someInput"},
}
error_job_exists = HttpError(resp=mock.MagicMock(status=409), content=b"Job already exists")
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.create.return_value.execute.side_effect
) = error_job_exists
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.get.return_value.execute.return_value
) = my_job
def check_input(existing_job):
return existing_job.get("someInput") == my_job["someInput"]
create_job_response = self.hook.create_job(
project_id=project_id, job=my_job, use_existing_job_fn=check_input
)
assert create_job_response == my_job
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_cancel_mlengine_job(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
job_path = f"projects/{project_id}/jobs/{job_id}"
job_cancelled = {}
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.cancel.return_value.execute.return_value
) = job_cancelled
cancel_job_response = self.hook.cancel_job(job_id=job_id, project_id=project_id)
assert cancel_job_response == job_cancelled
mock_get_conn.assert_has_calls(
[
mock.call().projects().jobs().cancel(name=job_path),
mock.call().projects().jobs().cancel().execute(num_retries=5),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_cancel_mlengine_job_nonexistent_job(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
job_cancelled = {}
error_job_does_not_exist = HttpError(resp=mock.MagicMock(status=404), content=b"Job does not exist")
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.cancel.return_value.execute.side_effect
) = error_job_does_not_exist
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.cancel.return_value.execute.return_value
) = job_cancelled
with pytest.raises(HttpError):
self.hook.cancel_job(job_id=job_id, project_id=project_id)
@mock.patch("airflow.providers.google.cloud.hooks.mlengine.MLEngineHook.get_conn")
def test_cancel_mlengine_job_completed_job(self, mock_get_conn):
project_id = "test-project"
job_id = "test-job-id"
job_path = f"projects/{project_id}/jobs/{job_id}"
job_cancelled = {}
error_job_already_completed = HttpError(
resp=mock.MagicMock(status=400), content=b"Job already completed"
)
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.cancel.return_value.execute.side_effect
) = error_job_already_completed
(
mock_get_conn.return_value.projects.return_value.jobs.return_value.cancel.return_value.execute.return_value
) = job_cancelled
cancel_job_response = self.hook.cancel_job(job_id=job_id, project_id=project_id)
assert cancel_job_response == job_cancelled
mock_get_conn.assert_has_calls(
[
mock.call().projects().jobs().cancel(name=job_path),
mock.call().projects().jobs().cancel().execute(num_retries=5),
],
any_order=True,
)
| TestMLEngineHook |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/config.py | {
"start": 5118,
"end": 9133
} | class ____:
__match_args__ = ("_name",)
__slots__ = ("_name", "_argname")
def __init__(self, case, argname, case_names):
self._name = case
self._argname = argname
for casename in case_names:
setattr(self, casename, casename == case)
if typing.TYPE_CHECKING:
def __getattr__(self, key: str) -> bool: ...
@property
def name(self):
return self._name
def __bool__(self):
return self._name == self._argname
def __nonzero__(self):
return not self.__bool__()
def __str__(self):
return f"{self._argname}={self._name!r}"
def __repr__(self):
return str(self)
def __eq__(self, value: object) -> bool:
if isinstance(value, str):
return self._name == value
elif isinstance(value, Variation):
return self.name == value.name and self._argname == self._argname
else:
return NotImplemented
def fail(self) -> NoReturn:
fail(f"Unknown {self}")
@classmethod
def idfn(cls, variation):
return variation.name
@classmethod
def generate_cases(cls, argname, cases):
case_names = [
argname if c is True else "not_" + argname if c is False else c
for c in cases
]
typ = type(
argname,
(Variation,),
{
"__slots__": tuple(case_names),
},
)
return [typ(casename, argname, case_names) for casename in case_names]
def variation(argname_or_fn, cases=None):
"""a helper around testing.combinations that provides a single namespace
that can be used as a switch.
e.g.::
@testing.variation("querytyp", ["select", "subquery", "legacy_query"])
@testing.variation("lazy", ["select", "raise", "raise_on_sql"])
def test_thing(self, querytyp, lazy, decl_base):
class Thing(decl_base):
__tablename__ = "thing"
# use name directly
rel = relationship("Rel", lazy=lazy.name)
# use as a switch
if querytyp.select:
stmt = select(Thing)
elif querytyp.subquery:
stmt = select(Thing).subquery()
elif querytyp.legacy_query:
stmt = Session.query(Thing)
else:
querytyp.fail()
The variable provided is a slots object of boolean variables, as well
as the name of the case itself under the attribute ".name"
"""
if inspect.isfunction(argname_or_fn):
argname = argname_or_fn.__name__
cases = argname_or_fn(None)
@variation_fixture(argname, cases)
def go(self, request):
yield request.param
return go
else:
argname = argname_or_fn
cases_plus_limitations = [
(
entry
if (isinstance(entry, tuple) and len(entry) == 2)
else (entry, None)
)
for entry in cases
]
variations = Variation.generate_cases(
argname, [c for c, l in cases_plus_limitations]
)
return combinations(
*[
(
(variation._name, variation, limitation)
if limitation is not None
else (variation._name, variation)
)
for variation, (case, limitation) in zip(
variations, cases_plus_limitations
)
],
id_="ia",
argnames=argname,
)
def variation_fixture(argname, cases, scope="function"):
return fixture(
params=Variation.generate_cases(argname, cases),
ids=Variation.idfn,
scope=scope,
)
def fixture(*arg: Any, **kw: Any) -> Any:
return _fixture_functions.fixture(*arg, **kw)
def get_current_test_name() -> str:
return _fixture_functions.get_current_test_name()
def mark_base_test_class() -> Any:
return _fixture_functions.mark_base_test_class()
| Variation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault3.py | {
"start": 929,
"end": 1001
} | class ____(Generic[*Ts0, T3]): ...
# This should generate an error.
| ClassE |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-couchbase/source_couchbase/source.py | {
"start": 418,
"end": 3215
} | class ____(AbstractSource):
def __init__(self):
super().__init__()
self.connection_string = None
self.username = None
self.password = None
self.bucket_name = None
@property
def name(self) -> str:
return "Couchbase"
def _set_config_values(self, config: Mapping[str, Any]):
self.connection_string = config["connection_string"]
self.username = config["username"]
self.password = config["password"]
self.bucket_name = config["bucket"]
def _get_cluster(self) -> Cluster:
auth = PasswordAuthenticator(self.username, self.password)
options = ClusterOptions(auth)
options.apply_profile("wan_development")
cluster = Cluster(self.connection_string, options)
cluster.wait_until_ready(timedelta(seconds=5))
return cluster
@staticmethod
def _ensure_primary_index(cluster: Cluster, bucket: str, scope: str, collection: str):
index_name = f"{bucket}_{scope}_{collection}_primary_index"
query = f"CREATE PRIMARY INDEX IF NOT EXISTS `{index_name}` ON `{bucket}`.`{scope}`.`{collection}`"
logging.debug(f"Executing query to ensure primary index: {query}")
try:
cluster.query(query).execute()
logging.debug(f"Successfully ensured primary index for {bucket}.{scope}.{collection}")
except Exception as e:
logging.warning(f"Failed to create primary index for {bucket}.{scope}.{collection}: {str(e)}")
def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
self._set_config_values(config)
try:
cluster = self._get_cluster()
bucket = cluster.bucket(self.bucket_name)
bucket.ping()
logger.info("Successfully connected to Couchbase cluster and bucket")
return True, None
except Exception as e:
logger.error(f"Connection check failed: {str(e)}")
return False, f"Connection check failed: {str(e)}"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
self._set_config_values(config)
cluster = self._get_cluster()
bucket = cluster.bucket(self.bucket_name)
streams = []
for scope in bucket.collections().get_all_scopes():
for collection in scope.collections:
self._ensure_primary_index(cluster, self.bucket_name, scope.name, collection.name)
stream = DocumentStream(cluster, self.bucket_name, scope.name, collection.name)
streams.append(stream)
logging.info(f"Added stream for {scope.name}.{collection.name}")
logging.info(f"Generated {len(streams)} streams")
return streams
| SourceCouchbase |
python | nryoung__algorithms | tests/test_data_structures.py | {
"start": 20875,
"end": 21753
} | class ____(unittest.TestCase):
"""
Test Union Find Implementation
"""
def test_union_find_by_rank(self):
self.uf = union_find_by_rank.UnionFindByRank(6)
self.uf.make_set(6)
self.uf.union(1, 0)
self.uf.union(3, 4)
self.uf.union(2, 4)
self.uf.union(5, 2)
self.uf.union(6, 5)
self.assertEqual(self.uf.find(1), 1)
self.assertEqual(self.uf.find(3), 3)
# test tree is created by rank
self.uf.union(5, 0)
self.assertEqual(self.uf.find(2), 3)
self.assertEqual(self.uf.find(5), 3)
self.assertEqual(self.uf.find(6), 3)
self.assertEqual(self.uf.find(0), 3)
self.assertEqual(self.uf.is_connected(0, 1), True)
self.assertEqual(self.uf.is_connected(3, 4), True)
self.assertEqual(self.uf.is_connected(5, 3), True)
| TestUnionFindByRank |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/chat_store/base_db.py | {
"start": 202,
"end": 457
} | class ____(str, Enum):
"""Status of a message in the chat store."""
# Message is in the active FIFO queue
ACTIVE = "active"
# Message has been processed and is archived, removed from the active queue
ARCHIVED = "archived"
| MessageStatus |
python | TheAlgorithms__Python | data_structures/binary_tree/maximum_sum_bst.py | {
"start": 148,
"end": 2143
} | class ____:
val: int = 0
left: TreeNode | None = None
right: TreeNode | None = None
def max_sum_bst(root: TreeNode | None) -> int:
"""
The solution traverses a binary tree to find the maximum sum of
keys in any subtree that is a Binary Search Tree (BST). It uses
recursion to validate BST properties and calculates sums, returning
the highest sum found among all valid BST subtrees.
>>> t1 = TreeNode(4)
>>> t1.left = TreeNode(3)
>>> t1.left.left = TreeNode(1)
>>> t1.left.right = TreeNode(2)
>>> print(max_sum_bst(t1))
2
>>> t2 = TreeNode(-4)
>>> t2.left = TreeNode(-2)
>>> t2.right = TreeNode(-5)
>>> print(max_sum_bst(t2))
0
>>> t3 = TreeNode(1)
>>> t3.left = TreeNode(4)
>>> t3.left.left = TreeNode(2)
>>> t3.left.right = TreeNode(4)
>>> t3.right = TreeNode(3)
>>> t3.right.left = TreeNode(2)
>>> t3.right.right = TreeNode(5)
>>> t3.right.right.left = TreeNode(4)
>>> t3.right.right.right = TreeNode(6)
>>> print(max_sum_bst(t3))
20
"""
ans: int = 0
def solver(node: TreeNode | None) -> tuple[bool, int, int, int]:
"""
Returns the maximum sum by making recursive calls
>>> t1 = TreeNode(1)
>>> print(solver(t1))
1
"""
nonlocal ans
if not node:
return True, INT_MAX, INT_MIN, 0 # Valid BST, min, max, sum
is_left_valid, min_left, max_left, sum_left = solver(node.left)
is_right_valid, min_right, max_right, sum_right = solver(node.right)
if is_left_valid and is_right_valid and max_left < node.val < min_right:
total_sum = sum_left + sum_right + node.val
ans = max(ans, total_sum)
return True, min(min_left, node.val), max(max_right, node.val), total_sum
return False, -1, -1, -1 # Not a valid BST
solver(root)
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
| TreeNode |
python | numpy__numpy | numpy/testing/_private/utils.py | {
"start": 53151,
"end": 75361
} | class ____(unittest.TestCase):
def nop(self):
pass
_d = _Dummy('nop')
def assert_raises(*args, **kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
assert_raises(exception_class)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
Alternatively, `assert_raises` can be used as a context manager:
>>> from numpy.testing import assert_raises
>>> with assert_raises(ZeroDivisionError):
... 1 / 0
is equivalent to
>>> def div(x, y):
... return x / y
>>> assert_raises(ZeroDivisionError, div, 1, 0)
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaises(*args, **kwargs)
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
assert_raises_regex(exception_class, expected_regexp, callable, *args,
**kwargs)
assert_raises_regex(exception_class, expected_regexp)
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Alternatively, can be used as a context manager like `assert_raises`.
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
def measure(code_str, times=1, label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
The precision of the timing is 10 milli-seconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
>>> times = 10
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
>>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs, globs = frame.f_locals, frame.f_globals
code = compile(code_str, f'Test name: {label} ', 'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec(code, globs, locs)
elapsed = jiffies() - elapsed
return 0.01 * elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
if not HAS_REFCOUNT:
return True
import gc
import numpy as np
b = np.arange(100 * 100).reshape(100, 100)
c = b
i = 1
gc.disable()
try:
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
finally:
gc.enable()
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
                    err_msg='', verbose=True, *, strict=False):
    """
    Raise an AssertionError if two objects are not equal up to desired
    tolerance.

    The elementwise test is ``abs(actual - desired) <= atol + rtol *
    abs(desired)``.  In contrast to `numpy.allclose` the operands are not
    broadcast against each other and the default tolerances are tighter.
    NaNs in matching positions compare equal when *equal_nan* is true.

    When one operand is a scalar and the other is array_like, the scalar is
    compared as if broadcast to the array's shape (so empty arrays compare
    equal to scalars); pass ``strict=True`` to disable this and to also
    require matching shapes and dtypes.

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional.
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    strict : bool, optional
        If True, raise an ``AssertionError`` when either the shape or the
        data type of the arguments does not match, and disable the scalar
        special handling described above.

        .. versionadded:: 2.0.0

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    actual = np.asanyarray(actual)
    desired = np.asanyarray(desired)

    def _close(x, y):
        # Delegate the elementwise tolerance test to the core isclose.
        return np._core.numeric.isclose(x, y, rtol=rtol, atol=atol,
                                        equal_nan=equal_nan)

    header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
    assert_array_compare(_close, actual, desired, err_msg=str(err_msg),
                         verbose=verbose, header=header, equal_nan=equal_nan,
                         strict=strict)
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose
    amplitude is variable.  An AssertionError is raised unless::

        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))

    holds for every element.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance.
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is
        larger than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ax, ay = np.abs(x), np.abs(y)
    # Tolerance: nulp times the ULP of the larger-magnitude operand.
    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
    if np.all(np.abs(x - y) <= ref):
        return
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        # nulp_diff does not support complex input, so omit the max figure.
        msg = f"Arrays are not equal to {nulp} ULP"
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = f"Arrays are not equal to {nulp} ULP (max is {max_nulp:g})"
    raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last
    Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a`
        and `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers
        between items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate
    between various representations of NAN (ULP difference between
    0x7fc00000 and 0xffc00000 is zero).

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ret = nulp_diff(a, b, dtype)
    if np.all(ret <= maxulp):
        return ret
    raise AssertionError("Arrays are not almost equal up to %g "
                         "ULP (max difference is %g ULP)" %
                         (maxulp, np.max(ret)))
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate between
    various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
    is zero).

    Examples
    --------
    # By definition, epsilon is the smallest number such as 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
    1.0
    """
    import numpy as np
    # A falsy dtype (including the default None) means "infer from inputs".
    if dtype:
        x = np.asarray(x, dtype=dtype)
        y = np.asarray(y, dtype=dtype)
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    # Promote both operands to a common floating type before the bit-level
    # comparison below.
    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    # Both operands gain the same leading axis here, so the shape check
    # below is unaffected; scalars become 1-element arrays.
    x = np.array([x], dtype=t)
    y = np.array([y], dtype=t)

    # Canonicalize NaNs: re-assigning np.nan normalizes payload bits so any
    # two NaNs end up zero ULP apart (see Notes).
    x[np.isnan(x)] = np.nan
    y[np.isnan(y)] = np.nan

    if not x.shape == y.shape:
        raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}")

    def _diff(rx, ry, vdt):
        # Absolute difference of the signed-magnitude integer
        # representations, returned as the common float type `t`.
        diff = np.asarray(rx - ry, dtype=vdt)
        return np.abs(diff)

    rx = integer_repr(x)
    ry = integer_repr(y)
    return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx < 0]
elif rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation
    of x."""
    import numpy as np
    # (float dtype, integer view type, sign-flip constant) per supported
    # precision; the constant is the minimum value of the integer type.
    _repr_params = (
        (np.float16, np.int16, np.int16(-2**15)),
        (np.float32, np.int32, np.int32(-2**31)),
        (np.float64, np.int64, np.int64(-2**63)),
    )
    for ftype, vdt, comp in _repr_params:
        if x.dtype == ftype:
            return _integer_repr(x, vdt, comp)
    raise ValueError(f'Unsupported dtype {x.dtype}')
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    """Context manager that fails unless the managed block emits at least
    one warning of *warning_class*.  *name*, if given, is the callable name
    used in the failure message."""
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings(_warn=False) as sup:
        log = sup.record(warning_class)
        yield
    if len(log) == 0:
        suffix = f' when calling {name}' if name is not None else ''
        raise AssertionError("No warning raised" + suffix)
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class warning_class should be thrown by the callable when
    invoked with arguments args and keyword arguments kwargs.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may
    be used as a context manager::

        with assert_warns(SomeWarning):
            do_something()

    .. deprecated:: 2.4
        This is deprecated. Use `warnings.catch_warnings` or
        ``pytest.warns`` instead.

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable, optional
        Callable to test
    *args : Arguments
        Arguments for `func`.
    **kwargs : Kwargs
        Keyword arguments for `func`.

    Returns
    -------
    The value returned by `func`.
    """
    warnings.warn(
        "NumPy warning suppression and assertion utilities are deprecated. "
        "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, "
        "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)",
        DeprecationWarning, stacklevel=2)
    if not args and not kwargs:
        # Context-manager form: ``with assert_warns(W): ...``.
        return _assert_warns_context(warning_class)
    if len(args) < 1:
        # kwargs were given but no callable; point pytest users elsewhere.
        if "match" in kwargs:
            raise RuntimeError(
                "assert_warns does not use 'match' kwarg, "
                "use pytest.warns instead"
            )
        raise RuntimeError("assert_warns(...) needs at least one arg")
    func, *func_args = args
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*func_args, **kwargs)
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
yield
if len(l) > 0:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(f'Got warnings{name_str}: {l}')
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context
    manager::

        with assert_no_warnings():
            do_something()

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.
    """
    if not args:
        # Context-manager form.
        return _assert_no_warnings_context()
    func, *func_args = args
    with _assert_no_warnings_context(name=func.__name__):
        return func(*func_args, **kwargs)
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
    """
    generator producing data with different alignment and offsets
    to test simd vectorization

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations, creates one input
                 and output array
        'binary': create data for binary operations, creates two input
                  and one output array
    max_size : integer
        maximum size of data to produce

    Returns
    -------
    if type is 'unary' yields one output, one input array and a message
    containing information on the data
    if type is 'binary' yields one output array, two input array and a message
    containing information on the data
    """
    # Message templates describing offsets, size, dtype and aliasing kind.
    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
    # o is the element offset into the allocation; slicing off the first o
    # elements misaligns the data start relative to the allocated buffer.
    for o in range(3):
        for s in range(o + 2, max(o + 3, max_size)):
            if type == 'unary':
                # Fresh input per call so the aliased/in-place cases below
                # each start from pristine data.
                inp = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
                d = inp()
                yield d, d, ufmt % (o, o, s, dtype, 'in place')
                # Shifted views exercise partially overlapping operands.
                yield out[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'out of place')
                yield inp()[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'aliased')
                yield inp()[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'aliased')
            if type == 'binary':
                inp1 = lambda: arange(s, dtype=dtype)[o:]
                inp2 = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp1(), inp2(), bfmt % \
                    (o, o, o, s, dtype, 'out of place')
                # Output aliasing each of the two inputs in turn.
                d = inp1()
                yield d, d, inp2(), bfmt % \
                    (o, o, o, s, dtype, 'in place1')
                d = inp2()
                yield d, inp1(), d, bfmt % \
                    (o, o, o, s, dtype, 'in place2')
                # Shifted/overlapping combinations of output and inputs.
                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'out of place')
                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'aliased')
| _Dummy |
python | MongoEngine__mongoengine | tests/fields/test_sequence_field.py | {
"start": 68,
"end": 9148
} | class ____(MongoDBTestCase):
def test_sequence_field(self):
class Person(Document):
id = SequenceField(primary_key=True)
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
ids = [i.id for i in Person.objects]
assert ids == list(range(1, 11))
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
Person.id.set_next_value(1000)
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 1000
def test_sequence_field_get_next_value(self):
class Person(Document):
id = SequenceField(primary_key=True)
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
assert Person.id.get_next_value() == 11
self.db["mongoengine.counters"].drop()
assert Person.id.get_next_value() == 1
class Person(Document):
id = SequenceField(primary_key=True, value_decorator=str)
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
assert Person.id.get_next_value() == "11"
self.db["mongoengine.counters"].drop()
assert Person.id.get_next_value() == "1"
def test_sequence_field_sequence_name(self):
class Person(Document):
id = SequenceField(primary_key=True, sequence_name="jelly")
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
c = self.db["mongoengine.counters"].find_one({"_id": "jelly.id"})
assert c["next"] == 10
ids = [i.id for i in Person.objects]
assert ids == list(range(1, 11))
c = self.db["mongoengine.counters"].find_one({"_id": "jelly.id"})
assert c["next"] == 10
Person.id.set_next_value(1000)
c = self.db["mongoengine.counters"].find_one({"_id": "jelly.id"})
assert c["next"] == 1000
def test_multiple_sequence_fields(self):
class Person(Document):
id = SequenceField(primary_key=True)
counter = SequenceField()
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
ids = [i.id for i in Person.objects]
assert ids == list(range(1, 11))
counters = [i.counter for i in Person.objects]
assert counters == list(range(1, 11))
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
Person.id.set_next_value(1000)
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 1000
Person.counter.set_next_value(999)
c = self.db["mongoengine.counters"].find_one({"_id": "person.counter"})
assert c["next"] == 999
def test_sequence_fields_reload(self):
class Animal(Document):
counter = SequenceField()
name = StringField()
self.db["mongoengine.counters"].drop()
Animal.drop_collection()
a = Animal(name="Boi").save()
assert a.counter == 1
a.reload()
assert a.counter == 1
a.counter = None
assert a.counter == 2
a.save()
assert a.counter == 2
a = Animal.objects.first()
assert a.counter == 2
a.reload()
assert a.counter == 2
def test_multiple_sequence_fields_on_docs(self):
class Animal(Document):
id = SequenceField(primary_key=True)
name = StringField()
class Person(Document):
id = SequenceField(primary_key=True)
name = StringField()
self.db["mongoengine.counters"].drop()
Animal.drop_collection()
Person.drop_collection()
for x in range(10):
Animal(name="Animal %s" % x).save()
Person(name="Person %s" % x).save()
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
c = self.db["mongoengine.counters"].find_one({"_id": "animal.id"})
assert c["next"] == 10
ids = [i.id for i in Person.objects]
assert ids == list(range(1, 11))
_id = [i.id for i in Animal.objects]
assert _id == list(range(1, 11))
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
c = self.db["mongoengine.counters"].find_one({"_id": "animal.id"})
assert c["next"] == 10
def test_sequence_field_value_decorator(self):
class Person(Document):
id = SequenceField(primary_key=True, value_decorator=str)
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
p = Person(name="Person %s" % x)
p.save()
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
ids = [i.id for i in Person.objects]
assert ids == [str(i) for i in range(1, 11)]
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
def test_embedded_sequence_field(self):
class Comment(EmbeddedDocument):
id = SequenceField()
content = StringField(required=True)
class Post(Document):
title = StringField(required=True)
comments = ListField(EmbeddedDocumentField(Comment))
self.db["mongoengine.counters"].drop()
Post.drop_collection()
Post(
title="MongoEngine",
comments=[
Comment(content="NoSQL Rocks"),
Comment(content="MongoEngine Rocks"),
],
).save()
c = self.db["mongoengine.counters"].find_one({"_id": "comment.id"})
assert c["next"] == 2
post = Post.objects.first()
assert 1 == post.comments[0].id
assert 2 == post.comments[1].id
def test_inherited_sequencefield(self):
class Base(Document):
name = StringField()
counter = SequenceField()
meta = {"abstract": True}
class Foo(Base):
pass
class Bar(Base):
pass
bar = Bar(name="Bar")
bar.save()
foo = Foo(name="Foo")
foo.save()
assert "base.counter" in self.db["mongoengine.counters"].find().distinct("_id")
assert not (
("foo.counter" or "bar.counter")
in self.db["mongoengine.counters"].find().distinct("_id")
)
assert foo.counter != bar.counter
assert foo._fields["counter"].owner_document == Base
assert bar._fields["counter"].owner_document == Base
def test_no_inherited_sequencefield(self):
class Base(Document):
name = StringField()
meta = {"abstract": True}
class Foo(Base):
counter = SequenceField()
class Bar(Base):
counter = SequenceField()
bar = Bar(name="Bar")
bar.save()
foo = Foo(name="Foo")
foo.save()
assert "base.counter" not in self.db["mongoengine.counters"].find().distinct(
"_id"
)
existing_counters = self.db["mongoengine.counters"].find().distinct("_id")
assert "foo.counter" in existing_counters
assert "bar.counter" in existing_counters
assert foo.counter == bar.counter
assert foo._fields["counter"].owner_document == Foo
assert bar._fields["counter"].owner_document == Bar
def test_sequence_setattr_not_incrementing_counter(self):
class Person(DynamicDocument):
id = SequenceField(primary_key=True)
name = StringField()
self.db["mongoengine.counters"].drop()
Person.drop_collection()
for x in range(10):
Person(name="Person %s" % x).save()
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
# Setting SequenceField field value should not increment counter:
new_person = Person()
new_person.id = 1100
# Counter should still be at 10
c = self.db["mongoengine.counters"].find_one({"_id": "person.id"})
assert c["next"] == 10
| TestSequenceField |
python | python-attrs__attrs | tests/test_validators.py | {
"start": 27532,
"end": 29124
} | class ____:
"""
Tests for `min_len`.
"""
MIN_LENGTH = 2
def test_in_all(self):
"""
validator is in ``__all__``.
"""
assert min_len.__name__ in validator_module.__all__
def test_retrieve_min_len(self):
"""
The configured min. length can be extracted from the Attribute
"""
@attr.s
class Tester:
value = attr.ib(validator=min_len(self.MIN_LENGTH))
assert fields(Tester).value.validator.min_length == self.MIN_LENGTH
@pytest.mark.parametrize(
"value",
[
"foo",
"spam",
list(range(MIN_LENGTH)),
{"spam": 3, "eggs": 4},
],
)
def test_check_valid(self, value):
"""
Silent if len(value) => min_len.
Values can be strings and other iterables.
"""
@attr.s
class Tester:
value = attr.ib(validator=min_len(self.MIN_LENGTH))
Tester(value) # shouldn't raise exceptions
@pytest.mark.parametrize(
"value",
[
"",
list(range(1)),
],
)
def test_check_invalid(self, value):
"""
Raise ValueError if len(value) < min_len.
"""
@attr.s
class Tester:
value = attr.ib(validator=min_len(self.MIN_LENGTH))
with pytest.raises(ValueError):
Tester(value)
def test_repr(self):
"""
__repr__ is meaningful.
"""
assert repr(min_len(23)) == "<min_len validator for 23>"
| TestMinLen |
python | eventlet__eventlet | eventlet/green/http/client.py | {
"start": 10595,
"end": 31052
} | class ____(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.headers:
print("header:", hdr, end=" ")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
"""Always returns True"""
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b and return the number
of bytes read.
"""
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
# We are at the end of chunk. dicard chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(b''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return b"".join(s)
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
total_bytes = 0
mvb = memoryview(b)
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
n = self.fp.readinto(temp_mvb)
else:
n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
total_bytes += n
return total_bytes
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
try:
result = self.fp.read1(n)
except ValueError:
if n >= 0:
raise
# some implementations, like BufferedReader, don't support -1
# Read an arbitrarily selected largeish chunk.
result = self.fp.read1(16*1024)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
'''Returns an instance of the class mimetools.Message containing
meta-information associated with the URL.
When the method is HTTP, these headers are those returned by
the server at the head of the retrieved HTML page (including
Content-Length and Content-Type).
When the method is FTP, a Content-Length header will be
present if (as is now usual) the server passed back a file
length in response to the FTP retrieval request. A
Content-Type header will be present if the MIME type can be
guessed.
When the method is local-file, returned headers will include
a Date representing the file's last-modified time, a
Content-Length giving file size, and a Content-Type
containing a guess at the file's type. See also the
description of the mimetools module.
'''
return self.headers
def geturl(self):
'''Return the real URL of the page.
In some cases, the HTTP server redirects a client to another
URL. The urlopen() function handles this transparently, but in
some cases the caller needs to know which URL the client was
redirected to. The geturl() method can be used to get at this
redirected URL.
'''
return self.url
def getcode(self):
'''Return the HTTP status code that was sent with the response,
or None if the URL is not an HTTP URL.
'''
return self.status
| HTTPResponse |
python | streamlit__streamlit | lib/tests/streamlit/web/server/oauth_authlib_routes_test.py | {
"start": 4930,
"end": 5748
} | class ____(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
return tornado.web.Application(
[
(
r"/auth/logout",
AuthLogoutHandler,
{"base_url": ""},
)
]
)
def test_logout_success(self):
"""Test logout handler success clear cookie."""
response = self.fetch("/auth/logout", follow_redirects=False)
assert response.code == 302
assert response.headers["Location"] == "/"
assert '_streamlit_user="";' in response.headers["Set-Cookie"]
@patch(
"streamlit.auth_util.secrets_singleton",
MagicMock(
load_if_toml_exists=MagicMock(return_value=True),
get=MagicMock(return_value=SECRETS_MOCK),
),
)
| LogoutHandlerTest |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 36201,
"end": 37004
} | class ____(ColumnConstraintWithMetadata):
def __init__(self, minim=None, maxim=None, columns=None, raise_or_typecheck=True):
self.name = self.__class__.__name__
description = f"Confirms values are between {minim} and {maxim}"
super().__init__(
description=description,
validation_fn=column_range_validation_factory(minim=minim, maxim=maxim),
resulting_exception=ColumnWithMetadataException,
raise_or_typecheck=raise_or_typecheck,
)
self.columns = columns
def validate(self, data, *args, **kwargs):
if self.columns is None:
self.columns = list(data.columns)
self.columns.extend(args)
return super().validate(data, *self.columns, **kwargs)
| ColumnRangeConstraintWithMetadata |
python | kamyu104__LeetCode-Solutions | Python/flip-game-ii.py | {
"start": 2293,
"end": 3032
} | class ____(object):
def canWin(self, s):
"""
:type s: str
:rtype: bool
"""
i, n = 0, len(s) - 1
is_win = False
while not is_win and i < n: # O(n) time
if s[i] == '+':
while not is_win and i < n and s[i+1] == '+': # O(c) time
# t(n, c) = c * (t(n, c-1) + n) + n = ...
# = c! * t(n, 0) + n * c! * (c + 1) * (1/0! + 1/1! + ... 1/c!)
# = n * c! + n * c! * (c + 1) * O(e) = O(c * n * c!)
is_win = not self.canWin(s[:i] + '--' + s[i+2:]) # O(n) space
i += 1
i += 1
return is_win
| Solution3 |
python | gevent__gevent | src/gevent/_ffi/watcher.py | {
"start": 18119,
"end": 18568
} | class ____(object):
_watcher_type = 'async'
def send(self):
raise NotImplementedError()
def send_ignoring_arg(self, _ignored):
"""
Calling compatibility with ``greenlet.switch(arg)``
as used by waiters that have ``rawlink``.
This is an advanced method, not usually needed.
"""
return self.send()
@property
def pending(self):
raise NotImplementedError()
| AsyncMixin |
python | PyCQA__pylint | tests/functional/u/unexpected_keyword_arg.py | {
"start": 1997,
"end": 4130
} | class ____:
pass
@DecoratorClass
def crash_test():
pass
crash_test(internal_arg=2) # [unexpected-keyword-arg]
# Test that we don't emit a false positive for uninferable decorators
@unknown_decorator
def crash_test_two():
pass
crash_test_two(internal_arg=2)
# Test that we don't crash on decorators that don't return anything
def no_return_decorator(func):
print(func)
@no_return_decorator
def test_no_return():
pass
test_no_return(internal_arg=2) # [unexpected-keyword-arg]
def ambiguous_func1(arg1):
print(arg1)
def ambiguous_func2(other_arg1):
print(other_arg1)
func1 = ambiguous_func1 if unknown else ambiguous_func2
func1(other_arg1=1)
def ambiguous_func3(arg1=None):
print(arg1)
func2 = ambiguous_func1 if unknown else ambiguous_func3
func2()
def ambiguous_func4(arg1=print):
print(arg1)
def ambiguous_func5(arg1=input):
print(arg1)
def ambiguous_func6(arg1=42):
print(arg1)
# Two functions with same keyword argument but different defaults (names)
func3 = ambiguous_func4 if unknown else ambiguous_func5
func3()
# Two functions with same keyword argument but different defaults (constants)
func4 = ambiguous_func3 if unknown else ambiguous_func6
func4()
# Two functions with same keyword argument but mixed defaults (names, constant)
func5 = ambiguous_func3 if unknown else ambiguous_func5
func5()
# pylint: disable=unused-argument
if do_something():
class AmbiguousClass:
def __init__(self, feeling="fine"):
...
else:
class AmbiguousClass:
def __init__(self, feeling="fine", thinking="hard"):
...
AmbiguousClass(feeling="so-so")
AmbiguousClass(thinking="carefully")
AmbiguousClass(worrying="little") # we could raise here if we infer_all()
if do_something():
class NotAmbiguousClass:
def __init__(self, feeling="fine"):
...
else:
class NotAmbiguousClass:
def __init__(self, feeling="fine"):
...
NotAmbiguousClass(feeling="so-so")
NotAmbiguousClass(worrying="little") # [unexpected-keyword-arg]
# pylint: enable=unused-argument
| DecoratorClass |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/translate.py | {
"start": 5836,
"end": 6676
} | class ____(AbstractContextManager[None]):
"""
Run a block with current node set in the visitor.
Parameters
----------
visitor
The internal Rust visitor object
n
The node to set as the current root.
Notes
-----
This is useful for translating expressions with a given node
active, restoring the node when the block exits.
"""
__slots__ = ("n", "visitor")
visitor: NodeTraverser
n: int
def __init__(self, visitor: NodeTraverser, n: int) -> None:
self.visitor = visitor
self.n = n
def __enter__(self) -> None:
n = self.visitor.get_node()
self.visitor.set_node(self.n)
self.n = n
def __exit__(self, *args: Any) -> None:
self.visitor.set_node(self.n)
noop_context: nullcontext[None] = nullcontext()
| set_node |
python | sqlalchemy__sqlalchemy | test/base/test_examples.py | {
"start": 712,
"end": 799
} | class ____(test_qualify.QualifyCompileTest, fixtures.TestBase):
pass
| QualifyCompileTest |
python | django-guardian__django-guardian | guardian/exceptions.py | {
"start": 457,
"end": 571
} | class ____(GuardianError):
"""Raised when the app name for a permission is incorrect."""
pass
| WrongAppError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outreach/components.py | {
"start": 214,
"end": 1157
} | class ____(RecordExtractor):
def extract_records(self, response: requests.Response, **kwargs) -> List[Mapping[str, Any]]:
data = response.json().get("data")
extracted_records = []
self.primary_key = "id"
if not data:
return extracted_records
for element in data:
relationships: Dict[str, List[int]] = dict()
for r_type, relations in element.get("relationships", {}).items():
if relations.get("data"):
data = relations.get("data", [])
if isinstance(data, dict):
data = [data]
relationships[f"{r_type}"] = [e.get("id") for e in data]
extracted_record = {**element.get("attributes"), **{self.primary_key: element[self.primary_key], **relationships}}
extracted_records.append(extracted_record)
return extracted_records
| CustomExtractor |
python | pytorch__pytorch | torch/_ops.py | {
"start": 59612,
"end": 62243
} | class ____(types.ModuleType):
__file__ = "_ops.py"
def __init__(self):
super().__init__("torch.ops")
self.loaded_libraries = set()
self.higher_order = _HigherOrderNamespace()
self._dir = []
def __getattr__(self, name: str) -> _OpNamespace:
# Here we are creating `torch.ops.my_namespace`
namespace = _OpNamespace(name)
setattr(self, name, namespace)
self._dir.append(name)
return namespace
def __iter__(self) -> Iterator[str]:
return iter(self._dir)
def import_module(self, module):
"""
Imports a Python module that has torch.library registrations.
Generally, to extend PyTorch with custom operators, a user will
create a Python module whose import triggers registration of
the custom operators via a torch.ops.load_library call or a call
to one or more torch.library.* APIs.
It is unexpected for Python modules to have side effects, so some
linters and formatters will complain. Use this API to import Python
modules that contain these torch.library side effects.
Args:
module (str): The name of the Python module to import
"""
importlib.import_module(module)
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom operators with the PyTorch JIT runtime. This allows dynamically
loading custom operators. For this, you should compile your operator
and the static registration code into a shared library object, and then
call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
shared object.
After the library is loaded, it is added to the
``torch.ops.loaded_libraries`` attribute, a set that may be inspected
for the paths of all libraries loaded using this function.
Args:
path (str): A path to a shared library to load.
"""
path = _utils_internal.resolve_library_path(path)
with dl_open_guard():
# Import the shared library into the process, thus running its
# static (global) initialization code in order to register custom
# operators with the JIT.
try:
ctypes.CDLL(path)
except Exception as e:
raise OSError(f"Could not load this library: {path}") from e
self.loaded_libraries.add(path)
# The ops "namespace"
ops: _Ops = _Ops()
| _Ops |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/tooltips.py | {
"start": 85,
"end": 410
} | class ____(App[None]):
TOOLTIP_DELAY = 0.4
def compose(self) -> ComposeResult:
progress_bar = ProgressBar(100, show_eta=False)
progress_bar.advance(10)
progress_bar.tooltip = "Hello, Tooltip!"
yield progress_bar
if __name__ == "__main__":
app = TooltipApp()
app.run()
| TooltipApp |
python | pypa__warehouse | warehouse/tasks.py | {
"start": 860,
"end": 1117
} | class ____(celery.backends.redis.RedisBackend):
def _params_from_url(self, url, defaults):
params = super()._params_from_url(url, defaults)
params.update({"connection_class": self.redis.SSLConnection})
return params
| TLSRedisBackend |
python | apache__airflow | airflow-core/src/airflow/models/dag_version.py | {
"start": 1522,
"end": 7980
} | class ____(Base):
"""Model to track the versions of DAGs in the database."""
__tablename__ = "dag_version"
id: Mapped[str] = mapped_column(UUIDType(binary=False), primary_key=True, default=uuid6.uuid7)
version_number: Mapped[int] = mapped_column(Integer, nullable=False, default=1)
dag_id: Mapped[str] = mapped_column(
StringID(), ForeignKey("dag.dag_id", ondelete="CASCADE"), nullable=False
)
dag_model = relationship("DagModel", back_populates="dag_versions")
bundle_name: Mapped[str | None] = mapped_column(StringID(), nullable=True)
bundle_version: Mapped[str | None] = mapped_column(StringID(), nullable=True)
bundle = relationship(
"DagBundleModel",
primaryjoin="foreign(DagVersion.bundle_name) == DagBundleModel.name",
uselist=False,
viewonly=True,
)
dag_code = relationship(
"DagCode",
back_populates="dag_version",
uselist=False,
cascade="all, delete, delete-orphan",
cascade_backrefs=False,
)
serialized_dag = relationship(
"SerializedDagModel",
back_populates="dag_version",
uselist=False,
cascade="all, delete, delete-orphan",
cascade_backrefs=False,
)
task_instances = relationship("TaskInstance", back_populates="dag_version")
created_at: Mapped[datetime] = mapped_column(UtcDateTime, nullable=False, default=timezone.utcnow)
last_updated: Mapped[datetime] = mapped_column(
UtcDateTime, nullable=False, default=timezone.utcnow, onupdate=timezone.utcnow
)
__table_args__ = (
UniqueConstraint("dag_id", "version_number", name="dag_id_v_name_v_number_unique_constraint"),
)
def __repr__(self):
"""Represent the object as a string."""
return f"<DagVersion {self.dag_id} {self.version}>"
@property
def bundle_url(self) -> str | None:
"""Render the bundle URL using the joined bundle metadata if available."""
# Prefer using the joined bundle relationship when present to avoid extra queries
if getattr(self, "bundle", None) is not None and hasattr(self.bundle, "signed_url_template"):
return self.bundle.render_url(self.bundle_version)
# fallback to the deprecated option if the bundle model does not have a signed_url_template
# attribute
if self.bundle_name is None:
return None
try:
return DagBundlesManager().view_url(self.bundle_name, self.bundle_version)
except ValueError:
return None
@classmethod
@provide_session
def write_dag(
cls,
*,
dag_id: str,
bundle_name: str,
bundle_version: str | None = None,
version_number: int = 1,
session: Session = NEW_SESSION,
) -> DagVersion:
"""
Write a new DagVersion into database.
Checks if a version of the DAG exists and increments the version number if it does.
:param dag_id: The DAG ID.
:param version_number: The version number.
:param session: The database session.
:return: The DagVersion object.
"""
existing_dag_version = session.scalar(
with_row_locks(cls._latest_version_select(dag_id), of=DagVersion, session=session, nowait=True)
)
if existing_dag_version:
version_number = existing_dag_version.version_number + 1
dag_version = DagVersion(
dag_id=dag_id,
version_number=version_number,
bundle_name=bundle_name,
bundle_version=bundle_version,
)
log.debug("Writing DagVersion %s to the DB", dag_version)
session.add(dag_version)
log.debug("DagVersion %s written to the DB", dag_version)
return dag_version
@classmethod
def _latest_version_select(
cls,
dag_id: str,
bundle_version: str | None = None,
load_dag_model: bool = False,
load_bundle_model: bool = False,
) -> Select:
"""
Get the select object to get the latest version of the DAG.
:param dag_id: The DAG ID.
:return: The select object.
"""
query = select(cls).where(cls.dag_id == dag_id)
if bundle_version:
query = query.where(cls.bundle_version == bundle_version)
if load_dag_model:
query = query.options(joinedload(cls.dag_model))
if load_bundle_model:
query = query.options(joinedload(cls.bundle))
query = query.order_by(cls.created_at.desc()).limit(1)
return query
@classmethod
@provide_session
def get_latest_version(
cls,
dag_id: str,
*,
bundle_version: str | None = None,
load_dag_model: bool = False,
load_bundle_model: bool = False,
session: Session = NEW_SESSION,
) -> DagVersion | None:
"""
Get the latest version of the DAG.
:param dag_id: The DAG ID.
:param session: The database session.
:param load_dag_model: Whether to load the DAG model.
:param load_bundle_model: Whether to load the DagBundle model.
:return: The latest version of the DAG or None if not found.
"""
return session.scalar(
cls._latest_version_select(
dag_id,
bundle_version=bundle_version,
load_dag_model=load_dag_model,
load_bundle_model=load_bundle_model,
)
)
@classmethod
@provide_session
def get_version(
cls,
dag_id: str,
version_number: int | None = None,
*,
session: Session = NEW_SESSION,
) -> DagVersion | None:
"""
Get the version of the DAG.
:param dag_id: The DAG ID.
:param version_number: The version number.
:param session: The database session.
:return: The version of the DAG or None if not found.
"""
version_select_obj = select(cls).where(cls.dag_id == dag_id)
if version_number:
version_select_obj = version_select_obj.where(cls.version_number == version_number)
return session.scalar(version_select_obj.order_by(cls.id.desc()).limit(1))
@property
def version(self) -> str:
"""A human-friendly representation of the version."""
return f"{self.dag_id}-{self.version_number}"
| DagVersion |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/layout.py | {
"start": 476,
"end": 13388
} | class ____:
"""
The layout for a prompt_toolkit
:class:`~prompt_toolkit.application.Application`.
This also keeps track of which user control is focused.
:param container: The "root" container for the layout.
:param focused_element: element to be focused initially. (Can be anything
the `focus` function accepts.)
"""
def __init__(
self,
container: AnyContainer,
focused_element: FocusableElement | None = None,
) -> None:
self.container = to_container(container)
self._stack: list[Window] = []
# Map search BufferControl back to the original BufferControl.
# This is used to keep track of when exactly we are searching, and for
# applying the search.
# When a link exists in this dictionary, that means the search is
# currently active.
# Map: search_buffer_control -> original buffer control.
self.search_links: dict[SearchBufferControl, BufferControl] = {}
# Mapping that maps the children in the layout to their parent.
# This relationship is calculated dynamically, each time when the UI
# is rendered. (UI elements have only references to their children.)
self._child_to_parent: dict[Container, Container] = {}
if focused_element is None:
try:
self._stack.append(next(self.find_all_windows()))
except StopIteration as e:
raise InvalidLayoutError(
"Invalid layout. The layout does not contain any Window object."
) from e
else:
self.focus(focused_element)
# List of visible windows.
self.visible_windows: list[Window] = [] # List of `Window` objects.
def __repr__(self) -> str:
return f"Layout({self.container!r}, current_window={self.current_window!r})"
def find_all_windows(self) -> Generator[Window, None, None]:
"""
Find all the :class:`.UIControl` objects in this layout.
"""
for item in self.walk():
if isinstance(item, Window):
yield item
def find_all_controls(self) -> Iterable[UIControl]:
for container in self.find_all_windows():
yield container.content
def focus(self, value: FocusableElement) -> None:
"""
Focus the given UI element.
`value` can be either:
- a :class:`.UIControl`
- a :class:`.Buffer` instance or the name of a :class:`.Buffer`
- a :class:`.Window`
- Any container object. In this case we will focus the :class:`.Window`
from this container that was focused most recent, or the very first
focusable :class:`.Window` of the container.
"""
# BufferControl by buffer name.
if isinstance(value, str):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer.name == value:
self.focus(control)
return
raise ValueError(f"Couldn't find Buffer in the current layout: {value!r}.")
# BufferControl by buffer object.
elif isinstance(value, Buffer):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer == value:
self.focus(control)
return
raise ValueError(f"Couldn't find Buffer in the current layout: {value!r}.")
# Focus UIControl.
elif isinstance(value, UIControl):
if value not in self.find_all_controls():
raise ValueError(
"Invalid value. Container does not appear in the layout."
)
if not value.is_focusable():
raise ValueError("Invalid value. UIControl is not focusable.")
self.current_control = value
# Otherwise, expecting any Container object.
else:
value = to_container(value)
if isinstance(value, Window):
# This is a `Window`: focus that.
if value not in self.find_all_windows():
raise ValueError(
f"Invalid value. Window does not appear in the layout: {value!r}"
)
self.current_window = value
else:
# Focus a window in this container.
# If we have many windows as part of this container, and some
# of them have been focused before, take the last focused
# item. (This is very useful when the UI is composed of more
# complex sub components.)
windows = []
for c in walk(value, skip_hidden=True):
if isinstance(c, Window) and c.content.is_focusable():
windows.append(c)
# Take the first one that was focused before.
for w in reversed(self._stack):
if w in windows:
self.current_window = w
return
# None was focused before: take the very first focusable window.
if windows:
self.current_window = windows[0]
return
raise ValueError(
f"Invalid value. Container cannot be focused: {value!r}"
)
def has_focus(self, value: FocusableElement) -> bool:
"""
Check whether the given control has the focus.
:param value: :class:`.UIControl` or :class:`.Window` instance.
"""
if isinstance(value, str):
if self.current_buffer is None:
return False
return self.current_buffer.name == value
if isinstance(value, Buffer):
return self.current_buffer == value
if isinstance(value, UIControl):
return self.current_control == value
else:
value = to_container(value)
if isinstance(value, Window):
return self.current_window == value
else:
# Check whether this "container" is focused. This is true if
# one of the elements inside is focused.
for element in walk(value):
if element == self.current_window:
return True
return False
@property
def current_control(self) -> UIControl:
"""
Get the :class:`.UIControl` to currently has the focus.
"""
return self._stack[-1].content
@current_control.setter
def current_control(self, control: UIControl) -> None:
"""
Set the :class:`.UIControl` to receive the focus.
"""
for window in self.find_all_windows():
if window.content == control:
self.current_window = window
return
raise ValueError("Control not found in the user interface.")
@property
def current_window(self) -> Window:
"Return the :class:`.Window` object that is currently focused."
return self._stack[-1]
@current_window.setter
def current_window(self, value: Window) -> None:
"Set the :class:`.Window` object to be currently focused."
self._stack.append(value)
@property
def is_searching(self) -> bool:
"True if we are searching right now."
return self.current_control in self.search_links
@property
def search_target_buffer_control(self) -> BufferControl | None:
"""
Return the :class:`.BufferControl` in which we are searching or `None`.
"""
# Not every `UIControl` is a `BufferControl`. This only applies to
# `BufferControl`.
control = self.current_control
if isinstance(control, SearchBufferControl):
return self.search_links.get(control)
else:
return None
def get_focusable_windows(self) -> Iterable[Window]:
"""
Return all the :class:`.Window` objects which are focusable (in the
'modal' area).
"""
for w in self.walk_through_modal_area():
if isinstance(w, Window) and w.content.is_focusable():
yield w
def get_visible_focusable_windows(self) -> list[Window]:
"""
Return a list of :class:`.Window` objects that are focusable.
"""
# focusable windows are windows that are visible, but also part of the
# modal container. Make sure to keep the ordering.
visible_windows = self.visible_windows
return [w for w in self.get_focusable_windows() if w in visible_windows]
@property
def current_buffer(self) -> Buffer | None:
"""
The currently focused :class:`~.Buffer` or `None`.
"""
ui_control = self.current_control
if isinstance(ui_control, BufferControl):
return ui_control.buffer
return None
def get_buffer_by_name(self, buffer_name: str) -> Buffer | None:
"""
Look in the layout for a buffer with the given name.
Return `None` when nothing was found.
"""
for w in self.walk():
if isinstance(w, Window) and isinstance(w.content, BufferControl):
if w.content.buffer.name == buffer_name:
return w.content.buffer
return None
@property
def buffer_has_focus(self) -> bool:
"""
Return `True` if the currently focused control is a
:class:`.BufferControl`. (For instance, used to determine whether the
default key bindings should be active or not.)
"""
ui_control = self.current_control
return isinstance(ui_control, BufferControl)
@property
def previous_control(self) -> UIControl:
"""
Get the :class:`.UIControl` to previously had the focus.
"""
try:
return self._stack[-2].content
except IndexError:
return self._stack[-1].content
def focus_last(self) -> None:
"""
Give the focus to the last focused control.
"""
if len(self._stack) > 1:
self._stack = self._stack[:-1]
def focus_next(self) -> None:
"""
Focus the next visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index + 1) % len(windows)
self.focus(windows[index])
def focus_previous(self) -> None:
"""
Focus the previous visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index - 1) % len(windows)
self.focus(windows[index])
def walk(self) -> Iterable[Container]:
"""
Walk through all the layout nodes (and their children) and yield them.
"""
yield from walk(self.container)
def walk_through_modal_area(self) -> Iterable[Container]:
"""
Walk through all the containers which are in the current 'modal' part
of the layout.
"""
# Go up in the tree, and find the root. (it will be a part of the
# layout, if the focus is in a modal part.)
root: Container = self.current_window
while not root.is_modal() and root in self._child_to_parent:
root = self._child_to_parent[root]
yield from walk(root)
def update_parents_relations(self) -> None:
"""
Update child->parent relationships mapping.
"""
parents = {}
def walk(e: Container) -> None:
for c in e.get_children():
parents[c] = e
walk(c)
walk(self.container)
self._child_to_parent = parents
def reset(self) -> None:
# Remove all search links when the UI starts.
# (Important, for instance when control-c is been pressed while
# searching. The prompt cancels, but next `run()` call the search
# links are still there.)
self.search_links.clear()
self.container.reset()
def get_parent(self, container: Container) -> Container | None:
"""
Return the parent container for the given container, or ``None``, if it
wasn't found.
"""
try:
return self._child_to_parent[container]
except KeyError:
return None
| Layout |
python | django__django | tests/utils_tests/test_decorators.py | {
"start": 585,
"end": 734
} | class ____:
def __call__(self, request):
return HttpResponse()
class_process_view = process_view_dec(ClassProcessView())
| ClassProcessView |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/interface.py | {
"start": 7850,
"end": 11166
} | class ____:
"""
Aggregates the results across all heuristics.
It saves the individual results from each heuristic and exposes an aggregated view.
"""
_heuristic_results: dict[
HeuristicInterface, TestPrioritizations
] # Key is the Heuristic's name. Dicts will preserve the order of insertion, which is important for sharding
_all_tests: frozenset[str]
def __init__(self, all_tests: list[str]) -> None:
self._all_tests = frozenset(all_tests)
self._heuristic_results = {}
self.validate()
def validate(self) -> None:
for heuristic, heuristic_results in self._heuristic_results.items():
heuristic_results.validate()
assert heuristic_results._original_tests == self._all_tests, (
f"Tests in {heuristic.name} are not the same as the tests in the AggregatedHeuristics"
)
def add_heuristic_results(
self, heuristic: HeuristicInterface, heuristic_results: TestPrioritizations
) -> None:
if heuristic in self._heuristic_results:
raise ValueError(f"We already have heuristics for {heuristic.name}")
self._heuristic_results[heuristic] = heuristic_results
self.validate()
def get_aggregated_priorities(
self, include_trial: bool = False
) -> TestPrioritizations:
"""
Returns the aggregated priorities across all heuristics.
"""
valid_heuristics = {
heuristic: heuristic_results
for heuristic, heuristic_results in self._heuristic_results.items()
if not heuristic.trial_mode or include_trial
}
new_tp = TestPrioritizations(self._all_tests, {})
for heuristic_results in valid_heuristics.values():
for score, testrun in heuristic_results._traverse_scores():
new_tp.add_test_score(testrun, score)
new_tp.validate()
return new_tp
def get_test_stats(self, test: TestRun) -> dict[str, Any]:
"""
Returns the aggregated statistics for a given test.
"""
stats: dict[str, Any] = {
"test_name": test.test_file,
"test_filters": test.get_pytest_filter(),
}
# Get metrics about the heuristics used
heuristics = []
for heuristic, heuristic_results in self._heuristic_results.items():
metrics = heuristic_results.get_priority_info_for_test(test)
metrics["heuristic_name"] = heuristic.name
metrics["trial_mode"] = heuristic.trial_mode
heuristics.append(metrics)
stats["heuristics"] = heuristics
stats["aggregated"] = (
self.get_aggregated_priorities().get_priority_info_for_test(test)
)
stats["aggregated_trial"] = self.get_aggregated_priorities(
include_trial=True
).get_priority_info_for_test(test)
return stats
def to_json(self) -> dict[str, Any]:
"""
Returns a JSON dict that describes this AggregatedHeuristics object.
"""
json_dict: dict[str, Any] = {}
for heuristic, heuristic_results in self._heuristic_results.items():
json_dict[heuristic.name] = heuristic_results.to_json()
return json_dict
| AggregatedHeuristics |
python | python-poetry__poetry | src/poetry/console/commands/config.py | {
"start": 1359,
"end": 15704
} | class ____(Command):
name = "config"
description = "Manages configuration settings."
arguments: ClassVar[list[Argument]] = [
argument("key", "Setting key.", optional=True),
argument("value", "Setting value.", optional=True, multiple=True),
]
options: ClassVar[list[Option]] = [
option("list", None, "List configuration settings."),
option("unset", None, "Unset configuration setting."),
option("local", None, "Set/Get from the project's local configuration."),
option("migrate", None, "Migrate outdated configuration settings."),
]
help = """\
This command allows you to edit the poetry config settings and repositories.
To add a repository:
<comment>poetry config repositories.foo https://bar.com/simple/</comment>
To remove a repository (repo is a short alias for repositories):
<comment>poetry config --unset repo.foo</comment>"""
LIST_PROHIBITED_SETTINGS: ClassVar[set[str]] = {"http-basic", "pypi-token"}
@property
def unique_config_values(self) -> dict[str, tuple[Any, Any]]:
unique_config_values = {
"cache-dir": (str, lambda val: str(Path(val))),
"data-dir": (str, lambda val: str(Path(val))),
"virtualenvs.create": (boolean_validator, boolean_normalizer),
"virtualenvs.in-project": (boolean_validator, boolean_normalizer),
"virtualenvs.options.always-copy": (boolean_validator, boolean_normalizer),
"virtualenvs.options.system-site-packages": (
boolean_validator,
boolean_normalizer,
),
"virtualenvs.options.no-pip": (boolean_validator, boolean_normalizer),
"virtualenvs.path": (str, lambda val: str(Path(val))),
"virtualenvs.use-poetry-python": (boolean_validator, boolean_normalizer),
"virtualenvs.prompt": (str, str),
"system-git-client": (boolean_validator, boolean_normalizer),
"requests.max-retries": (lambda val: int(val) >= 0, int_normalizer),
"installer.re-resolve": (boolean_validator, boolean_normalizer),
"installer.parallel": (boolean_validator, boolean_normalizer),
"installer.max-workers": (lambda val: int(val) > 0, int_normalizer),
"installer.no-binary": (
PackageFilterPolicy.validator,
PackageFilterPolicy.normalize,
),
"installer.only-binary": (
PackageFilterPolicy.validator,
PackageFilterPolicy.normalize,
),
"solver.lazy-wheel": (boolean_validator, boolean_normalizer),
"keyring.enabled": (boolean_validator, boolean_normalizer),
"python.installation-dir": (str, lambda val: str(Path(val))),
}
return unique_config_values
def handle(self) -> int:
from pathlib import Path
from poetry.core.pyproject.exceptions import PyProjectError
from poetry.config.config import Config
from poetry.config.file_config_source import FileConfigSource
from poetry.locations import CONFIG_DIR
from poetry.toml.file import TOMLFile
if self.option("migrate"):
self._migrate()
config = Config.create()
config_file = TOMLFile(CONFIG_DIR / "config.toml")
try:
local_config_file = TOMLFile(self.poetry.file.path.parent / "poetry.toml")
if local_config_file.exists():
config.merge(local_config_file.read())
except (RuntimeError, PyProjectError):
local_config_file = TOMLFile(Path.cwd() / "poetry.toml")
if self.option("local"):
config.set_config_source(FileConfigSource(local_config_file))
if not config_file.exists():
config_file.path.parent.mkdir(parents=True, exist_ok=True)
config_file.path.touch(mode=0o0600)
if self.option("list"):
self._list_configuration(config.all(), config.raw())
return 0
setting_key = self.argument("key")
if not setting_key:
return 0
if self.argument("value") and self.option("unset"):
raise RuntimeError("You can not combine a setting value with --unset")
# show the value if no value is provided
if not self.argument("value") and not self.option("unset"):
if setting_key.split(".")[0] in self.LIST_PROHIBITED_SETTINGS:
raise ValueError(f"Expected a value for {setting_key} setting.")
value: str | dict[str, Any] | list[str]
if m := re.match(
r"installer\.build-config-settings(\.([^.]+))?", self.argument("key")
):
if not m.group(1):
if value := config.get("installer.build-config-settings"):
self._list_configuration(value, value)
else:
self.line("No packages configured with build config settings.")
else:
package_name = canonicalize_name(m.group(2))
key = f"installer.build-config-settings.{package_name}"
if value := config.get(key):
self.line(json.dumps(value))
else:
self.line(
f"No build config settings configured for <c1>{package_name}</>."
)
return 0
elif m := re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key")):
if not m.group(1):
value = {}
if config.get("repositories") is not None:
value = config.get("repositories")
else:
repo = config.get(f"repositories.{m.group(1)}")
if repo is None:
raise ValueError(f"There is no {m.group(1)} repository defined")
value = repo
self.line(str(value))
else:
if setting_key not in self.unique_config_values:
raise ValueError(f"There is no {setting_key} setting.")
value = config.get(setting_key)
if not isinstance(value, str):
value = json.dumps(value)
self.line(value)
return 0
values: list[str] = self.argument("value")
if setting_key in self.unique_config_values:
if self.option("unset"):
config.config_source.remove_property(setting_key)
return 0
return self._handle_single_value(
config.config_source,
setting_key,
self.unique_config_values[setting_key],
values,
)
# handle repositories
m = re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key"))
if m:
if not m.group(1):
raise ValueError("You cannot remove the [repositories] section")
if self.option("unset"):
repo = config.get(f"repositories.{m.group(1)}")
if repo is None:
raise ValueError(f"There is no {m.group(1)} repository defined")
config.config_source.remove_property(f"repositories.{m.group(1)}")
return 0
if len(values) == 1:
url = values[0]
config.config_source.add_property(f"repositories.{m.group(1)}.url", url)
return 0
raise ValueError(
"You must pass the url. "
"Example: poetry config repositories.foo https://bar.com"
)
# handle auth
m = re.match(r"^(http-basic|pypi-token)\.(.+)", self.argument("key"))
if m:
from poetry.utils.password_manager import PasswordManager
password_manager = PasswordManager(config)
if self.option("unset"):
if m.group(1) == "http-basic":
password_manager.delete_http_password(m.group(2))
elif m.group(1) == "pypi-token":
password_manager.delete_pypi_token(m.group(2))
return 0
if m.group(1) == "http-basic":
if len(values) == 1:
username = values[0]
# Only username, so we prompt for password
password = self.secret("Password:")
assert isinstance(password, str)
elif len(values) != 2:
raise ValueError(
"Expected one or two arguments "
f"(username, password), got {len(values)}"
)
else:
username = values[0]
password = values[1]
password_manager.set_http_password(m.group(2), username, password)
elif m.group(1) == "pypi-token":
if len(values) != 1:
raise ValueError(
f"Expected only one argument (token), got {len(values)}"
)
token = values[0]
password_manager.set_pypi_token(m.group(2), token)
return 0
# handle certs
m = re.match(r"certificates\.([^.]+)\.(cert|client-cert)", self.argument("key"))
if m:
repository = m.group(1)
key = m.group(2)
if self.option("unset"):
config.auth_config_source.remove_property(
f"certificates.{repository}.{key}"
)
return 0
if len(values) == 1:
new_value: str | bool = values[0]
if key == "cert" and boolean_validator(values[0]):
new_value = boolean_normalizer(values[0])
config.auth_config_source.add_property(
f"certificates.{repository}.{key}", new_value
)
else:
raise ValueError("You must pass exactly 1 value")
return 0
# handle build config settings
m = re.match(r"installer\.build-config-settings\.([^.]+)", self.argument("key"))
if m:
key = f"installer.build-config-settings.{canonicalize_name(m.group(1))}"
if self.option("unset"):
config.config_source.remove_property(key)
return 0
try:
settings = config.config_source.get_property(key)
except PropertyNotFoundError:
settings = {}
for value in values:
if build_config_setting_validator(value):
config_settings = build_config_setting_normalizer(value)
for setting_name, item in config_settings.items():
settings[setting_name] = item
else:
raise ValueError(
f"Invalid build config setting '{value}'. "
"It must be a valid JSON with each property a string or a list of strings."
)
config.config_source.add_property(key, settings)
return 0
raise ValueError(f"Setting {self.argument('key')} does not exist")
def _handle_single_value(
self,
source: ConfigSource,
key: str,
callbacks: tuple[Any, Any],
values: list[Any],
) -> int:
validator, normalizer = callbacks
if len(values) > 1:
raise RuntimeError("You can only pass one value.")
value = values[0]
if not validator(value):
raise RuntimeError(f'"{value}" is an invalid value for {key}')
source.add_property(key, normalizer(value))
return 0
def _list_configuration(
self, config: dict[str, Any], raw: dict[str, Any], k: str = ""
) -> None:
orig_k = k
for key, value in sorted(config.items()):
if k + key in self.LIST_PROHIBITED_SETTINGS:
continue
raw_val = raw.get(key)
if isinstance(value, dict):
k += f"{key}."
raw_val = cast("dict[str, Any]", raw_val)
self._list_configuration(value, raw_val, k=k)
k = orig_k
continue
elif isinstance(value, list):
value = ", ".join(
json.dumps(val) if isinstance(val, list) else val for val in value
)
value = f"[{value}]"
if k.startswith("repositories."):
message = f"<c1>{k + key}</c1> = <c2>{json.dumps(raw_val)}</c2>"
elif isinstance(raw_val, str) and raw_val != value:
message = (
f"<c1>{k + key}</c1> = <c2>{json.dumps(raw_val)}</c2> # {value}"
)
else:
message = f"<c1>{k + key}</c1> = <c2>{json.dumps(value)}</c2>"
self.line(message)
def _migrate(self) -> None:
from poetry.config.file_config_source import FileConfigSource
from poetry.locations import CONFIG_DIR
from poetry.toml.file import TOMLFile
config_file = TOMLFile(CONFIG_DIR / "config.toml")
if self.option("local"):
config_file = TOMLFile(self.poetry.file.path.parent / "poetry.toml")
if not config_file.exists():
raise RuntimeError("No local config file found")
config_source = FileConfigSource(config_file)
self.io.write_line("Checking for required migrations ...")
required_migrations = [
migration
for migration in CONFIG_MIGRATIONS
if migration.dry_run(config_source, io=self.io)
]
if not required_migrations:
self.io.write_line("Already up to date.")
return
if not self.io.is_interactive() or self.confirm(
"Proceed with migration?: ", False
):
for migration in required_migrations:
migration.apply(config_source)
self.io.write_line("Config migration successfully done.")
| ConfigCommand |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 472825,
"end": 473567
} | class ____(ValueChannelMixin, core.PositionValueDef):
"""
RadiusValue schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "radius"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| RadiusValue |
python | cherrypy__cherrypy | cherrypy/_cpmodpy.py | {
"start": 216,
"end": 3917
} | class ____:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by from the user running
the apache2 instance. Of course if your application
resides in the global site-package this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import io
import logging
import os
import re
import sys
from more_itertools import always_iterable
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
"""Execute pre-initialization functions."""
from mod_python import apache
# Run any setup functions defined by a "PythonOption cherrypy.setup"
# directive.
options = req.get_options()
if 'cherrypy.setup' in options:
for function in options['cherrypy.setup'].split():
atoms = function.split('::', 1)
if len(atoms) == 1:
mod = __import__(atoms[0], globals(), locals())
else:
modname, fname = atoms
mod = __import__(modname, globals(), locals(), [fname])
func = getattr(mod, fname)
func()
cherrypy.config.update(
{
'log.screen': False,
'tools.ignore_headers.on': True,
'tools.ignore_headers.headers': ['Range'],
},
)
engine = cherrypy.engine
if hasattr(engine, 'signal_handler'):
engine.signal_handler.unsubscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.unsubscribe()
engine.autoreload.unsubscribe()
cherrypy.server.unsubscribe()
@engine.subscribe('log')
def _log(msg, level):
newlevel = apache.APLOG_ERR
if logging.DEBUG >= level:
newlevel = apache.APLOG_DEBUG
elif logging.INFO >= level:
newlevel = apache.APLOG_INFO
elif logging.WARNING >= level:
newlevel = apache.APLOG_WARNING
# On Windows, req.server is required or the msg will vanish. See
# http://www.modpython.org/pipermail/mod_python/2003-October/014291.html
# Also, "When server is not specified...LogLevel does not apply..."
apache.log_error(msg, newlevel, req.server)
engine.start()
def cherrypy_cleanup(data):
engine.exit()
try:
# apache.register_cleanup wasn't available until 3.1.4.
apache.register_cleanup(cherrypy_cleanup)
except AttributeError:
req.server.register_cleanup(req, cherrypy_cleanup)
| Root |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/common/strategies.py | {
"start": 622,
"end": 1396
} | class ____(SearchStrategy):
def __init__(self):
super().__init__()
self.__last = None
self.accepted = set()
def do_draw(self, data):
x = bytes(data.draw_integer(0, 255) for _ in range(100))
if x in self.accepted:
return True
ls = self.__last
if ls is None:
if all(x):
self.__last = x
self.accepted.add(x)
return True
else:
return False
diffs = [i for i in range(len(x)) if x[i] != ls[i]]
if len(diffs) == 1:
i = diffs[0]
if x[i] + 1 == ls[i]:
self.__last = x
self.accepted.add(x)
return True
return False
| HardToShrink |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/test_trainer.py | {
"start": 62337,
"end": 65610
} | class ____(Callback):
def state_dict(self) -> dict:
return {"a": None}
def test_on_load_checkpoint_missing_callbacks(tmp_path):
"""Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming."""
model = BoringModel()
chk = ModelCheckpoint(dirpath=tmp_path, save_last=True)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])
trainer.fit(model)
trainer = Trainer(default_root_dir=tmp_path, max_epochs=5)
with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"):
trainer.fit(model, ckpt_path=chk.last_model_path)
def test_module_current_fx_attributes_reset(tmp_path):
"""Ensure that lightning module's attributes related to current fx are reset at the end of execution."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1, enable_checkpointing=False, logger=False)
trainer.fit(model)
assert model._current_fx_name is None
trainer.test(model)
assert model._current_fx_name is None
@pytest.mark.parametrize("fn", ["validate", "test", "predict"])
def test_exception_when_lightning_module_is_not_set_on_trainer(fn):
trainer = Trainer()
trainer_fn = getattr(trainer, fn)
with pytest.raises(TypeError, match=rf"{fn}\(\)` requires a `LightningModule"):
trainer_fn()
@RunIf(min_cuda_gpus=1)
# FixMe: the memory raises to 1024 from expected 512
@pytest.mark.xfail(AssertionError, strict=True, condition=_TORCH_EQUAL_2_8, reason="temporarily disabled for torch 2.8")
def test_multiple_trainer_constant_memory_allocated(tmp_path):
"""This tests ensures calling the trainer several times reset the memory back to 0."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
loss = super().training_step(batch, batch_idx)
self.log("train_loss", loss["loss"])
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.layer.parameters(), lr=0.1)
class Check(Callback):
def on_train_epoch_start(self, trainer, *_):
assert isinstance(trainer.strategy.model, DistributedDataParallel)
def current_memory():
# before measuring the memory force release any leftover allocations, including CUDA tensors
gc.collect()
return torch.cuda.memory_allocated(0)
model = TestModel()
trainer_kwargs = {
"default_root_dir": tmp_path,
"fast_dev_run": True,
"accelerator": "gpu",
"devices": 1,
"strategy": "ddp",
"enable_progress_bar": False,
"callbacks": Check(),
}
trainer = Trainer(**trainer_kwargs)
initial = current_memory()
trainer.fit(model)
assert trainer.strategy.model is model
assert list(trainer.optimizers[0].state.values())[0]["exp_avg_sq"].device == torch.device("cpu")
assert trainer.callback_metrics["train_loss"].device == torch.device("cpu")
assert current_memory() <= initial
deepcopy(trainer)
assert current_memory() <= initial
trainer_2 = Trainer(**trainer_kwargs)
trainer_2.fit(model)
assert current_memory() <= initial
| CustomCallbackOnLoadCheckpoint |
python | joke2k__faker | faker/providers/currency/vi_VN/__init__.py | {
"start": 46,
"end": 431
} | class ____(CurrencyProvider):
# Source: https://vi.wikipedia.org/wiki/%C4%90%E1%BB%93ng_(%C4%91%C6%A1n_v%E1%BB%8B_ti%E1%BB%81n_t%E1%BB%87)#Ti%E1%BB%81n_gi%E1%BA%A5y_-_Ti%E1%BB%81n_polymer # NOQA
price_formats = ["#.##", "%#.##", "%##.##", "%,###.##", "%#,###.##"]
def pricetag(self) -> str:
return "₫" + self.numerify(self.random_element(self.price_formats))
| Provider |
python | numba__numba | numba/cuda/compiler.py | {
"start": 3193,
"end": 3954
} | class ____(LoweringPass):
"""
Create a CUDACodeLibrary for the NativeLowering pass to populate. The
NativeLowering pass will create a code library if none exists, but we need
to set it up with nvvm_options from the flags if they are present.
"""
_name = "create_library"
def __init__(self):
LoweringPass.__init__(self)
def run_pass(self, state):
codegen = state.targetctx.codegen()
name = state.func_id.func_qualname
nvvm_options = state.flags.nvvm_options
state.library = codegen.create_library(name, nvvm_options=nvvm_options)
# Enable object caching upfront so that the library can be serialized.
state.library.enable_object_caching()
return True
| CreateLibrary |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_invite_index.py | {
"start": 1881,
"end": 8379
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-member-invite-index"
method = "post"
def setUp(self) -> None:
self.login_as(self.user)
def invite_all_helper(self, role):
invite_roles = ["owner", "manager", "member"]
user = self.create_user("user@localhost")
member = self.create_member(user=user, organization=self.organization, role=role)
self.login_as(user=user)
# When this is set to True, only roles with the member:admin permission can invite members
self.organization.flags.disable_member_invite = True
self.organization.save()
allowed_roles = member.get_allowed_org_roles_to_invite()
for invite_role in invite_roles:
data = {
"email": f"{invite_role}_1@localhost",
"orgRole": invite_role,
"teams": [self.team.slug],
}
if role == "member" or role == "admin":
response = self.get_success_response(
self.organization.slug, **data, status_code=201
)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.invite_status == InviteStatus.REQUESTED_TO_BE_INVITED.value
elif any(invite_role == allowed_role.id for allowed_role in allowed_roles):
self.get_success_response(self.organization.slug, **data, status_code=201)
else:
self.get_error_response(self.organization.slug, **data, status_code=400)
self.organization.flags.disable_member_invite = False
self.organization.save()
for invite_role in invite_roles:
data = {
"email": f"{invite_role}_2@localhost",
"orgRole": invite_role,
"teams": [self.team.slug],
}
if any(invite_role == allowed_role.id for allowed_role in allowed_roles):
self.get_success_response(self.organization.slug, **data, status_code=201)
else:
self.get_error_response(self.organization.slug, **data, status_code=400)
def invite_to_other_team_helper(self, role):
user = self.create_user("inviter@localhost")
self.create_member(user=user, organization=self.organization, role=role, teams=[self.team])
self.login_as(user=user)
other_team = self.create_team(organization=self.organization, name="Moo Deng's Team")
def get_data(email: str, other_team_invite: bool = False):
team_slug = other_team.slug if other_team_invite else self.team.slug
data: dict[str, str | list] = {
"email": f"{email}@localhost",
"orgRole": "member",
"teams": [team_slug],
}
return data
# members can never invite members if disable_member_invite = True
# an invite request will be created instead of an invite
self.organization.flags.allow_joinleave = True
self.organization.flags.disable_member_invite = True
self.organization.save()
response = self.get_success_response(
self.organization.slug, **get_data("foo1"), status_code=201
)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.invite_status == InviteStatus.REQUESTED_TO_BE_INVITED.value
self.organization.flags.allow_joinleave = False
self.organization.flags.disable_member_invite = True
self.organization.save()
response = self.get_success_response(
self.organization.slug, **get_data("foo2"), status_code=201
)
omi = OrganizationMemberInvite.objects.get(id=response.data["id"])
assert omi.invite_status == InviteStatus.REQUESTED_TO_BE_INVITED.value
# members can only invite members to teams they are in if allow_joinleave = False
self.organization.flags.allow_joinleave = False
self.organization.flags.disable_member_invite = False
self.organization.save()
self.get_success_response(self.organization.slug, **get_data("foo3"), status_code=201)
response = self.get_error_response(
self.organization.slug, **get_data("foo4", True), status_code=400
)
assert (
response.data["teams"][0]
== "You cannot assign members to teams you are not a member of."
)
response = self.get_error_response(
self.organization.slug,
**get_data("foo5", other_team_invite=True),
status_code=400,
)
assert (
response.data["teams"][0]
== "You cannot assign members to teams you are not a member of."
)
# members can invite member to any team if allow_joinleave = True
self.organization.flags.allow_joinleave = True
self.organization.flags.disable_member_invite = False
self.organization.save()
self.get_success_response(self.organization.slug, **get_data("foo6"), status_code=201)
self.get_success_response(self.organization.slug, **get_data("foo7", True), status_code=201)
self.get_success_response(
self.organization.slug,
**get_data("foo8", other_team_invite=True),
status_code=201,
)
def test_owner_invites(self) -> None:
self.invite_all_helper("owner")
def test_manager_invites(self) -> None:
self.invite_all_helper("manager")
def test_admin_invites(self) -> None:
self.invite_all_helper("admin")
self.invite_to_other_team_helper("admin")
def test_member_invites(self) -> None:
self.invite_all_helper("member")
self.invite_to_other_team_helper("member")
def test_respects_feature_flag(self) -> None:
user = self.create_user("baz@example.com")
with Feature({"organizations:invite-members": False}):
data = {"email": user.email, "orgRole": "member", "teams": [self.team.slug]}
self.get_error_response(self.organization.slug, **data, status_code=403)
def test_no_team_invites(self) -> None:
data = {"email": "eric@localhost", "orgRole": "owner", "teams": []}
response = self.get_success_response(self.organization.slug, **data)
assert response.data["email"] == "eric@localhost"
@with_feature("organizations:new-organization-member-invite")
| OrganizationMemberInvitePermissionRoleTest |
python | pytorch__pytorch | torch/ao/quantization/pt2e/export_utils.py | {
"start": 247,
"end": 7990
} | class ____(torch.nn.Module):
"""Class to wrap a callable in an :class:`torch.nn.Module`. Use this if you
are trying to export a callable.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, *args, **kwargs):
"""Simple forward that just calls the ``fn`` provided to :meth:`WrapperModule.__init__`."""
return self.fn(*args, **kwargs)
def model_is_exported(m: torch.nn.Module) -> bool:
"""
Return True if the `torch.nn.Module` was exported, False otherwise
(e.g. if the model was FX symbolically traced or not traced at all).
"""
return isinstance(m, torch.fx.GraphModule) and any(
"val" in n.meta for n in m.graph.nodes
)
def _replace_dropout(m: torch.fx.GraphModule, train_to_eval: bool):
"""
Switch dropout patterns in the model between train and eval modes.
Dropout has different behavior in train vs eval mode. For exported models,
however, calling `model.train()` or `model.eval()` does not automatically switch
the dropout behavior between the two modes, so here we need to rewrite the aten
dropout patterns manually to achieve the same effect.
See https://github.com/pytorch/pytorch/issues/103681.
"""
# Avoid circular dependencies
from .utils import _get_aten_graph_module_for_pattern
# Needed to ensure subgraph matches are self-contained
m.graph.eliminate_dead_code()
m.recompile()
for inplace in [False, True]:
def dropout_train(x):
return F.dropout(x, p=0.5, training=True, inplace=inplace)
def dropout_eval(x):
return F.dropout(x, p=0.5, training=False, inplace=inplace)
example_inputs = (torch.randn(1),)
if train_to_eval:
match_pattern = _get_aten_graph_module_for_pattern(
_WrapperModule(dropout_train),
example_inputs,
)
replacement_pattern = _get_aten_graph_module_for_pattern(
_WrapperModule(dropout_eval),
example_inputs,
)
else:
match_pattern = _get_aten_graph_module_for_pattern(
_WrapperModule(dropout_eval),
example_inputs,
)
replacement_pattern = _get_aten_graph_module_for_pattern(
_WrapperModule(dropout_train),
example_inputs,
)
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
replace_pattern_with_filters(
m,
match_pattern,
replacement_pattern,
match_filters=[],
ignore_literals=True,
)
m.recompile()
def _replace_batchnorm(m: torch.fx.GraphModule, train_to_eval: bool):
"""
Switch batchnorm patterns in the model between train and eval modes.
Batchnorm has different behavior in train vs eval mode. For exported models,
however, calling `model.train()` or `model.eval()` does not automatically switch
the batchnorm behavior between the two modes, so here we need to rewrite the aten
batchnorm patterns manually to achieve the same effect.
"""
# TODO(Leslie): This function still fails to support custom momentum and eps value.
# Enable this support in future updates.
# Avoid circular dependencies
from .utils import _get_aten_graph_module_for_pattern
# Needed to ensure subgraph matches are self-contained
m.graph.eliminate_dead_code()
m.recompile()
def bn_train(
x: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
):
return F.batch_norm(
x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True
)
def bn_eval(
x: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
):
return F.batch_norm(
x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=False
)
example_inputs = (
torch.randn(1, 1, 3, 3), # x
torch.randn(1), # bn_weight
torch.randn(1), # bn_bias
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
device = _assert_and_get_unique_device(m)
is_cuda = device is not None and device.type == "cuda"
bn_train_aten = _get_aten_graph_module_for_pattern(
_WrapperModule(bn_train),
example_inputs,
is_cuda,
)
bn_eval_aten = _get_aten_graph_module_for_pattern(
_WrapperModule(bn_eval),
example_inputs,
is_cuda,
)
if train_to_eval:
match_pattern = bn_train_aten
replacement_pattern = bn_eval_aten
else:
match_pattern = bn_eval_aten
replacement_pattern = bn_train_aten
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
replace_pattern_with_filters(
m,
match_pattern,
replacement_pattern,
match_filters=[],
ignore_literals=True,
)
m.recompile()
# TODO: expose these under this namespace?
def _move_exported_model_to_eval(model: torch.fx.GraphModule):
"""
Move an exported GraphModule to eval mode.
This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm.
QAT users should call this before performing inference on the model.
This call is idempotent; if the model is already in eval mode, nothing will happen.
"""
is_training = getattr(model, _EXPORTED_TRAINING_ATTR, True)
if not is_training:
return model
setattr(model, _EXPORTED_TRAINING_ATTR, False)
_replace_dropout(model, train_to_eval=True)
_replace_batchnorm(model, train_to_eval=True)
return model
def _move_exported_model_to_train(model: torch.fx.GraphModule):
"""
Move an exported GraphModule to train mode.
This is equivalent to model.train() but only for certain special ops like dropout, batchnorm.
QAT users should call this before performing training on the model.
This call is idempotent; if the model is already in train mode, nothing will happen.
"""
is_training = getattr(model, _EXPORTED_TRAINING_ATTR, False)
if is_training:
return model
setattr(model, _EXPORTED_TRAINING_ATTR, True)
_replace_dropout(model, train_to_eval=False)
_replace_batchnorm(model, train_to_eval=False)
return model
def _allow_exported_model_train_eval(model: torch.fx.GraphModule):
"""
Allow users to call `model.train()` and `model.eval()` on an exported model,
but with the effect of changing behavior between the two modes limited to special
ops only, which are currently dropout and batchnorm.
Note: This does not achieve the same effect as what `model.train()` and `model.eval()`
does in eager models, but only provides an approximation. In particular, user code
branching on `training` flag will not function correctly in general because the branch
is already specialized at export time. Additionally, other ops beyond dropout and batchnorm
that have different train/eval behavior will also not be converted properly.
"""
def _train(self, mode: bool = True):
if mode:
_move_exported_model_to_train(self)
else:
_move_exported_model_to_eval(self)
def _eval(self):
_move_exported_model_to_eval(self)
model.train = types.MethodType(_train, model) # type: ignore[method-assign]
model.eval = types.MethodType(_eval, model) # type: ignore[method-assign]
return model
| _WrapperModule |
python | sphinx-doc__sphinx | tests/test_intl/test_intl.py | {
"start": 27572,
"end": 65208
class _MockUnixClock(_MockClock):
    """Object for mocking :func:`time.time_ns` on Unix platforms.

    Since nothing is needed for Unix platforms, this object acts as
    a proxy so that the API is the same as :class:`_MockWindowsClock`.
    """

    def time(self) -> int:
        # No mocking needed on Unix: delegate to the real nanosecond clock.
        return time.time_ns()

    def sleep(self, ds: float) -> None:
        # A real sleep; Unix does not need simulated time.
        time.sleep(ds)
@pytest.fixture
def mock_time_and_i18n() -> Iterator[tuple[pytest.MonkeyPatch, _MockClock]]:
    """Yield a ``(MonkeyPatch, clock)`` pair for deterministic catalog mtimes.

    ``CatalogInfo.write_mo`` is patched so freshly written ``.mo`` files get a
    controlled timestamp; on Windows ``time.time_ns`` itself is patched.
    """
    from sphinx.util.i18n import CatalogInfo

    # save the 'original' definition
    catalog_write_mo = CatalogInfo.write_mo

    def mock_write_mo(self, locale, use_fuzzy=False):
        catalog_write_mo(self, locale, use_fuzzy)
        # ensure that the .mo file being written has a correct fake timestamp
        _set_mtime_ns(self.mo_path, time.time_ns())

    # see: https://github.com/pytest-dev/pytest/issues/363
    with pytest.MonkeyPatch.context() as mock:
        clock: _MockClock
        if os.name == 'posix':
            clock = _MockUnixClock()
        else:
            # When using pytest.mark.parametrize() to emulate test repetition,
            # the teardown phase on Windows fails due to an error apparently in
            # the colorama.ansitowin32 module, so we forcibly disable colors.
            mock.setenv('NO_COLOR', '1')
            # apply the patch only for Windows
            clock = _MockWindowsClock()
            mock.setattr('time.time_ns', clock.time)
        # Use clock.sleep() to emulate time.sleep() but do not try
        # to mock the latter since this might break other libraries.
        mock.setattr('sphinx.util.i18n.CatalogInfo.write_mo', mock_write_mo)
        yield mock, clock
# use the same testroot as 'gettext' since the latter contains less PO files
@sphinx_intl
@pytest.mark.sphinx(
    'dummy',
    testroot='builder-gettext-dont-rebuild-mo',
    freshenv=True,
    copy_test_root=True,
)
def test_dummy_should_rebuild_mo(mock_time_and_i18n, make_app, app_params):
    """Documents must be marked outdated when their .mo catalog becomes newer."""
    mock, clock = mock_time_and_i18n
    assert os.name == 'posix' or clock.time() == 0

    args, kwargs = app_params
    app = make_app(*args, **kwargs)
    po_path, mo_path = _get_bom_intl_path(app)

    # creation time of the those files (order does not matter)
    bom_rst = app.srcdir / 'bom.rst'
    bom_rst_time = time.time_ns()
    index_rst = app.srcdir / 'index.rst'
    index_rst_time = time.time_ns()
    po_time = time.time_ns()

    # patch the 'creation time' of the source files
    assert _set_mtime_ns(po_path, po_time) == po_time
    assert _set_mtime_ns(bom_rst, bom_rst_time) == bom_rst_time
    assert _set_mtime_ns(index_rst, index_rst_time) == index_rst_time

    assert not mo_path.exists()
    # when writing mo files, the counter is updated by calling
    # patch_write_mo which is called to create .mo files (and
    # thus the timestamp of the files are not those given by
    # the OS but our fake ones)
    app.build()
    assert mo_path.exists()
    # Do a real sleep on POSIX, or simulate a sleep on Windows
    # to ensure that calls to time.time_ns() remain consistent.
    clock.sleep(0.1 if os.name == 'posix' else 1)

    # check that the source files were not modified
    assert bom_rst.stat().st_mtime_ns == bom_rst_time
    assert index_rst.stat().st_mtime_ns == index_rst_time
    # check that the 'bom' document is discovered after the .mo
    # file has been written on the disk (i.e., read_doc() is called
    # after the creation of the .mo files)
    assert app.env.all_docs['bom'] > mo_path.stat().st_mtime_ns // 1000

    # Since it is after the build, the number of documents to be updated is 0
    update_targets = _get_update_targets(app)
    assert update_targets[1] == set()
    # When rewriting the timestamp of mo file, the number of documents to be
    # updated will be changed.
    new_mo_time = time.time_ns()
    assert _set_mtime_ns(mo_path, new_mo_time) == new_mo_time
    update_targets = _get_update_targets(app)
    assert update_targets[1] == {'bom'}

    mock.undo()  # explicit call since it's not a context
    # remove all sources for the next test
    shutil.rmtree(app.srcdir, ignore_errors=True)
    time.sleep(0.1 if os.name == 'posix' else 0.5)  # real sleep
@sphinx_intl
@pytest.mark.sphinx(
    'gettext',
    testroot='builder-gettext-dont-rebuild-mo',
    freshenv=True,
    copy_test_root=True,
)
def test_gettext_dont_rebuild_mo(mock_time_and_i18n, app):
    """The gettext builder must not mark documents outdated on .mo changes."""
    mock, clock = mock_time_and_i18n
    assert os.name == 'posix' or clock.time() == 0
    assert app.srcdir.exists()

    # patch the 'creation time' of the source files
    bom_rst = app.srcdir / 'bom.rst'
    bom_rst_time = time.time_ns()
    assert _set_mtime_ns(bom_rst, bom_rst_time) == bom_rst_time
    index_rst = app.srcdir / 'index.rst'
    index_rst_time = time.time_ns()
    assert _set_mtime_ns(index_rst, index_rst_time) == index_rst_time

    # phase 1: create fake MO file in the src directory
    po_path, mo_path = _get_bom_intl_path(app)
    write_mo(mo_path, read_po(po_path))
    po_time = time.time_ns()
    assert _set_mtime_ns(po_path, po_time) == po_time

    # phase 2: build document with gettext builder.
    # The mo file in the srcdir directory is retained.
    app.build()
    # Do a real sleep on POSIX, or simulate a sleep on Windows
    # to ensure that calls to time.time_ns() remain consistent.
    clock.sleep(0.5 if os.name == 'posix' else 1)

    # Since it is after the build, the number of documents to be updated is 0
    update_targets = _get_update_targets(app)
    assert update_targets[1] == set()
    # Even if the timestamp of the mo file is updated, the number of documents
    # to be updated is 0. gettext builder does not rebuild because of mo update.
    new_mo_time = time.time_ns()
    assert _set_mtime_ns(mo_path, new_mo_time) == new_mo_time
    update_targets = _get_update_targets(app)
    assert update_targets[1] == set()

    mock.undo()  # remove the patch
    # remove all sources for the next test
    shutil.rmtree(app.srcdir, ignore_errors=True)
    time.sleep(0.1 if os.name == 'posix' else 0.5)  # real sleep
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_meta(app):
    """Meta tags and hidden toctree captions must be translated in HTML output."""
    app.build()
    # --- test for meta
    result = (app.outdir / 'index.html').read_text(encoding='utf8')
    expected_expr = (
        '<meta content="TESTDATA FOR I18N" name="description" translated="True" />'
    )
    assert expected_expr in result
    expected_expr = (
        '<meta content="I18N, SPHINX, MARKUP" name="keywords" translated="True" />'
    )
    assert expected_expr in result
    expected_expr = '<p class="caption" role="heading"><span class="caption-text">HIDDEN TOC</span></p>'
    assert expected_expr in result
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_footnotes(app):
    """Building a translated document containing footnotes must not error."""
    app.build()
    # --- test for
    # https://github.com/sphinx-doc/sphinx/issues/955
    # cant-build-html-with-footnotes-when-using
    # expect no error by build
    (app.outdir / 'footnote.html').read_text(encoding='utf8')
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_undefined_refs(app):
    """Inconsistent references must still render as sensible links in HTML."""
    app.build()
    # --- links to undefined reference
    result = (app.outdir / 'refs_inconsistency.html').read_text(encoding='utf8')

    expected_expr = (
        '<a class="reference external" href="https://www.example.com">reference</a>'
    )
    assert len(re.findall(expected_expr, result)) == 2

    expected_expr = '<a class="reference internal" href="#reference">reference</a>'
    assert len(re.findall(expected_expr, result)) == 0

    expected_expr = (
        '<a class="reference internal" '
        'href="#i18n-with-refs-inconsistency">I18N WITH '
        'REFS INCONSISTENCY</a>'
    )
    assert len(re.findall(expected_expr, result)) == 1
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_index_entries(app):
    """Translated index entries must appear on the generated genindex page."""
    app.build()
    # --- index entries: regression test for
    # https://github.com/sphinx-doc/sphinx/issues/976
    result = (app.outdir / 'genindex.html').read_text(encoding='utf8')

    def wrap(tag, keyword):
        # Regex matching `keyword` wrapped in a `tag` ... `/tag` pair.
        start_tag = '<%s[^>]*>' % tag
        end_tag = '</%s>' % tag
        return rf'{start_tag}\s*{keyword}\s*{end_tag}'

    def wrap_nest(parenttag, childtag, keyword):
        # Regex matching `keyword` between a parent tag and a nested child tag.
        start_tag1 = '<%s[^>]*>' % parenttag
        start_tag2 = '<%s[^>]*>' % childtag
        return rf'{start_tag1}\s*{keyword}\s*{start_tag2}'

    expected_exprs = [
        wrap('h2', 'Symbols'),
        wrap('h2', 'C'),
        wrap('h2', 'E'),
        wrap('h2', 'F'),
        wrap('h2', 'M'),
        wrap('h2', 'N'),
        wrap('h2', 'R'),
        wrap('h2', 'S'),
        wrap('h2', 'T'),
        wrap('h2', 'V'),
        wrap('a', 'NEWSLETTER'),
        wrap('a', 'MAILING LIST'),
        wrap('a', 'RECIPIENTS LIST'),
        wrap('a', 'FIRST SECOND'),
        wrap('a', 'SECOND THIRD'),
        wrap('a', 'THIRD, FIRST'),
        wrap_nest('li', 'ul', 'ENTRY'),
        wrap_nest('li', 'ul', 'SEE'),
    ]
    for expr in expected_exprs:
        assert re.search(expr, result, re.MULTILINE), (
            f'{expr!r} did not match {result!r}'
        )
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_versionchanges(app):
    """deprecated/versionadded/versionchanged/versionremoved must be translated."""
    app.build()
    # --- versionchanges
    result = (app.outdir / 'versionchange.html').read_text(encoding='utf8')

    def get_content(result, name):
        # Extract the inner HTML of the first <div class="name"> block, or ''.
        matched = re.search(r'<div class="%s">\n*(.*?)</div>' % name, result, re.DOTALL)
        if matched:
            return matched.group(1)
        else:
            return ''

    expect1 = (
        """<p><span class="versionmodified deprecated">Deprecated since version 1.0: </span>"""
        """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSION-DEPRECATED.</p>\n"""
        """<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF VERSION-DEPRECATED.</p>\n"""
    )
    matched_content = get_content(result, 'deprecated')
    assert matched_content == expect1

    expect2 = (
        """<p><span class="versionmodified added">Added in version 1.0: </span>"""
        """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSION-ADDED.</p>\n"""
    )
    matched_content = get_content(result, 'versionadded')
    assert matched_content == expect2

    expect3 = (
        """<p><span class="versionmodified changed">Changed in version 1.0: </span>"""
        """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSION-CHANGED.</p>\n"""
    )
    matched_content = get_content(result, 'versionchanged')
    assert matched_content == expect3

    expect4 = (
        """<p><span class="versionmodified removed">Removed in version 1.0: </span>"""
        """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSION-REMOVED.</p>\n"""
    )
    matched_content = get_content(result, 'versionremoved')
    assert matched_content == expect4
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_docfields(app):
    """Building a translated document containing docfields must not error."""
    app.build()
    # --- docfields
    # expect no error by build
    (app.outdir / 'docfields.html').read_text(encoding='utf8')
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_template(app):
    """Strings coming from gettext templates must be translated in the output."""
    app.build()
    # --- gettext template
    result = (app.outdir / 'contents.html').read_text(encoding='utf8')
    assert 'WELCOME' in result
    assert 'SPHINX 2013.120' in result
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_rebuild_mo(app):
    """Touching a .mo file must mark the corresponding document outdated."""
    app.build()
    # --- rebuild by .mo mtime
    app.build()
    _, updated, _ = _get_update_targets(app)
    assert updated == set()

    _, bom_file = _get_bom_intl_path(app)
    old_mtime = bom_file.stat().st_mtime
    new_mtime = old_mtime + (dt := 5)
    os.utime(bom_file, (new_mtime, new_mtime))
    assert old_mtime + dt == new_mtime, (old_mtime + dt, new_mtime)
    _, updated, _ = _get_update_targets(app)
    assert updated == {'bom'}
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_footnotes(app):
    """Footnote references and definitions must survive translation (XML)."""
    app.build()
    # --- footnotes: regression test for fix
    # https://github.com/sphinx-doc/sphinx/issues/955,
    # https://github.com/sphinx-doc/sphinx/issues/1176
    et = etree_parse(app.outdir / 'footnote.xml')
    secs = et.findall('section')

    para0 = secs[0].findall('paragraph')
    assert_elem(
        para0[0],
        [
            'I18N WITH FOOTNOTE',
            'INCLUDE THIS CONTENTS',
            '2',
            '[ref]',
            '1',
            '100',
            '*',
            '. SECOND FOOTNOTE_REF',
            '100',
            '.',
        ],
        ['i18n-with-footnote', 'ref'],
    )

    # check node_id for footnote_references which refer same footnote
    # See: https://github.com/sphinx-doc/sphinx/issues/3002
    assert para0[0][4].text == para0[0][6].text == '100'
    assert para0[0][4].attrib['ids'] != para0[0][6].attrib['ids']

    footnote0 = secs[0].findall('footnote')
    assert_elem(footnote0[0], ['1', 'THIS IS A AUTO NUMBERED FOOTNOTE.'], None, ['1'])
    assert_elem(footnote0[1], ['100', 'THIS IS A NUMBERED FOOTNOTE.'], None, ['100'])
    assert_elem(
        footnote0[2], ['2', 'THIS IS A AUTO NUMBERED NAMED FOOTNOTE.'], None, ['named']
    )
    assert_elem(footnote0[3], ['*', 'THIS IS A AUTO SYMBOL FOOTNOTE.'], None, None)

    citation0 = secs[0].findall('citation')
    assert_elem(citation0[0], ['ref', 'THIS IS A NAMED FOOTNOTE.'], None, ['ref'])

    warnings = getwarning(app.warning)
    warning_expr = '.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
    assert not re.search(warning_expr, warnings), (
        f'{warning_expr!r} did match {warnings!r}'
    )
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_footnote_backlinks(app):
    """Each footnote's backrefs must include the id of its reference node."""
    app.build()
    # --- footnote backlinks: i18n test for
    # https://github.com/sphinx-doc/sphinx/issues/1058
    et = etree_parse(app.outdir / 'footnote.xml')
    secs = et.findall('section')

    para0 = secs[0].findall('paragraph')
    refs0 = para0[0].findall('footnote_reference')
    # Map each reference's target footnote id to the reference node's own id.
    refid2id = {r.attrib.get('refid'): r.attrib.get('ids') for r in refs0}

    footnote0 = secs[0].findall('footnote')
    for footnote in footnote0:
        ids = footnote.attrib.get('ids')
        backrefs = footnote.attrib.get('backrefs').split()
        assert refid2id[ids] in backrefs
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_refs_in_python_domain(app):
    """Python-domain cross references must resolve in translated documents."""
    app.build()
    # --- refs in the Python domain
    et = etree_parse(app.outdir / 'refs_python_domain.xml')
    secs = et.findall('section')

    # regression test for fix
    # https://github.com/sphinx-doc/sphinx/issues/1363
    para0 = secs[0].findall('paragraph')
    assert_elem(
        para0[0],
        ['SEE THIS DECORATOR:', 'sensitive_variables()', '.'],
        ['sensitive.sensitive_variables'],
    )
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_keep_external_links(app):
    """External link targets must be preserved through translation (XML)."""
    app.build()
    # --- keep external links: regression test for
    # https://github.com/sphinx-doc/sphinx/issues/1044
    et = etree_parse(app.outdir / 'external_links.xml')
    secs = et.findall('section')

    para0 = secs[0].findall('paragraph')
    # external link check
    assert_elem(
        para0[0], ['EXTERNAL LINK TO', 'Python', '.'], ['https://python.org/index.html']
    )

    # internal link check
    assert_elem(
        para0[1], ['EXTERNAL LINKS', 'IS INTERNAL LINK.'], ['i18n-with-external-links']
    )

    # inline link check
    assert_elem(
        para0[2], ['INLINE LINK BY', 'THE SPHINX SITE', '.'], ['https://sphinx-doc.org']
    )

    # unnamed link check
    assert_elem(para0[3], ['UNNAMED', 'LINK', '.'], ['https://google.com'])

    # link target swapped translation
    para1 = secs[1].findall('paragraph')
    assert_elem(
        para1[0],
        ['LINK TO', 'external2', 'AND', 'external1', '.'],
        ['https://www.google.com/external2', 'https://www.google.com/external1'],
    )
    assert_elem(
        para1[1],
        ['LINK TO', 'THE PYTHON SITE', 'AND', 'THE SPHINX SITE', '.'],
        ['https://python.org', 'https://sphinx-doc.org'],
    )

    # multiple references in the same line
    para2 = secs[2].findall('paragraph')
    assert_elem(
        para2[0],
        [
            'LINK TO',
            'EXTERNAL LINKS',
            ',',
            'Python',
            ',',
            'THE SPHINX SITE',
            ',',
            'UNNAMED',
            'AND',
            'THE PYTHON SITE',
            '.',
        ],
        [
            'i18n-with-external-links',
            'https://python.org/index.html',
            'https://sphinx-doc.org',
            'https://google.com',
            'https://python.org',
        ],
    )
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_role_xref(app):
    """Role-based cross references must keep their targets after translation."""
    app.build()
    # --- role xref: regression test for
    # https://github.com/sphinx-doc/sphinx/issues/1090,
    # https://github.com/sphinx-doc/sphinx/issues/1193
    et = etree_parse(app.outdir / 'role_xref.xml')
    sec1, sec2 = et.findall('section')

    (para1,) = sec1.findall('paragraph')
    assert_elem(
        para1,
        [
            'LINK TO',
            "I18N ROCK'N ROLE XREF",
            ',',
            'CONTENTS',
            ',',
            'SOME NEW TERM',
            '.',
        ],
        ['i18n-role-xref', 'index', 'glossary_terms#term-Some-term'],
    )

    (sec1_1,) = sec1.findall('section')
    (title,) = sec1_1.findall('title')
    assert_elem(
        title,
        [
            'LINK TO',
            "I18N ROCK'N ROLE XREF",
            ',',
            'CONTENTS',
            ',',
            'SOME NEW TERM',
            '.',
        ],
        ['i18n-role-xref', 'index', 'glossary_terms#term-Some-term'],
    )

    para2 = sec2.findall('paragraph')
    assert_elem(
        para2[0],
        ['LINK TO', 'SOME OTHER NEW TERM', 'AND', 'SOME NEW TERM', '.'],
        ['glossary_terms#term-Some-other-term', 'glossary_terms#term-Some-term'],
    )
    assert_elem(
        para2[1],
        ['LINK TO', 'LABEL', 'AND', 'SAME TYPE LINKS', 'AND', 'SAME TYPE LINKS', '.'],
        ['i18n-role-xref', 'same-type-links', 'same-type-links'],
    )
    assert_elem(
        para2[2],
        ['LINK TO', 'I18N WITH GLOSSARY TERMS', 'AND', 'CONTENTS', '.'],
        ['glossary_terms', 'index'],
    )
    assert_elem(
        para2[3],
        ['LINK TO', '--module', 'AND', '-m', '.'],
        ['cmdoption-module', 'cmdoption-m'],
    )
    assert_elem(
        para2[4],
        ['LINK TO', 'env2', 'AND', 'env1', '.'],
        ['envvar-env2', 'envvar-env1'],
    )
    # TODO: how do I link token role to productionlist?
    assert_elem(para2[5], ['LINK TO', 'token2', 'AND', 'token1', '.'], [])
    assert_elem(
        para2[6],
        ['LINK TO', 'same-type-links', 'AND', 'i18n-role-xref', '.'],
        ['same-type-links', 'i18n-role-xref'],
    )
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_warnings(app):
    """Only the expected glossary warning may be emitted during the build."""
    app.build()
    # warnings
    warnings = getwarning(app.warning)
    assert warnings.count('term not in glossary') == 1
    assert 'undefined label' not in warnings
    assert 'unknown document' not in warnings
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_label_targets(app):
    """Implicit/explicit label targets must keep pointing correctly after translation."""
    app.build()
    # --- label targets: regression test for
    # https://github.com/sphinx-doc/sphinx/issues/1193,
    # https://github.com/sphinx-doc/sphinx/issues/1265
    et = etree_parse(app.outdir / 'label_target.xml')
    secs = et.findall('section')

    para0 = secs[0].findall('paragraph')
    assert_elem(
        para0[0],
        [
            'X SECTION AND LABEL',
            'POINT TO',
            'implicit-target',
            'AND',
            'X SECTION AND LABEL',
            'POINT TO',
            'section-and-label',
            '.',
        ],
        ['implicit-target', 'section-and-label'],
    )

    para1 = secs[1].findall('paragraph')
    assert_elem(
        para1[0],
        [
            'X EXPLICIT-TARGET',
            'POINT TO',
            'explicit-target',
            'AND',
            'X EXPLICIT-TARGET',
            'POINT TO DUPLICATED ID LIKE',
            'id1',
            '.',
        ],
        ['explicit-target', 'id1'],
    )

    para2 = secs[2].findall('paragraph')
    assert_elem(
        para2[0],
        ['X IMPLICIT SECTION NAME', 'POINT TO', 'implicit-section-name', '.'],
        ['implicit-section-name'],
    )

    sec2 = secs[2].findall('section')

    para2_0 = sec2[0].findall('paragraph')
    assert_elem(para2_0[0], ['`X DUPLICATED SUB SECTION`_', 'IS BROKEN LINK.'], [])

    para3 = secs[3].findall('paragraph')
    assert_elem(
        para3[0],
        [
            'X',
            'bridge label',
            'IS NOT TRANSLATABLE BUT LINKED TO TRANSLATED SECTION TITLE.',
        ],
        ['label-bridged-target-section'],
    )
    assert_elem(
        para3[1],
        [
            'X',
            'bridge label',
            'POINT TO',
            'LABEL BRIDGED TARGET SECTION',
            'AND',
            'bridge label2',
            'POINT TO',
            'SECTION AND LABEL',
            '. THE SECOND APPEARED',
            'bridge label2',
            'POINT TO CORRECT TARGET.',
        ],
        ['label-bridged-target-section', 'section-and-label', 'section-and-label'],
    )
@sphinx_intl
@pytest.mark.sphinx('xml', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_strange_markup(app):
    """A title beginning with an enumerator-like '1.' must be translated intact."""
    app.build()
    et = etree_parse(app.outdir / 'markup.xml')
    secs = et.findall('section')

    (subsec1,) = secs[0].findall('section')
    (title1,) = subsec1.findall('title')
    assert_elem(title1, ['1. TITLE STARTING WITH 1.'])
@sphinx_intl
@pytest.mark.sphinx('html', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_additional_targets_should_not_be_translated(app):
    """Without gettext_additional_targets, code/raw/image content stays untranslated."""
    # Pygments >= 2.19 emits whitespace wrapped in a span; older versions emit
    # a bare space (this branch selects the matching expected markup).
    if tuple(map(int, pygments.__version__.split('.')[:2])) >= (2, 19):
        sp = '<span class="w"> </span>'
    else:
        sp = ' '

    app.build()
    # [literalblock.txt]
    result = (app.outdir / 'literalblock.html').read_text(encoding='utf8')

    # title should be translated
    expected_expr = 'CODE-BLOCKS'
    assert_count(expected_expr, result, 2)

    # ruby code block should not be translated but be highlighted
    expected_expr = """<span class="s1">'result'</span>"""
    assert_count(expected_expr, result, 1)

    # C code block without lang should not be translated and *ruby* highlighted
    expected_expr = """<span class="c1">#include &lt;stdlib.h&gt;</span>"""
    assert_count(expected_expr, result, 1)

    # C code block with lang should not be translated but be *C* highlighted
    expected_expr = (
        """<span class="cp">#include</span>"""
        """<span class="w"> </span>"""
        """<span class="cpf">&lt;stdio.h&gt;</span>"""
    )
    assert_count(expected_expr, result, 1)

    # literal block in list item should not be translated
    expected_expr = (
        """<span class="n">literal</span>"""
        """<span class="o">-</span>"""
        """<span class="n">block</span>\n"""
        """<span class="k">in</span>"""
        """<span class="w"> </span>"""
        """<span class="n">list</span>"""
    )
    assert_count(expected_expr, result, 1)

    # doctest block should not be translated but be highlighted
    expected_expr = (
        """<span class="gp">&gt;&gt;&gt; </span>"""
        f"""<span class="kn">import</span>{sp}<span class="nn">sys</span>  """
        """<span class="c1"># sys importing</span>"""
    )
    assert_count(expected_expr, result, 1)

    # [raw.txt]
    result = (app.outdir / 'raw.html').read_text(encoding='utf8')

    # raw block should not be translated
    expected_expr = """<iframe src="https://sphinx-doc.org"></iframe></section>"""
    assert_count(expected_expr, result, 1)

    # [figure.txt]
    result = (app.outdir / 'figure.html').read_text(encoding='utf8')

    # src for image block should not be translated (alt is translated)
    expected_expr = """<img alt="I18N -&gt; IMG" src="_images/i18n.png" />"""
    assert_count(expected_expr, result, 1)

    # src for figure block should not be translated (alt is translated)
    expected_expr = """<img alt="IMG -&gt; I18N" src="_images/img.png" />"""
    assert_count(expected_expr, result, 1)
@sphinx_intl
@pytest.mark.sphinx(
    'html',
    testroot='intl',
    srcdir='test_additional_targets_should_be_translated',
    confoverrides={
        'language': _CATALOG_LOCALE,
        'locale_dirs': ['.'],
        'gettext_compact': False,
        'gettext_additional_targets': [
            'index',
            'literal-block',
            'doctest-block',
            'raw',
            'image',
        ],
    },
)
def test_additional_targets_should_be_translated(app):
    """With gettext_additional_targets set, code/raw/image content is translated."""
    # Pygments >= 2.19 emits whitespace wrapped in a span; older versions emit
    # a bare space (this branch selects the matching expected markup).
    if tuple(map(int, pygments.__version__.split('.')[:2])) >= (2, 19):
        sp = '<span class="w"> </span>'
    else:
        sp = ' '

    app.build()
    # [literalblock.txt]
    result = (app.outdir / 'literalblock.html').read_text(encoding='utf8')

    # basic literal block should be translated
    expected_expr = (
        '<span class="n">THIS</span> <span class="n">IS</span>\n'
        '<span class="n">LITERAL</span> <span class="n">BLOCK</span>'
    )
    assert_count(expected_expr, result, 1)

    # literalinclude should be translated
    expected_expr = '<span class="s2">&quot;HTTPS://SPHINX-DOC.ORG&quot;</span>'
    assert_count(expected_expr, result, 1)

    # title should be translated
    expected_expr = 'CODE-BLOCKS'
    assert_count(expected_expr, result, 2)

    # ruby code block should be translated and be highlighted
    expected_expr = """<span class="s1">'RESULT'</span>"""
    assert_count(expected_expr, result, 1)

    # C code block without lang should be translated and *ruby* highlighted
    expected_expr = """<span class="c1">#include &lt;STDLIB.H&gt;</span>"""
    assert_count(expected_expr, result, 1)

    # C code block with lang should be translated and be *C* highlighted
    expected_expr = (
        """<span class="cp">#include</span>"""
        """<span class="w"> </span>"""
        """<span class="cpf">&lt;STDIO.H&gt;</span>"""
    )
    assert_count(expected_expr, result, 1)

    # literal block in list item should be translated
    expected_expr = (
        """<span class="no">LITERAL</span>"""
        """<span class="o">-</span>"""
        """<span class="no">BLOCK</span>\n"""
        """<span class="no">IN</span>"""
        """<span class="w"> </span>"""
        """<span class="no">LIST</span>"""
    )
    assert_count(expected_expr, result, 1)

    # doctest block should not be translated but be highlighted
    expected_expr = (
        """<span class="gp">&gt;&gt;&gt; </span>"""
        f"""<span class="kn">import</span>{sp}<span class="nn">sys</span>  """
        """<span class="c1"># SYS IMPORTING</span>"""
    )
    assert_count(expected_expr, result, 1)

    # 'noqa' comments should remain in literal blocks.
    assert_count('#noqa', result, 1)

    # [raw.txt]
    result = (app.outdir / 'raw.html').read_text(encoding='utf8')

    # raw block should be translated
    expected_expr = """<iframe src="HTTPS://SPHINX-DOC.ORG"></iframe></section>"""
    assert_count(expected_expr, result, 1)

    # [figure.txt]
    result = (app.outdir / 'figure.html').read_text(encoding='utf8')

    # alt and src for image block should be translated
    expected_expr = """<img alt="I18N -&gt; IMG" src="_images/img.png" />"""
    assert_count(expected_expr, result, 1)

    # alt and src for figure block should be translated
    expected_expr = """<img alt="IMG -&gt; I18N" src="_images/i18n.png" />"""
    assert_count(expected_expr, result, 1)
@pytest.mark.sphinx(
    'html',
    testroot='intl_substitution_definitions',
    confoverrides={
        'language': _CATALOG_LOCALE,
        'locale_dirs': ['.'],
        'gettext_compact': False,
        'gettext_additional_targets': [
            'index',
            'literal-block',
            'doctest-block',
            'raw',
            'image',
        ],
    },
    copy_test_root=True,
)
def test_additional_targets_should_be_translated_substitution_definitions(app):
    """Substitution definitions from prolog/epilog must be translated in images."""
    app.build(force_all=True)

    # [prolog_epilog_substitution.txt]
    result = (app.outdir / 'prolog_epilog_substitution.html').read_text(encoding='utf8')

    # alt and src for image block should be translated
    expected_expr = """<img alt="SUBST_PROLOG_2 TRANSLATED" src="_images/i18n.png" />"""
    assert_count(expected_expr, result, 1)

    # alt and src for image block should be translated
    expected_expr = """<img alt="SUBST_EPILOG_2 TRANSLATED" src="_images/img.png" />"""
    assert_count(expected_expr, result, 1)
@sphinx_intl
@pytest.mark.sphinx('text', testroot='intl')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_references(app):
    """Building translated refs.txt with the text builder must emit no target errors."""
    app.build(filenames=[app.srcdir / 'refs.txt'])
    warnings = app.warning.getvalue().replace(os.sep, '/')
    warning_expr = 'refs.txt:\\d+: ERROR: Unknown target name:'
    assert_count(warning_expr, warnings, 0)
@pytest.mark.sphinx(
    'text',
    testroot='intl_substitution_definitions',
    confoverrides={
        'language': _CATALOG_LOCALE,
        'locale_dirs': ['.'],
        'gettext_compact': False,
    },
    copy_test_root=True,
)
def test_text_prolog_epilog_substitution(app):
    """rst_prolog/rst_epilog substitutions must be translated in text output."""
    app.build()
    result = (app.outdir / 'prolog_epilog_substitution.txt').read_text(encoding='utf8')
    assert result == (
        """\
1. I18N WITH PROLOGUE AND EPILOGUE SUBSTITUTIONS
************************************************
THIS IS CONTENT THAT CONTAINS prologue substitute text.
SUBSTITUTED IMAGE [image: SUBST_PROLOG_2 TRANSLATED][image] HERE.
THIS IS CONTENT THAT CONTAINS epilogue substitute text.
SUBSTITUTED IMAGE [image: SUBST_EPILOG_2 TRANSLATED][image] HERE.
"""
    )
@pytest.mark.usefixtures('_http_teapot')
@pytest.mark.sphinx(
    'dummy',
    testroot='images',
    srcdir='test_intl_images',
    confoverrides={'language': _CATALOG_LOCALE},
)
def test_image_glob_intl(app):
    """Localized image files (default naming) must be picked up by image globbing."""
    app.build()

    # index.rst
    doctree = app.env.get_doctree('index')
    assert_node(
        doctree[0][1], nodes.image, uri='rimg.xx.png', candidates={'*': 'rimg.xx.png'}
    )

    assert isinstance(doctree[0][2], nodes.figure)
    assert_node(
        doctree[0][2][0],
        nodes.image,
        uri='rimg.xx.png',
        candidates={'*': 'rimg.xx.png'},
    )

    assert_node(
        doctree[0][3],
        nodes.image,
        uri='img.*',
        candidates={
            'application/pdf': 'img.pdf',
            'image/gif': 'img.gif',
            'image/png': 'img.png',
        },
    )

    assert isinstance(doctree[0][4], nodes.figure)
    assert_node(
        doctree[0][4][0],
        nodes.image,
        uri='img.*',
        candidates={
            'application/pdf': 'img.pdf',
            'image/gif': 'img.gif',
            'image/png': 'img.png',
        },
    )

    # subdir/index.rst
    doctree = app.env.get_doctree('subdir/index')
    assert_node(
        doctree[0][1],
        nodes.image,
        uri='subdir/rimg.xx.png',
        candidates={'*': 'subdir/rimg.xx.png'},
    )

    assert_node(
        doctree[0][2],
        nodes.image,
        uri='subdir/svgimg.*',
        candidates={
            'application/pdf': 'subdir/svgimg.pdf',
            'image/svg+xml': 'subdir/svgimg.xx.svg',
        },
    )

    assert isinstance(doctree[0][3], nodes.figure)
    assert_node(
        doctree[0][3][0],
        nodes.image,
        uri='subdir/svgimg.*',
        candidates={
            'application/pdf': 'subdir/svgimg.pdf',
            'image/svg+xml': 'subdir/svgimg.xx.svg',
        },
    )
@pytest.mark.usefixtures('_http_teapot')
@pytest.mark.sphinx(
    'dummy',
    testroot='images',
    srcdir='test_intl_images',
    confoverrides={
        'language': _CATALOG_LOCALE,
        'figure_language_filename': '{root}{ext}.{language}',
    },
)
def test_image_glob_intl_using_figure_language_filename(app):
    """Image globbing must honor a custom figure_language_filename pattern."""
    app.build()

    # index.rst
    doctree = app.env.get_doctree('index')
    assert_node(
        doctree[0][1], nodes.image, uri='rimg.png.xx', candidates={'*': 'rimg.png.xx'}
    )

    assert isinstance(doctree[0][2], nodes.figure)
    assert_node(
        doctree[0][2][0],
        nodes.image,
        uri='rimg.png.xx',
        candidates={'*': 'rimg.png.xx'},
    )

    assert_node(
        doctree[0][3],
        nodes.image,
        uri='img.*',
        candidates={
            'application/pdf': 'img.pdf',
            'image/gif': 'img.gif',
            'image/png': 'img.png',
        },
    )

    assert isinstance(doctree[0][4], nodes.figure)
    assert_node(
        doctree[0][4][0],
        nodes.image,
        uri='img.*',
        candidates={
            'application/pdf': 'img.pdf',
            'image/gif': 'img.gif',
            'image/png': 'img.png',
        },
    )

    # subdir/index.rst
    doctree = app.env.get_doctree('subdir/index')
    assert_node(
        doctree[0][1],
        nodes.image,
        uri='subdir/rimg.png',
        candidates={'*': 'subdir/rimg.png'},
    )

    assert_node(
        doctree[0][2],
        nodes.image,
        uri='subdir/svgimg.*',
        candidates={
            'application/pdf': 'subdir/svgimg.pdf',
            'image/svg+xml': 'subdir/svgimg.svg',
        },
    )

    assert isinstance(doctree[0][3], nodes.figure)
    assert_node(
        doctree[0][3][0],
        nodes.image,
        uri='subdir/svgimg.*',
        candidates={
            'application/pdf': 'subdir/svgimg.pdf',
            'image/svg+xml': 'subdir/svgimg.svg',
        },
    )
def getwarning(warnings: StringIO) -> str:
    """Return captured warnings as text: escape sequences stripped, '/' separators."""
    raw = warnings.getvalue()
    normalised = raw.replace(os.sep, '/')
    return strip_escape_sequences(normalised)
@pytest.mark.sphinx(
    'html',
    testroot='basic',
    srcdir='gettext_allow_fuzzy_translations',
    confoverrides={
        'language': 'de',
        'gettext_allow_fuzzy_translations': True,
    },
)
def test_gettext_allow_fuzzy_translations(app):
    """Fuzzy-flagged messages are used when gettext_allow_fuzzy_translations=True."""
    locale_dir = app.srcdir / 'locales' / 'de' / 'LC_MESSAGES'
    locale_dir.mkdir(parents=True, exist_ok=True)
    with (locale_dir / 'index.po').open('wb') as f:
        catalog = Catalog()
        catalog.add('features', 'FEATURES', flags=('fuzzy',))
        pofile.write_po(f, catalog)

    app.build()
    content = (app.outdir / 'index.html').read_text(encoding='utf8')
    assert 'FEATURES' in content
@pytest.mark.sphinx(
    'html',
    testroot='basic',
    srcdir='gettext_disallow_fuzzy_translations',
    confoverrides={
        'language': 'de',
        'gettext_allow_fuzzy_translations': False,
    },
)
def test_gettext_disallow_fuzzy_translations(app):
    """Fuzzy-flagged messages are ignored when gettext_allow_fuzzy_translations=False."""
    locale_dir = app.srcdir / 'locales' / 'de' / 'LC_MESSAGES'
    locale_dir.mkdir(parents=True, exist_ok=True)
    with (locale_dir / 'index.po').open('wb') as f:
        catalog = Catalog()
        catalog.add('features', 'FEATURES', flags=('fuzzy',))
        pofile.write_po(f, catalog)

    app.build()
    content = (app.outdir / 'index.html').read_text(encoding='utf8')
    assert 'FEATURES' not in content
@pytest.mark.sphinx(
    'html',
    testroot='basic',
    confoverrides={'language': 'de', 'html_sidebars': {'**': ['searchbox.html']}},
    copy_test_root=True,
)
def test_customize_system_message(make_app, app_params):
    """A user-supplied sphinx.po catalog must override built-in UI messages."""
    try:
        # clear translators cache
        locale.translators.clear()

        # prepare message catalog (.po)
        locale_dir = app_params.kwargs['srcdir'] / 'locales' / 'de' / 'LC_MESSAGES'
        locale_dir.mkdir(parents=True, exist_ok=True)
        with (locale_dir / 'sphinx.po').open('wb') as f:
            catalog = Catalog()
            catalog.add('Quick search', 'QUICK SEARCH')
            pofile.write_po(f, catalog)

        # construct application and convert po file to .mo
        args, kwargs = app_params
        app = make_app(*args, **kwargs)
        assert (locale_dir / 'sphinx.mo').exists()
        assert app.translator.gettext('Quick search') == 'QUICK SEARCH'

        app.build()
        content = (app.outdir / 'index.html').read_text(encoding='utf8')
        assert 'QUICK SEARCH' in content
    finally:
        # always restore the shared translator cache for later tests
        locale.translators.clear()
@pytest.mark.sphinx(
    'html',
    testroot='intl',
    confoverrides={'today_fmt': '%Y-%m-%d'},
)
def test_customize_today_date_format(app, monkeypatch):
    """today_fmt must format the SOURCE_DATE_EPOCH-derived build date."""
    with monkeypatch.context() as m:
        # 1439131307 is 2015-08-09 (UTC) as a Unix timestamp.
        m.setenv('SOURCE_DATE_EPOCH', '1439131307')
        app.build()
        content = (app.outdir / 'refs.html').read_text(encoding='utf8')
        assert '2015-08-09' in content
| _MockUnixClock |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/source_code.py | {
"start": 3641,
"end": 6525
class CodeReferencesMetadataSet(NamespacedMetadataSet):
    """Metadata entries that apply to asset definitions and which specify the location where
    source code for the asset can be found.
    """

    # Optional collection of file/URL references to the asset's source code.
    code_references: Optional[CodeReferencesMetadataValue] = None

    @classmethod
    def namespace(cls) -> str:
        # All entries in this set live under the "dagster" metadata namespace.
        return "dagster"
def _with_code_source_single_definition(
assets_def: Union["AssetsDefinition", "SourceAsset", "CacheableAssetsDefinition", "AssetSpec"],
) -> Union["AssetsDefinition", "SourceAsset", "CacheableAssetsDefinition", "AssetSpec"]:
from dagster._core.definitions.assets.definition.assets_definition import AssetsDefinition
# SourceAsset and AssetSpec don't have an op definition to point to - cacheable assets
# will be supported eventually but are a bit trickier
if not isinstance(assets_def, AssetsDefinition):
return assets_def
metadata_by_key = dict(assets_def.metadata_by_key) or {}
from dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction
from dagster._core.definitions.graph_definition import GraphDefinition
from dagster._core.definitions.op_definition import OpDefinition
base_fn = None
if isinstance(assets_def.node_def, OpDefinition):
base_fn = (
assets_def.node_def.compute_fn.decorated_fn
if isinstance(assets_def.node_def.compute_fn, DecoratedOpFunction)
else assets_def.node_def.compute_fn
)
elif isinstance(assets_def.node_def, GraphDefinition):
# For graph-backed assets, point to the composition fn, e.g. the
# function decorated by @graph_asset
base_fn = assets_def.node_def.composition_fn
if not base_fn:
return assets_def
source_path = local_source_path_from_fn(base_fn)
if source_path:
sources = [source_path]
for key in assets_def.keys:
# merge with any existing metadata
existing_source_code_metadata = CodeReferencesMetadataSet.extract(
metadata_by_key.get(key, {})
)
existing_code_references = (
existing_source_code_metadata.code_references.code_references
if existing_source_code_metadata.code_references
else []
)
sources_for_asset: list[Union[LocalFileCodeReference, UrlCodeReference]] = [
*existing_code_references,
*sources,
]
metadata_by_key[key] = {
**metadata_by_key.get(key, {}),
**CodeReferencesMetadataSet(
code_references=CodeReferencesMetadataValue(code_references=sources_for_asset)
),
}
return assets_def.map_asset_specs(
lambda spec: spec.replace_attributes(metadata=metadata_by_key[spec.key])
)
@beta
@public
| CodeReferencesMetadataSet |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 33478,
"end": 36601
} | class ____(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.time_res_stack = nn.ModuleList()
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(
in_channels=config.latent_channels, out_channels=config.latent_channels
)
self.time_res_stack.append(time_res_conv)
temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
for i in range(temp_upsample_block_num):
conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
self.time_conv.append(conv)
self.conv_in = nn.Conv2d(
config.latent_channels,
block_in,
kernel_size=3,
stride=1,
padding=1,
)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
self.up_block = Emu3VQVAEUpBlock(config)
block_in = config.base_channels * config.channel_multiplier[0]
self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
self.conv_out = nn.Conv2d(
block_in,
config.out_channels,
kernel_size=3,
stride=1,
padding=1,
)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
# temporal convs
for layer in self.time_res_stack:
hidden_quant_states = layer(hidden_quant_states)
for layer in self.time_conv:
hidden_quant_states = layer(hidden_quant_states)
hidden_quant_states *= torch.sigmoid(hidden_quant_states)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
hidden_states = self.conv_in(hidden_states)
# middle & upsampling
hidden_states = self.middle_block(hidden_states, quant_states)
hidden_states = self.up_block(hidden_states, quant_states)
hidden_states = self.norm_out(hidden_states, quant_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
return hidden_states
@auto_docstring(
custom_intro="""
The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.
This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from
[ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv
Taigman](https://huggingface.co/papers/2203.13131).
"""
)
| Emu3VQVAEDecoder |
python | PrefectHQ__prefect | tests/infrastructure/provisioners/test_ecs.py | {
"start": 2086,
"end": 6087
} | class ____:
async def test_requires_provisioning_no_policy(self, iam_policy_resource):
# Check if provisioning is needed
needs_provisioning = await iam_policy_resource.requires_provisioning()
assert needs_provisioning
@pytest.mark.usefixtures("existing_iam_policy")
async def test_requires_provisioning_with_policy(self, iam_policy_resource):
needs_provisioning = await iam_policy_resource.requires_provisioning()
assert not needs_provisioning
async def test_provision(self, iam_policy_resource):
advance_mock = MagicMock()
iam_client = boto3.client("iam")
iam_client.create_user(UserName="prefect-ecs-user")
# Provision IAM policy
await iam_policy_resource.provision(
advance=advance_mock,
policy_document={
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PrefectEcsPolicy",
"Effect": "Allow",
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
],
"Resource": "*",
}
],
},
)
# Check if the IAM policy exists
policies = iam_client.list_policies(Scope="Local")["Policies"]
policy_names = [policy["PolicyName"] for policy in policies]
assert "prefect-ecs-policy" in policy_names
advance_mock.assert_called_once()
@pytest.mark.usefixtures("existing_iam_policy")
async def test_provision_preexisting_policy(self):
advance_mock = MagicMock()
iam_policy_resource = IamPolicyResource(policy_name="prefect-ecs-policy")
result = await iam_policy_resource.provision(
advance=advance_mock, policy_document={}
)
# returns existing policy ARN
assert result == "arn:aws:iam::123456789012:policy/prefect-ecs-policy"
advance_mock.assert_not_called()
@pytest.mark.usefixtures("existing_iam_policy")
async def test_get_task_count_policy_exists(self, iam_policy_resource):
count = await iam_policy_resource.get_task_count()
assert count == 0
async def test_get_task_count_policy_does_not_exist(self, iam_policy_resource):
count = await iam_policy_resource.get_task_count()
assert count == 1
@pytest.mark.usefixtures("existing_iam_policy")
async def test_get_planned_actions_policy_exists(self, iam_policy_resource):
actions = await iam_policy_resource.get_planned_actions()
assert actions == []
async def test_get_planned_actions_policy_does_not_exist(self, iam_policy_resource):
actions = await iam_policy_resource.get_planned_actions()
assert actions == [
"Creating and attaching an IAM policy for managing ECS tasks:"
f" [blue]{iam_policy_resource._policy_name}[/]"
]
@pytest.fixture
def iam_user_resource():
return IamUserResource(user_name="prefect-ecs-user")
@pytest.fixture
def existing_iam_user():
iam_client = boto3.client("iam")
iam_client.create_user(UserName="prefect-ecs-user")
yield
iam_client.delete_user(UserName="prefect-ecs-user")
@pytest.fixture
def existing_execution_role():
iam_client = boto3.client("iam")
iam_client.create_role(
RoleName="PrefectEcsTaskExecutionRole",
AssumeRolePolicyDocument=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PrefectEcsExecutionRole",
"Effect": "Allow",
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
"Action": "sts:AssumeRole",
}
],
}
),
)
yield
iam_client.delete_role(RoleName="PrefectEcsTaskExecutionRole")
| TestIamPolicyResource |
python | numpy__numpy | numpy/_typing/_dtype_like.py | {
"start": 786,
"end": 1036
} | class ____(_DTypeDictBase, total=False):
# Only `str` elements are usable as indexing aliases,
# but `titles` can in principle accept any object
offsets: Sequence[int]
titles: Sequence[Any]
itemsize: int
aligned: bool
| _DTypeDict |
python | apache__airflow | providers/discord/src/airflow/providers/discord/hooks/discord_webhook.py | {
"start": 1130,
"end": 3220
} | class ____:
"""Contains the common functionality."""
def get_webhook_endpoint(self, conn: Connection | None, webhook_endpoint: str | None) -> str:
"""
Return the default webhook endpoint or override if a webhook_endpoint is manually supplied.
:param conn: Airflow Discord connection
:param webhook_endpoint: The manually provided webhook endpoint
:return: Webhook endpoint (str) to use
"""
if webhook_endpoint:
endpoint = webhook_endpoint
elif conn:
extra = conn.extra_dejson
endpoint = extra.get("webhook_endpoint", "")
else:
raise ValueError(
"Cannot get webhook endpoint: No valid Discord webhook endpoint or http_conn_id supplied."
)
# make sure endpoint matches the expected Discord webhook format
if not re.fullmatch("webhooks/[0-9]+/[a-zA-Z0-9_-]+", endpoint):
raise ValueError(
'Expected Discord webhook endpoint in the form of "webhooks/{webhook.id}/{webhook.token}".'
)
return endpoint
def build_discord_payload(
self, *, tts: bool, message: str, username: str | None, avatar_url: str | None
) -> str:
"""
Build a valid Discord JSON payload.
:param tts: Is a text-to-speech message
:param message: The message you want to send to your Discord channel
(max 2000 characters)
:param username: Override the default username of the webhook
:param avatar_url: Override the default avatar of the webhook
:return: Discord payload (str) to send
"""
if len(message) > 2000:
raise ValueError("Discord message length must be 2000 or fewer characters.")
payload: dict[str, Any] = {
"content": message,
"tts": tts,
}
if username:
payload["username"] = username
if avatar_url:
payload["avatar_url"] = avatar_url
return json.dumps(payload)
| DiscordCommonHandler |
python | pytorch__pytorch | torch/fx/proxy.py | {
"start": 25463,
"end": 26692
} | class ____(Proxy):
"""
A Proxy subclass that propagates metadata (meta['val']) during graph tracing.
"""
def __init__(
self, node: Node, tracer: "Optional[TracerBase]" = None, fake_mode=None
):
super().__init__(node, tracer)
self.fake_mode = fake_mode
def __repr__(self) -> str:
return f"MetaProxy({self.node.name})"
@classmethod
def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
args = args if args else ()
kwargs = kwargs if kwargs else {}
meta_proxy = None
for arg in args:
if isinstance(arg, MetaProxy):
meta_proxy = arg
break
assert meta_proxy is not None, (
"No MetaProxy found in arguments, but one is expected."
)
proxy = super().__torch_function__(orig_method, types, args, kwargs)
with meta_proxy.fake_mode:
proxy.node.meta["val"] = orig_method(
*[a.node.meta["val"] if isinstance(a, Proxy) else a for a in args],
**kwargs,
)
return MetaProxy(proxy.node, proxy.tracer, meta_proxy.fake_mode)
@compatibility(is_backward_compatible=True)
| MetaProxy |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 4832,
"end": 5003
} | class ____(str, _Action, Enum):
READ = "read_nodes"
@staticmethod
def values() -> List[str]:
return [action.value for action in NodesAction]
| NodesAction |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/batch/boto_schema.py | {
"start": 2119,
"end": 2445
} | class ____(Schema):
"""API Response for Describe Jobs."""
# The list of jobs
jobs = fields.List(fields.Nested(BatchJobDetailSchema), required=True)
class Meta:
"""Options object for a Schema. See Schema.Meta for more details and valid values."""
unknown = EXCLUDE
| BatchDescribeJobsResponseSchema |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/resource_tests/pythonic_resources/test_type_signatures.py | {
"start": 966,
"end": 1028
} | class ____(ConfigurableResource):
a_string: str
| InnerResource |
python | google__pytype | pytype/tools/xref/testdata/class_def.py | {
"start": 478,
"end": 592
} | class ____(A, B):
pass
#- @Bar defines/binding ClassBar
#- ClassBar.node/kind record
#- ClassBar.subkind class
| Foo |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 12086,
"end": 15322
} | class ____:
"""
Avoid "Permission denied error" on Windows:
with tempfile.NamedTemporaryFile("w", suffix=".gv") as temp_file:
# Not writable on Windows:
# https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile
Example:
with WritableTempFile("w", suffix=".gv") as temp_file:
tree.to_dotfile(temp_file.name)
"""
def __init__(
self, mode: str = "w", *, encoding: Any = None, suffix: Any = None
) -> None:
self.mode = mode
self.encoding = encoding
self.suffix = suffix
def __enter__(self) -> _TemporaryFileWrapper[Any]:
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, encoding=self.encoding, suffix=self.suffix, delete=False
)
return self.temp_file
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.temp_file.close()
try:
os.unlink(self.temp_file.name)
except OSError as e:
if _IS_WINDOWS:
# On Windows, some case temp file is opened and fail to unlink. Need to ignore it.
pass
else:
raise e
def write(
content: str | bytes,
extension: str,
extra: str = "",
hash_type: str = "code",
specified_dir: str = "",
key: str | None = None,
) -> tuple[str, str]:
if key is None:
# use striped content to compute hash so we don't end up with different
# hashes just because the content begins/ends with different number of
# spaces.
key = get_hash(content.strip(), extra, hash_type)
basename, _subdir, path = get_path(key, extension, specified_dir)
if not os.path.exists(path):
write_atomic(path, content, make_dirs=True)
return basename, path
def write_text(text: str) -> str:
"""
Write the `text` to a file and return the path computed based on the hash.
"""
return write(text, "txt")[1]
def write_atomic(
path_: str,
content: str | bytes,
make_dirs: bool = False,
encode_utf_8: bool = False,
) -> None:
# Write into temporary file first to avoid conflicts between threads
# Avoid using a named temporary file, as those have restricted permissions
assert isinstance(content, (str, bytes)), (
"Only strings and byte arrays can be saved in the cache"
)
path = Path(path_)
if make_dirs:
path.parent.mkdir(parents=True, exist_ok=True)
tmp_path = path.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
write_mode = "w" if isinstance(content, str) else "wb"
with tmp_path.open(write_mode, encoding="utf-8" if encode_utf_8 else None) as f:
f.write(content)
try:
tmp_path.rename(target=path)
except FileExistsError:
if not _IS_WINDOWS:
raise
# On Windows file exist is expected: https://docs.python.org/3/library/pathlib.html#pathlib.Path.rename
# Below two lines code is equal to `tmp_path.rename(path)` on non-Windows OS.
# 1. Copy tmp_file to Target(Dst) file.
shutil.copy2(src=tmp_path, dst=path)
# 2. Delete tmp_file.
os.remove(tmp_path)
@dataclasses.dataclass
| WritableTempFile |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 3678,
"end": 3775
} | class ____(models.Model):
user = models.ForeignKey("auth.User", on_delete=models.CASCADE)
| Entry |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_s3.py | {
"start": 3796,
"end": 5083
} | class ____:
def setup_method(self):
self.delete_bucket_operator = S3DeleteBucketOperator(
task_id="test-s3-delete-operator",
bucket_name=BUCKET_NAME,
)
@mock_aws
@mock.patch.object(S3Hook, "delete_bucket")
@mock.patch.object(S3Hook, "check_for_bucket")
def test_execute_if_bucket_exist(self, mock_check_for_bucket, mock_delete_bucket):
mock_check_for_bucket.return_value = True
# execute s3 bucket delete operator
self.delete_bucket_operator.execute({})
mock_check_for_bucket.assert_called_once_with(BUCKET_NAME)
mock_delete_bucket.assert_called_once_with(bucket_name=BUCKET_NAME, force_delete=False)
@mock_aws
@mock.patch.object(S3Hook, "delete_bucket")
@mock.patch.object(S3Hook, "check_for_bucket")
def test_execute_if_not_bucket_exist(self, mock_check_for_bucket, mock_delete_bucket):
mock_check_for_bucket.return_value = False
# execute s3 bucket delete operator
self.delete_bucket_operator.execute({})
mock_check_for_bucket.assert_called_once_with(BUCKET_NAME)
mock_delete_bucket.assert_not_called()
def test_template_fields(self):
validate_template_fields(self.delete_bucket_operator)
| TestS3DeleteBucketOperator |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 96340,
"end": 96403
} | class ____(str, Enum):
a = 'a'
b = 'b'
c = 'c'
| MyEnum |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.