language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F821_5.py | {
"start": 115,
"end": 246
} | class ____:
class InnerClass:
pass
def failing_func(self) -> "InnerClass":
return self.InnerClass()
| OuterClass |
python | pytorch__pytorch | test/test_numa_binding.py | {
"start": 1163,
"end": 31605
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._mock_file_path_to_contents: dict[str, str] = {}
self._mock_device_properties: list[MockDeviceProperties] = []
self._mock_num_logical_cpus = 0
self._mock_num_numa_nodes = 0
self._mock_num_sockets = 0
self._context_managers_to_apply_to_all_tests = [
patch("torch.cuda.device_count", self._mock_device_count),
patch("torch.cuda.get_device_properties", self._mock_get_device_properties),
patch("torch.cuda.is_available", self._mock_is_available),
# Implicitly used by dynamo
patch("torch.cuda.get_rng_state"),
patch("builtins.open", new=self._mock_open),
patch("os.listdir", new=self._mock_listdir),
patch("os.sched_getaffinity", new=self._mock_sched_getaffinity),
patch("torch.numa.binding.signpost_event", self._mock_signpost_event),
patch("torch.numa.binding.shutil.which", return_value="/usr/bin/numactl"),
]
for context_manager in self._context_managers_to_apply_to_all_tests:
context_manager.__enter__()
def tearDown(self) -> None:
for context_manager in self._context_managers_to_apply_to_all_tests:
context_manager.__exit__(None, None, None)
super().tearDown()
def _mock_signpost_event(self, *args, **kwargs) -> None:
# Please keep these parameters JSON serializable for logging purposes
json.dumps(kwargs["parameters"])
return signpost_event(*args, **kwargs)
def _add_mock_hardware(
self,
*,
num_sockets: int,
num_numa_nodes_per_socket: int,
num_gpus_per_numa_node: int,
num_l3_caches_per_numa_node: int,
num_physical_core_per_l3_cache: int,
) -> None:
"""
It's not fun, but we mock everything down to sysfs level
to make sure we get really thorough coverage.
"""
for socket_index in range(num_sockets):
for numa_node_index in range(
self._mock_num_numa_nodes,
self._mock_num_numa_nodes + num_numa_nodes_per_socket,
):
self._mock_file_contents(
file_path=f"/sys/devices/system/node/node{numa_node_index}/cpulist",
contents=f"{self._mock_num_logical_cpus}-"
+ f"{self._mock_num_logical_cpus + num_l3_caches_per_numa_node * num_physical_core_per_l3_cache * 2 - 1}",
)
for gpu_index in range(
len(self._mock_device_properties),
len(self._mock_device_properties) + num_gpus_per_numa_node,
):
device_properties = MockDeviceProperties(
name=f"mock_gpu_{gpu_index}",
major=8,
minor=0,
total_memory="512GB",
multi_processor_count=256,
uuid=f"mock_gpu_uuid_{gpu_index}",
pci_bus_id=gpu_index,
pci_device_id=gpu_index,
pci_domain_id=gpu_index,
L2_cache_size="40MB",
)
self._mock_device_properties.append(device_properties)
pci_numa_node_path = (
self._get_corresponding_pci_numa_node_file_path(
device_properties=device_properties
)
)
self._mock_file_contents(
file_path=pci_numa_node_path,
contents=str(numa_node_index),
)
for _ in range(num_l3_caches_per_numa_node):
lowest_logical_cpu_index_on_l3 = self._mock_num_logical_cpus
highest_logical_cpu_index_on_l3 = (
self._mock_num_logical_cpus
+ 2 * num_physical_core_per_l3_cache
- 1
)
for logical_cpu_index in range(
self._mock_num_logical_cpus,
self._mock_num_logical_cpus
# Assume hyperthreaded
+ 2 * num_physical_core_per_l3_cache,
):
thread_siblings_range_str = (
f"{logical_cpu_index - 1}-{logical_cpu_index}"
if logical_cpu_index % 2
else f"{logical_cpu_index}-{logical_cpu_index + 1}"
)
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/topology/thread_siblings_list",
contents=thread_siblings_range_str,
)
# Unrelated file our logic should know to skip
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/cache/paulwuzhere",
contents="Data",
)
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/topology/physical_package_id",
contents=str(socket_index),
)
for cache_level in range(5):
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/cache/index{cache_level}/type",
contents="ShouldSkip" if cache_level == 4 else "Data",
)
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/cache/index{cache_level}/level",
contents=str(cache_level),
)
self._mock_file_contents(
file_path=f"/sys/devices/system/cpu/cpu{logical_cpu_index}/cache/index{cache_level}/shared_cpu_list",
contents=(
f"{lowest_logical_cpu_index_on_l3}-{highest_logical_cpu_index_on_l3}"
if cache_level == 3
# Assume L1-2 are per physical core
else thread_siblings_range_str
),
)
self._mock_num_logical_cpus += 1
self._mock_num_numa_nodes += 1
self._mock_num_sockets += 1
self._mock_file_contents(
file_path="/sys/devices/system/node/possible",
contents=f"0-{self._mock_num_numa_nodes - 1}",
)
def _mock_is_available(self) -> bool:
return len(self._mock_device_properties) > 0
def _get_corresponding_pci_numa_node_file_path(
self, *, device_properties: MockDeviceProperties
) -> str:
pci_addr = (
f"{device_properties.pci_domain_id:04x}:"
+ f"{device_properties.pci_bus_id:02x}:{device_properties.pci_device_id:02x}.0"
)
return f"/sys/bus/pci/devices/{pci_addr}/numa_node"
def _mock_file_contents(self, *, file_path: str, contents: str) -> None:
self._mock_file_path_to_contents[file_path] = contents
def _mock_device_count(self) -> int:
return len(self._mock_device_properties)
def _mock_get_device_properties(self, index: int) -> MockDeviceProperties:
return self._mock_device_properties[index]
def _mock_open(self, path: str, *args, **kwargs) -> Any:
if path in self._mock_file_path_to_contents:
return mock_open(read_data=self._mock_file_path_to_contents[path])()
if isinstance(path, str) and path.startswith("/sys/"):
raise FileNotFoundError(f"File {path} was not mocked.")
# Looks like CI is calling open and intending to open an actual file in some places.
# Need this to make the CI pass.
return _real_open(path, *args, **kwargs)
def _mock_listdir(self, target_path: str) -> set[str]:
if not target_path.endswith("/"):
target_path += "/"
return {
mock_path.split(target_path)[1].split("/")[0]
for mock_path in self._mock_file_path_to_contents
if mock_path.startswith(target_path)
}
def _mock_sched_getaffinity(self, pid: int) -> set[int]:
return set(range(self._mock_num_logical_cpus))
def _start_processes_for_str_entrypoint_and_get_command_args(
self, *, numa_options: Optional[NumaOptions], target_local_rank: int
) -> tuple[str, ...]:
with patch(
"torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler.Popen"
) as mock_popen:
start_processes(
name="test_process",
entrypoint="echo",
args=dict.fromkeys(
range(self._mock_device_count()), ("Hello, world!",)
),
envs={
i: {"LOCAL_RANK": str(i)} for i in range(self._mock_device_count())
},
logs_specs=DefaultLogsSpecs(),
numa_options=numa_options,
)
call_args = next(
call_args
for call_args in mock_popen.call_args_list
if call_args.kwargs.get("env", {}).get("LOCAL_RANK")
== str(target_local_rank)
)
return call_args.kwargs["args"]
def _start_processes_for_callable_entrypoint_and_get_sched_setaffinity_cpus(
self, *, numa_options: Optional[NumaOptions], target_local_rank: int
) -> Optional[set[int]]:
target_sched_setaffinity_logical_cpu_indices = None
def mock_sched_setaffinity(*args, **kwargs) -> None:
nonlocal target_sched_setaffinity_logical_cpu_indices
target_sched_setaffinity_logical_cpu_indices = args[1]
def dummy_fn():
pass
import torch.multiprocessing as mp
ctx = mp.get_context()
mock_queue = ctx.SimpleQueue()
mock_event = ctx.Event()
with patch("os.sched_setaffinity", mock_sched_setaffinity):
mock_event.set() # Prevent hanging
# This is the entrypoint for subprocesses with Callable entrypoints
_wrap(
local_rank=target_local_rank,
fn=dummy_fn,
args={target_local_rank: ()},
envs={target_local_rank: {}},
stdout_redirects={target_local_rank: ""},
stderr_redirects={target_local_rank: ""},
ret_vals={target_local_rank: mock_queue},
queue_finished_reading_event=mock_event,
numa_options=numa_options,
)
return target_sched_setaffinity_logical_cpu_indices
def test_node_numa_binding(self) -> None:
self._add_mock_hardware(
num_sockets=4,
num_numa_nodes_per_socket=2,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=4,
num_physical_core_per_l3_cache=2,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.NODE),
target_local_rank=11,
)
self.assertEqual(
command_args,
# There are 8 numa nodes and 2 GPUs per numa node, so GPU 11 would be
# on numa node 11 // 2 = 5.
# Each numa node has 4 * 2 * 2 = 16 logical CPUs
# Numa node 5 has CPUs 80-95
("numactl", "--physcpubind=80-95", "echo", "Hello, world!"),
)
def test_no_numa_binding_if_numa_options_not_provided(self) -> None:
self._add_mock_hardware(
num_sockets=4,
num_numa_nodes_per_socket=2,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=4,
num_physical_core_per_l3_cache=2,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=None, target_local_rank=11
)
self.assertEqual(
command_args,
("echo", "Hello, world!"),
)
def test_default_numa_binding(self) -> None:
# Inner import to avoid crashing if not torch.distributed.is_available()
from torch.distributed.launcher.api import LaunchConfig
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with patch(
"torch.distributed.launcher.api.get_default_numa_options",
return_value=NumaOptions(
affinity_mode=AffinityMode.NODE, should_fall_back_if_binding_fails=True
),
):
launch_config = LaunchConfig(
min_nodes=1,
max_nodes=1,
nproc_per_node=1,
# Don't provide numa_options
)
self.assertEqual(
launch_config.numa_options,
NumaOptions(
affinity_mode=AffinityMode.NODE, should_fall_back_if_binding_fails=True
),
)
def test_fallback(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with (
patch("torch.numa.binding.signpost_event") as signpost_patch,
patch(
"torch.numa.binding._get_numa_node_index_for_gpu_index",
side_effect=Exception("Mock exception!"),
),
):
command_args = (
self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(
affinity_mode=AffinityMode.NODE,
should_fall_back_if_binding_fails=True,
),
target_local_rank=0,
)
)
self.assertIn(
"Mock exception!",
signpost_patch.call_args.kwargs["parameters"]["traceback"],
)
self.assertEqual(
command_args,
("echo", "Hello, world!"),
)
def test_fallback_if_numactl_not_available(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with (
patch("torch.numa.binding.signpost_event") as signpost_patch,
patch("torch.numa.binding.shutil.which", return_value=None),
):
command_args = (
self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(
affinity_mode=AffinityMode.NODE,
should_fall_back_if_binding_fails=True,
),
target_local_rank=0,
)
)
self.assertIn(
"numactl CLI is required for NUMA binding",
signpost_patch.call_args.kwargs["parameters"]["traceback"],
)
self.assertEqual(
command_args,
("echo", "Hello, world!"),
)
def test_explicit_numa_options_overrides_default(self) -> None:
# Inner import to avoid crashing if not torch.distributed.is_available()
from torch.distributed.launcher.api import LaunchConfig
with patch(
"torch.distributed.launcher.api.get_default_numa_options",
return_value=NumaOptions(affinity_mode=AffinityMode.NODE),
):
launch_config = LaunchConfig(
min_nodes=1,
max_nodes=1,
nproc_per_node=1,
numa_options=NumaOptions(affinity_mode=AffinityMode.EXCLUSIVE),
)
self.assertEqual(
launch_config.numa_options,
NumaOptions(affinity_mode=AffinityMode.EXCLUSIVE),
)
def test_nproc_must_equal_cuda_device_count_to_use_default_numa_options(
self,
) -> None:
# Inner import to avoid crashing if not torch.distributed.is_available()
from torch.distributed.launcher.api import LaunchConfig
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with patch(
"torch.distributed.launcher.api.get_default_numa_options"
) as mock_get_default_numa_options:
launch_config = LaunchConfig(
min_nodes=1,
max_nodes=1,
nproc_per_node=2,
)
mock_get_default_numa_options.assert_not_called()
self.assertIsNone(launch_config.numa_options)
def test_socket_numa_binding_with_multiple_numa_per_socket(self) -> None:
self._add_mock_hardware(
num_sockets=4,
num_numa_nodes_per_socket=2,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=4,
num_physical_core_per_l3_cache=2,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.SOCKET),
target_local_rank=15,
)
self.assertEqual(
command_args,
# GPU 15 is on numa node 15 // 2 = 7, which is on socket 3 (numa nodes 6 and 7)
# Each numa node has 4 * 2 * 2 = 16 logical CPUs
# Numa nodes 6 and 7 have CPUs 96-111 and 112-127
("numactl", "--physcpubind=96-127", "echo", "Hello, world!"),
)
def test_socket_numa_binding_with_single_numa_per_socket(self) -> None:
self._add_mock_hardware(
num_sockets=4,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=4,
num_physical_core_per_l3_cache=2,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.SOCKET),
target_local_rank=7,
)
self.assertEqual(
command_args,
# GPU 7 is on numa node 7 // 2 = 3, which is socket 3 by itself
# Each numa node has 4 * 2 * 2 = 16 logical CPUs
# Numa node 3 has CPUs 48-63
("numactl", "--physcpubind=48-63", "echo", "Hello, world!"),
)
def test_exclusive_numa_binding(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=3,
)
command_args_0 = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.EXCLUSIVE),
target_local_rank=0,
)
self.assertEqual(
command_args_0,
# Gets an extra physical core due to odd number of physical cores on numa node
# 3 physical cores total, 2 GPUs: GPU 0 gets 2 physical cores (CPUs 0-3)
("numactl", "--physcpubind=0-3", "echo", "Hello, world!"),
)
command_args_1 = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.EXCLUSIVE),
target_local_rank=1,
)
self.assertEqual(
command_args_1,
# Does not get an extra physical core, since the 1st GPU already took the extra.
# GPU 1 gets 1 physical core (CPUs 4-5)
("numactl", "--physcpubind=4-5", "echo", "Hello, world!"),
)
def test_exclusive_raises_if_too_few_physical_cores(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with self.assertRaisesRegex(
RuntimeError,
"There are only 1 physical cores on numa_node_index=0, but there are 2 GPUs associated with this NUMA node.",
):
self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.EXCLUSIVE),
target_local_rank=1,
)
def test_core_complex_numa_binding_with_extra_l3(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=3,
num_physical_core_per_l3_cache=3,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.CORE_COMPLEX),
target_local_rank=3,
)
self.assertEqual(
command_args,
# GPU 3 is on numa node 3 // 2 = 1, relative GPU index is 3 % 2 = 1
# The second L3 on the second numa node (numa node 1)
# Second numa node starts at CPU 18, second L3 cache is CPUs 24-29
("numactl", "--physcpubind=24-29", "echo", "Hello, world!"),
)
def test_core_complex_numa_binding_with_fewer_l3_than_gpu(self) -> None:
self._add_mock_hardware(
num_sockets=2,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=3,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.CORE_COMPLEX),
target_local_rank=3,
)
self.assertEqual(
command_args,
# GPU 3 is on numa node 3 // 2 = 1, relative GPU index is 3 % 2 = 1
# With 1 L3 cache per numa node, GPU 3 uses L3 cache index 1 % 1 = 0 (the only cache)
# Second numa node starts at CPU 6, single L3 cache spans CPUs 6-11
("numactl", "--physcpubind=6-11", "echo", "Hello, world!"),
)
def test_core_complex_prefers_caches_with_more_cpus(self) -> None:
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=3,
num_physical_core_per_l3_cache=3,
)
# Only some subset of the CPUs are available this time.
with patch("os.sched_getaffinity", return_value={0, 4, 6, 7, 9}):
command_args = (
self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.CORE_COMPLEX),
target_local_rank=0,
)
)
# Binds to the second L3 because it has the most available CPUs
self.assertEqual(
command_args,
("numactl", "--physcpubind=6-7,9", "echo", "Hello, world!"),
)
def test_core_complex_tiebreak_prefers_lower_cache_key(self) -> None:
"""
When several max‑level caches expose the same number of logical CPUs,
prioritize binding to caches with lower cpu indices first.
"""
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=2,
num_physical_core_per_l3_cache=1,
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.CORE_COMPLEX),
target_local_rank=0,
)
# 1 numa node, 2 L3 caches, 1 physical core per L3 cache = 2 logical CPUs per cache
# L3 cache 0: CPUs 0-1, L3 cache 1: CPUs 2-3
# Both have same number of CPUs, so prefer lower cache key (0)
self.assertEqual(
command_args,
("numactl", "--physcpubind=0-1", "echo", "Hello, world!"),
)
def test_binds_to_node_0_if_node_stored_as_minus_one(self) -> None:
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
device_0_properties = self._mock_get_device_properties(0)
# Overwrite the existing mock file
self._mock_file_contents(
file_path=self._get_corresponding_pci_numa_node_file_path(
device_properties=device_0_properties
),
contents="-1",
)
command_args = self._start_processes_for_str_entrypoint_and_get_command_args(
numa_options=NumaOptions(affinity_mode=AffinityMode.NODE),
target_local_rank=0,
)
# GPU 0 has numa node stored as -1, which is treated as numa node 0
# Each numa node has 1 * 1 * 2 = 2 logical CPUs
# Numa node 0 has CPUs 0-1
self.assertEqual(
command_args,
("numactl", "--physcpubind=0-1", "echo", "Hello, world!"),
)
def test_callable_entrypoint_basic(self) -> None:
self._add_mock_hardware(
num_sockets=4,
num_numa_nodes_per_socket=2,
num_gpus_per_numa_node=2,
num_l3_caches_per_numa_node=4,
num_physical_core_per_l3_cache=2,
)
bound_logical_cpu_indices = self._start_processes_for_callable_entrypoint_and_get_sched_setaffinity_cpus(
numa_options=NumaOptions(affinity_mode=AffinityMode.NODE),
target_local_rank=11,
)
self.assertEqual(
bound_logical_cpu_indices,
# There are 8 numa nodes and 2 GPUs per numa node, so GPU 11 would be
# on numa node 11 // 2 = 5.
# Each numa node has 4 * 2 * 2 = 16 logical CPUs
# Numa node 5 has CPUs 80-95
set(range(80, 96)),
)
def test_raises_if_binding_to_empty_set(self) -> None:
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
with (
patch(
"torch.numa.binding._get_logical_cpus_to_bind_to", return_value=set()
),
self.assertRaisesRegex(
RuntimeError, "Must bind to a non-empty set of CPU indices"
),
):
self._start_processes_for_callable_entrypoint_and_get_sched_setaffinity_cpus(
numa_options=NumaOptions(affinity_mode=AffinityMode.NODE),
target_local_rank=0,
)
def test_get_set_of_int_from_ranges_str(self) -> None:
self.assertEqual(
_get_set_of_int_from_ranges_str("0-2,4,6-7"), {0, 1, 2, 4, 6, 7}
)
def test_get_range_str_from_ints(self) -> None:
self.assertEqual(_get_ranges_str_from_ints([7, 0, 1, 6, 2, 4]), "0-2,4,6-7")
def test_bind_all_threads_in_current_process_to_logical_cpus(self) -> None:
self._add_mock_hardware(
num_sockets=1,
num_numa_nodes_per_socket=1,
num_gpus_per_numa_node=1,
num_l3_caches_per_numa_node=1,
num_physical_core_per_l3_cache=1,
)
self._mock_file_contents(file_path="/proc/self/task/8675309", contents="")
self._mock_file_contents(file_path="/proc/self/task/1111111", contents="")
self._mock_file_contents(
# The exception from casting not_an_integer to int should get silenced.
file_path="/proc/self/task/not_an_integer",
contents="",
)
# Mock the original affinity for all threads
original_main_thread_affinity = set(range(8))
manually_reaffinitized_thread_affinity = {0}
def mock_sched_getaffinity_impl(tid: int) -> set[int]:
if tid == 1111111:
return manually_reaffinitized_thread_affinity
return original_main_thread_affinity
call_order = []
def mock_sched_setaffinity_impl(tid: int, cpus: set[int]) -> None:
call_order.append((tid, cpus))
with (
patch("os.sched_getaffinity", side_effect=mock_sched_getaffinity_impl),
patch(
"os.sched_setaffinity", side_effect=mock_sched_setaffinity_impl
) as mock_sched_setaffinity,
):
_bind_all_threads_in_current_process_to_logical_cpus(
logical_cpu_indices={2, 0, 9} # arbitrary
)
# Should set affinity for main thread (0) and thread 8675309 (same affinity as main)
# Should NOT set affinity for thread 1111111 (manually reaffinitized)
self.assertEqual(mock_sched_setaffinity.call_count, 2)
mock_sched_setaffinity.assert_any_call(0, {2, 0, 9})
mock_sched_setaffinity.assert_any_call(8675309, {2, 0, 9})
# Verify that thread 1111111 was NOT reaffinitized
for tid, _ in call_order:
self.assertNotEqual(
tid, 1111111, "Thread 1111111 should not have been reaffinitized"
)
if __name__ == "__main__":
run_tests()
| NumaBindingTest |
python | Lightning-AI__lightning | src/lightning/pytorch/_graveyard/tpu.py | {
"start": 2866,
"end": 3319
} | class ____(XLAPrecision):
"""Legacy class.
Use :class:`~lightning.pytorch.plugins.precision.xlabf16.XLAPrecision` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
rank_zero_deprecation(
"The `TPUBf16PrecisionPlugin` class is deprecated. Use"
" `lightning.pytorch.plugins.precision.XLAPrecision` instead."
)
super().__init__(precision="bf16-true")
| TPUBf16PrecisionPlugin |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 39095,
"end": 40218
} | class ____:
def __init__(
self, token: milvus_types.AnalyzerToken, with_hash: bool = False, with_detail: bool = False
):
self.dict = {"token": token.token}
if with_detail:
self.dict["start_offset"] = token.start_offset
self.dict["end_offset"] = token.end_offset
self.dict["position"] = token.position
self.dict["position_length"] = token.position_length
if with_hash:
self.dict["hash"] = token.hash
@property
def token(self):
return self.dict["token"]
@property
def start_offset(self):
return self.dict["start_offset"]
@property
def end_offset(self):
return self.dict["end_offset"]
@property
def position(self):
return self.dict["position"]
@property
def position_length(self):
return self.dict["position_length"]
@property
def hash(self):
return self.dict["hash"]
def __getitem__(self, key: str):
return self.dict[key]
def __str__(self):
return str(self.dict)
__repr__ = __str__
| AnalyzeToken |
python | numba__numba | numba/core/rewrites/static_binop.py | {
"start": 132,
"end": 1146
} | class ____(Rewrite):
"""
Detect constant arguments to select binops.
"""
# Those operators can benefit from a constant-inferred argument
rhs_operators = {'**'}
def match(self, func_ir, block, typemap, calltypes):
self.static_lhs = {}
self.static_rhs = {}
self.block = block
# Find binop expressions with a constant lhs or rhs
for expr in block.find_exprs(op='binop'):
try:
if (expr.fn in self.rhs_operators
and expr.static_rhs is ir.UNDEFINED):
self.static_rhs[expr] = func_ir.infer_constant(expr.rhs)
except errors.ConstantInferenceError:
continue
return len(self.static_lhs) > 0 or len(self.static_rhs) > 0
def apply(self):
"""
Store constant arguments that were detected in match().
"""
for expr, rhs in self.static_rhs.items():
expr.static_rhs = rhs
return self.block
| DetectStaticBinops |
python | realpython__materials | python-pydantic/pydantic_models.py | {
"start": 211,
"end": 317
} | class ____(Enum):
HR = "HR"
SALES = "SALES"
IT = "IT"
ENGINEERING = "ENGINEERING"
| Department |
python | realpython__materials | intro-to-threading/prodcom_queue.py | {
"start": 122,
"end": 1855
} | class ____(queue.Queue):
def __init__(self):
super().__init__(maxsize=10)
def get_message(self, name):
logging.debug("%s:about to get from queue", name)
value = self.get()
logging.debug("%s:got %d from queue", name, value)
return value
def set_message(self, value, name):
logging.debug("%s:about to add %d to queue", name, value)
self.put(value)
logging.debug("%s:added %d to queue", name, value)
def producer(pipeline, event):
"""Pretend we're getting a number from the network."""
while not event.is_set():
message = random.randint(1, 101)
logging.info("Producer got message: %s", message)
pipeline.set_message(message, "Producer")
logging.info("Producer received EXIT event. Exiting")
def consumer(pipeline, event):
"""Pretend we're saving a number in the database."""
while not event.is_set() or not pipeline.empty():
message = pipeline.get_message("Consumer")
logging.info(
"Consumer storing message: %s (queue size=%s)",
message,
pipeline.qsize(),
)
logging.info("Consumer received EXIT event. Exiting")
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
# logging.getLogger().setLevel(logging.DEBUG)
pipeline = Pipeline()
event = threading.Event()
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(producer, pipeline, event)
executor.submit(consumer, pipeline, event)
time.sleep(0.1)
logging.info("Main: about to set event")
event.set()
| Pipeline |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/lemonldap/tests.py | {
"start": 246,
"end": 725
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = LemonLDAPProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"email": "dwho@example.com",
"sub": "dwho",
"preferred_username": "dwho",
"name": "Doctor Who"
}
""",
)
def get_expected_to_str(self):
return "dwho@example.com"
| LemonLDAPTests |
python | scipy__scipy | scipy/_build_utils/tempita/_tempita.py | {
"start": 1414,
"end": 1930
} | class ____(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
| TemplateError |
python | GoogleCloudPlatform__python-docs-samples | dataflow/run-inference/main.py | {
"start": 2067,
"end": 4920
} | class ____(beam.PTransform):
"""Asks an language model a prompt message and gets its responses.
Attributes:
model_name: HuggingFace model name compatible with AutoModelForSeq2SeqLM.
state_dict_path: File path to the model's state_dict, can be in Cloud Storage.
max_response_tokens: Maximum number of tokens for the model to generate.
"""
def __init__(
self,
model_name: str,
state_dict_path: str,
max_response_tokens: int = MAX_RESPONSE_TOKENS,
) -> None:
self.model_handler = PytorchModelHandlerTensor(
state_dict_path=state_dict_path,
model_class=AutoModelForSeq2SeqLM.from_config,
model_params={"config": AutoConfig.from_pretrained(model_name)},
inference_fn=make_tensor_model_fn("generate"),
)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.max_response_tokens = max_response_tokens
def expand(self, pcollection: beam.PCollection[str]) -> beam.PCollection[str]:
return (
pcollection
| "To tensors" >> beam.Map(to_tensors, self.tokenizer)
| "RunInference"
>> RunInference(
self.model_handler,
inference_args={"max_new_tokens": self.max_response_tokens},
)
| "Get response" >> beam.Map(decode_response, self.tokenizer)
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--messages-topic",
required=True,
help="Pub/Sub topic for input text messages",
)
parser.add_argument(
"--responses-topic",
required=True,
help="Pub/Sub topic for output text responses",
)
parser.add_argument(
"--model-name",
required=True,
help="HuggingFace model name compatible with AutoModelForSeq2SeqLM",
)
parser.add_argument(
"--state-dict-path",
required=True,
help="File path to the model's state_dict, can be in Cloud Storage",
)
args, beam_args = parser.parse_known_args()
logging.getLogger().setLevel(logging.INFO)
beam_options = PipelineOptions(
beam_args,
pickle_library="cloudpickle",
streaming=True,
)
simple_name = args.model_name.split("/")[-1]
pipeline = beam.Pipeline(options=beam_options)
_ = (
pipeline
| "Read from Pub/Sub" >> beam.io.ReadFromPubSub(args.messages_topic)
| "Decode bytes" >> beam.Map(lambda msg: msg.decode("utf-8"))
| f"Ask {simple_name}" >> AskModel(args.model_name, args.state_dict_path)
| "Encode bytes" >> beam.Map(lambda msg: msg.encode("utf-8"))
| "Write to Pub/Sub" >> beam.io.WriteToPubSub(args.responses_topic)
)
pipeline.run()
| AskModel |
python | Netflix__metaflow | test/core/tests/card_timeout.py | {
"start": 72,
"end": 1843
} | class ____(MetaflowTest):
"""
Test that checks if the card decorator works as intended with the timeout decorator.
# This test set an artifact in the steps and also set a timeout to the card argument.
# It will assert the artifact to be None.
"""
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag(
'card(type="test_timeout_card",timeout=10,options=dict(timeout=20),save_errors=False)'
)
@steps(0, ["start"])
def step_start(self):
from metaflow import current
self.task = current.pathspec
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
for step in flow:
if step.name != "start":
continue
if run is None:
# This means CliCheck is in context.
cli_check_dict = checker.artifact_dict(step.name, "task")
for task_pathspec in cli_check_dict:
task_id = task_pathspec.split("/")[-1]
checker.assert_card(
step.name,
task_id,
"timeout_card",
None,
)
else:
# This means MetadataCheck is in context.
meta_check_dict = checker.artifact_dict(step.name, "task")
for task_id in meta_check_dict:
checker.assert_card(step.name, task_id, "timeout_card", None)
| CardTimeoutTest |
python | django__django | tests/model_options/apps.py | {
"start": 224,
"end": 346
} | class ____(AppConfig):
name = "model_options"
default_auto_field = "django.db.models.TextField"
| ModelPKNonAutoConfig |
python | ray-project__ray | python/ray/train/v2/_internal/execution/worker_group/worker_group.py | {
"start": 2372,
"end": 3177
} | class ____:
"""Context for a worker group.
This stores the context that is shared when starting a worker group.
Attributes:
run_attempt_id: The ID of the run attempt.
train_fn_ref: An object store reference to the training function to execute.
num_workers: The number of workers in the worker group.
resources_per_worker: The resources per worker.
placement_strategy: Strategy for placing workers.
bundle_label_selector: Optional label selectors to apply per-bundle for workers.
"""
run_attempt_id: str
train_fn_ref: ObjectRefWrapper[Callable[[], None]]
num_workers: int
resources_per_worker: Dict[str, float]
placement_strategy: str = "PACK"
bundle_label_selector: Optional[Dict[str, str]] = None
| WorkerGroupContext |
python | PyCQA__pylint | tests/functional/r/regression/regression_6531_crash_index_error.py | {
"start": 152,
"end": 707
} | class ____:
def __init__(self):
self.balance = 0
def add_cash(self, earned):
self.balance += earned
def spend_cash(self, spent):
self.balance -= spent
@pytest.fixture
def my_wallet():
'''Returns a Wallet instance with a zero balance'''
return Wallet()
@pytest.mark.parametrize("earned,spent,expected", [
(30, 10, 20),
(20, 2, 18),
])
def test_transactions(my_wallet, earned, spent, expected):
my_wallet.add_cash(earned)
my_wallet.spend_cash(spent)
assert my_wallet.balance == expected
| Wallet |
python | python-poetry__poetry | src/poetry/console/exceptions.py | {
"start": 356,
"end": 436
} | class ____(PoetryConsoleError):
pass
@dataclasses.dataclass
| GroupNotFoundError |
python | google__jax | jax/_src/source_info_util.py | {
"start": 8502,
"end": 9027
} | class ____(contextlib.ContextDecorator):
__slots__ = ['name', 'prev']
def __init__(self, name: str):
self.name = name
def __enter__(self):
self.prev = prev = _source_info_context.context
name_stack = prev.name_stack.extend(self.name)
_source_info_context.context = prev.replace(name_stack=name_stack)
return name_stack
def __exit__(self, exc_type, exc_value, traceback):
_source_info_context.context = self.prev
extend_name_stack = ExtendNameStackContextManager
| ExtendNameStackContextManager |
python | huggingface__transformers | src/transformers/data/processors/squad.py | {
"start": 22937,
"end": 23045
} | class ____(SquadProcessor):
train_file = "train-v1.1.json"
dev_file = "dev-v1.1.json"
| SquadV1Processor |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 1785,
"end": 3858
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user("test", "test@example.com", "test")
def test_get_user_permissions(self):
self.assertEqual(self.user.get_user_permissions(), {"user_perm"})
async def test_aget_user_permissions(self):
self.assertEqual(await self.user.aget_user_permissions(), {"user_perm"})
def test_get_group_permissions(self):
self.assertEqual(self.user.get_group_permissions(), {"group_perm"})
async def test_aget_group_permissions(self):
self.assertEqual(await self.user.aget_group_permissions(), {"group_perm"})
def test_get_all_permissions(self):
self.assertEqual(self.user.get_all_permissions(), {"user_perm", "group_perm"})
async def test_aget_all_permissions(self):
self.assertEqual(
await self.user.aget_all_permissions(), {"user_perm", "group_perm"}
)
def test_has_perm(self):
self.assertIs(self.user.has_perm("user_perm"), True)
self.assertIs(self.user.has_perm("group_perm"), True)
self.assertIs(self.user.has_perm("other_perm", TestObj()), False)
async def test_ahas_perm(self):
self.assertIs(await self.user.ahas_perm("user_perm"), True)
self.assertIs(await self.user.ahas_perm("group_perm"), True)
self.assertIs(await self.user.ahas_perm("other_perm", TestObj()), False)
def test_has_perms_perm_list_invalid(self):
msg = "perm_list must be an iterable of permissions."
with self.assertRaisesMessage(ValueError, msg):
self.user.has_perms("user_perm")
with self.assertRaisesMessage(ValueError, msg):
self.user.has_perms(object())
async def test_ahas_perms_perm_list_invalid(self):
msg = "perm_list must be an iterable of permissions."
with self.assertRaisesMessage(ValueError, msg):
await self.user.ahas_perms("user_perm")
with self.assertRaisesMessage(ValueError, msg):
await self.user.ahas_perms(object())
| BaseBackendTest |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 12527,
"end": 12876
} | class ____(ElementMaker):
def __init__(self, namespaceHTMLElements=True):
initargs = dict(makeelement=html_parser.makeelement)
if namespaceHTMLElements:
initargs.update(namespace=XHTML_NAMESPACE,
nsmap={None: XHTML_NAMESPACE})
ElementMaker.__init__(self, **initargs)
| HTMLElementMaker |
python | huggingface__transformers | src/transformers/utils/quantization_config.py | {
"start": 2507,
"end": 6800
} | class ____:
"""
Mixin class for quantization config
"""
quant_method: QuantizationMethod
@classmethod
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
"""
Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.
Args:
config_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
return_unused_kwargs (`bool`,*optional*, defaults to `False`):
Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in
`PreTrainedModel`.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
if return_unused_kwargs:
return config, kwargs
else:
return config
def to_json_file(self, json_file_path: str | os.PathLike):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default
`QuantizationConfig()` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
writer.write(json_string)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return copy.deepcopy(self.__dict__)
def __iter__(self):
"""allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
for attr, value in copy.deepcopy(self.__dict__).items():
yield attr, value
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PreTrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# Remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
@dataclass
| QuantizationConfigMixin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py | {
"start": 10951,
"end": 11593
} | class ____(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
_use_diag_update = True
_is_diag_update_positive = True
_use_v = True
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
@test_util.run_all_without_tensor_float_32(
"Linear op calls matmul which uses TensorFloat-32.")
| LinearOperatorLowRankUpdatetestWithDiagNotSquare |
python | ray-project__ray | python/ray/train/v2/_internal/exceptions.py | {
"start": 2803,
"end": 3038
} | class ____(RayTrainError):
"""Exception raised when the worker group fails to start.
Example scenario: A worker is scheduled onto a node that dies while
the worker actor is initializing.
"""
| WorkerGroupStartupFailedError |
python | huggingface__transformers | src/transformers/models/fsmt/tokenization_fsmt.py | {
"start": 3225,
"end": 17873
} | class ____(PreTrainedTokenizer):
"""
Construct an FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
"__classify__") to a vocabulary.
- The argument `langs` defines a pair of languages.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
langs (`List[str]`, *optional*):
A list of two languages to translate from and to, for instance `["en", "ru"]`.
src_vocab_file (`str`, *optional*):
File containing the vocabulary for the source language.
tgt_vocab_file (`st`, *optional*):
File containing the vocabulary for the target language.
merges_file (`str`, *optional*):
File containing the merges.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
langs=None,
src_vocab_file=None,
tgt_vocab_file=None,
merges_file=None,
do_lower_case=False,
unk_token="<unk>",
bos_token="<s>",
sep_token="</s>",
pad_token="<pad>",
**kwargs,
):
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use XLMTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.sm = sacremoses
self.src_vocab_file = src_vocab_file
self.tgt_vocab_file = tgt_vocab_file
self.merges_file = merges_file
self.do_lower_case = do_lower_case
# cache of sm.MosesPunctNormalizer instance
self.cache_moses_punct_normalizer = {}
# cache of sm.MosesTokenizer instance
self.cache_moses_tokenizer = {}
self.cache_moses_detokenizer = {}
if langs and len(langs) == 2:
self.src_lang, self.tgt_lang = langs
else:
raise ValueError(
f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
"Usually that means that tokenizer can't find a mapping for the given model path "
"in and other maps of this tokenizer."
)
with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
self.encoder = json.load(src_vocab_handle)
with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
tgt_vocab = json.load(tgt_vocab_handle)
self.decoder = {v: k for k, v in tgt_vocab.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(
langs=langs,
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
bos_token=bos_token,
sep_token=sep_token,
pad_token=pad_token,
**kwargs,
)
# hack override
def get_vocab(self) -> dict[str, int]:
return self.get_src_vocab()
# hack override
@property
def vocab_size(self) -> int:
return self.src_vocab_size
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
return self.cache_moses_punct_normalizer[lang].normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
return self.cache_moses_tokenizer[lang].tokenize(
text, aggressive_dash_splits=True, return_str=False, escape=True
)
def moses_detokenize(self, tokens, lang):
if lang not in self.cache_moses_detokenizer:
moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
self.cache_moses_detokenizer[lang] = moses_detokenizer
return self.cache_moses_detokenizer[lang].detokenize(tokens)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
@property
def src_vocab_size(self):
return len(self.encoder)
@property
def tgt_vocab_size(self):
return len(self.decoder)
def get_src_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def get_tgt_vocab(self):
return dict(self.decoder, **self.added_tokens_decoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + "</w>",)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
if word == "\n </w>":
word = "\n</w>"
self.cache[token] = word
return word
def _tokenize(self, text, lang="en", bypass_tokenizer=False):
"""
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
- lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
languages. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
Returns:
List of tokens.
"""
# ignore `lang` which is currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
# if lang != self.src_lang:
# raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
lang = self.src_lang
if self.do_lower_case:
text = text.lower()
if bypass_tokenizer:
text = text.split()
else:
text = self.moses_pipeline(text, lang=lang)
text = self.moses_tokenize(text, lang=lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# remove BPE
tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
tokens = "".join(tokens).split()
# detokenize
text = self.moses_detokenize(tokens, self.tgt_lang)
return text
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A FAIRSEQ Transformer sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
sep = [self.sep_token_id]
# no bos used in fairseq
if token_ids_1 is None:
return token_ids_0 + sep
return token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# no bos used in fairseq
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return ([0] * len(token_ids_0)) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
src_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
)
tgt_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
)
merges_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(src_vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
with open(tgt_vocab_file, "w", encoding="utf-8") as f:
tgt_vocab = {v: k for k, v in self.decoder.items()}
f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merges_file, "w", encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return src_vocab_file, tgt_vocab_file, merges_file
def __getstate__(self):
state = self.__dict__.copy()
state["sm"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sacremoses
except ImportError:
raise ImportError(
"You need to install sacremoses to use XLMTokenizer. "
"See https://pypi.org/project/sacremoses/ for installation."
)
self.sm = sacremoses
__all__ = ["FSMTTokenizer"]
| FSMTTokenizer |
python | davidhalter__jedi | jedi/api/helpers.py | {
"start": 2374,
"end": 6787
} | class ____(Exception):
@property
def error_leaf(self):
return self.args[0]
def _get_code_for_stack(code_lines, leaf, position):
# It might happen that we're on whitespace or on a comment. This means
# that we would not get the right leaf.
if leaf.start_pos >= position:
# If we're not on a comment simply get the previous leaf and proceed.
leaf = leaf.get_previous_leaf()
if leaf is None:
return '' # At the beginning of the file.
is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
leaf = leaf.get_previous_leaf()
if leaf is None:
return ''
if leaf.type == 'error_leaf' or leaf.type == 'string':
if leaf.start_pos[0] < position[0]:
# On a different line, we just begin anew.
return ''
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(leaf)
else:
user_stmt = leaf
while True:
if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
break
user_stmt = user_stmt.parent
if is_after_newline:
if user_stmt.start_pos[1] > position[1]:
# This means that it's actually a dedent and that means that we
# start without value (part of a suite).
return ''
# This is basically getting the relevant lines.
return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, leaf, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
# TODO This is for now not an official parso API that exists purely
# for Jedi.
tokens = grammar._tokenize(code)
for token in tokens:
if token.string == safeword:
raise EndMarkerReached()
elif token.prefix.endswith(safeword):
# This happens with comments.
raise EndMarkerReached()
elif token.string.endswith(safeword):
yield token # Probably an f-string literal that was not finished.
raise EndMarkerReached()
else:
yield token
# The code might be indedented, just remove it.
code = dedent(_get_code_for_stack(code_lines, leaf, pos))
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + ' ' + safeword
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return p.stack
raise SystemError(
"This really shouldn't happen. There's a bug in Jedi:\n%s"
% list(tokenize_without_endmarker(code))
)
def infer(inference_state, context, leaf):
if leaf.type == 'name':
return inference_state.infer(context, leaf)
parent = leaf.parent
definitions = NO_VALUES
if parent.type == 'atom':
# e.g. `(a + b)`
definitions = context.infer_node(leaf.parent)
elif parent.type == 'trailer':
# e.g. `a()`
definitions = infer_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
# e.g. `"foo"` or `1.0`
return infer_atom(context, leaf)
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_value_set(inference_state)
return definitions
def filter_follow_imports(names, follow_builtin_imports=False):
for name in names:
if name.is_import():
new_names = list(filter_follow_imports(
name.goto(),
follow_builtin_imports=follow_builtin_imports,
))
found_builtin = False
if follow_builtin_imports:
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True
if found_builtin:
yield name
else:
yield from new_names
else:
yield name
| OnErrorLeaf |
python | huggingface__transformers | src/transformers/models/pegasus_x/configuration_pegasus_x.py | {
"start": 798,
"end": 7954
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
[google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 96103):
Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`PegasusXModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimension of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 16):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 16):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
num_global_tokens (`int`, *optional*, defaults to 128):
Number of global tokens to use for the encoder
block_size (`int`, *optional*, defaults to 512):
Block size for encoder local attention. Sequence length should be an exact multiple of block size.
block_size must be a multiple of 2 if stagger_local_block is True
stagger_local_block (`bool`, *optional*, defaults to `True`):
Whether to stagger every other local attention by half a block
Example:
```python
>>> from transformers import PegasusXConfig, PegasusXModel
>>> # Initializing a PEGASUS google/pegasus-x-large style configuration
>>> configuration = PegasusXConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
>>> model = PegasusXModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "pegasus_x"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=96103,
max_position_embeddings=16384,
encoder_layers=16,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=16,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=0,
scale_embedding=True,
pad_token_id=0,
eos_token_id=1,
forced_eos_token_id=1,
num_global_tokens=32,
block_size=512,
stagger_local_blocks=True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.num_global_tokens = num_global_tokens
self.block_size = block_size
self.stagger_local_blocks = stagger_local_blocks
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
__all__ = ["PegasusXConfig"]
| PegasusXConfig |
python | django__django | tests/handlers/test_exception.py | {
"start": 221,
"end": 1917
} | class ____(SimpleTestCase):
def get_suspicious_environ(self):
payload = FakePayload("a=1&a=2&a=3\r\n")
return {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
"SERVER_NAME": "test",
"SERVER_PORT": "8000",
}
@override_settings(DATA_UPLOAD_MAX_MEMORY_SIZE=12)
def test_data_upload_max_memory_size_exceeded(self):
response = WSGIHandler()(self.get_suspicious_environ(), lambda *a, **k: None)
self.assertEqual(response.status_code, 400)
@override_settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=2)
def test_data_upload_max_number_fields_exceeded(self):
response = WSGIHandler()(self.get_suspicious_environ(), lambda *a, **k: None)
self.assertEqual(response.status_code, 400)
@override_settings(DATA_UPLOAD_MAX_NUMBER_FILES=2)
def test_data_upload_max_number_files_exceeded(self):
payload = FakePayload(
encode_multipart(
BOUNDARY,
{
"a.txt": "Hello World!",
"b.txt": "Hello Django!",
"c.txt": "Hello Python!",
},
)
)
environ = {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": MULTIPART_CONTENT,
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
"SERVER_NAME": "test",
"SERVER_PORT": "8000",
}
response = WSGIHandler()(environ, lambda *a, **k: None)
self.assertEqual(response.status_code, 400)
| ExceptionHandlerTests |
python | getsentry__sentry | tests/acceptance/test_project_release_tracking_settings.py | {
"start": 132,
"end": 1248
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.login_as(self.user)
self.path1 = f"/{self.org.slug}/{self.project.slug}/settings/release-tracking/"
def test_tags_list(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"environment": "prod",
"release": "first",
"tags": {"Foo": "value"},
},
project_id=self.project.id,
)
self.browser.get(self.path1)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
| ProjectReleaseTrackingSettingsTest |
python | google__jax | tests/pallas/pallas_test.py | {
"start": 95935,
"end": 97535
} | class ____(PallasBaseTest):
def test_pass_weird_tuple_into_pallas_call(self):
xt = WeirdTuple(x0=jnp.ones((8, 8)), x1=jnp.zeros((8,)))
def kernel(xt_ref, ot_ref):
xt = xt_ref[...]
ot_ref[...] = xt
ot = self.pallas_call(kernel, out_shape=jax.typeof(xt))(xt)
self.assertArraysEqual(ot.x0, xt.x0)
self.assertArraysEqual(ot.x1, xt.x1)
def test_pass_sliced_array_into_pallas_call(self):
xs = SlicedArray(
x=jnp.arange(8 * 16 * 128).reshape(8, 16, 128),
s=jnp.array([2], jnp.int32),
)
def kernel(xs_ref, o_ref):
x = index_p.bind(xs_ref)
o_ref[...] = x
o = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct(xs.shape, xs.dtype),
in_specs=[pl.BlockSpec((8, 128), lambda i: (i, 0))],
out_specs=pl.BlockSpec((8, 128), lambda i: (i, 0)),
grid=(2,)
)(xs)
self.assertArraysEqual(o, xs.x[xs.s[0]])
def test_pass_hi_type_with_aliasing(self):
xs = SlicedArray(
x=jnp.arange(8 * 16 * 128).reshape(8, 16, 128),
s=jnp.array([2], jnp.int32),
)
def kernel(xs_ref, o_ref):
o_ref[...] = xs_ref[...]
@jax.jit
def f(xs):
return self.pallas_call(
kernel, out_shape=jax.typeof(xs),
in_specs=[pl.BlockSpec((8, 128), lambda i: (i, 0))],
out_specs=pl.BlockSpec((8, 128), lambda i: (i, 0)),
grid=(2,),
input_output_aliases={0: 0}
)(xs)
os = f(xs)
self.assertArraysEqual(os.x, xs.x)
self.assertArraysEqual(os.s, xs.s)
if __name__ == "__main__":
absltest.main()
| PallasHiJaxTest |
python | ApeWorX__ape | tests/functional/conversion/test_address.py | {
"start": 812,
"end": 1450
} | class ____:
@pytest.fixture(scope="class")
def converter(self):
return HexAddressConverter()
def test_is_convertible_hex_str(self, converter):
assert not converter.is_convertible("0x123")
def test_is_convertible_address(self, converter, owner):
# Is already an address!
assert not converter.is_convertible(str(owner.address))
def test_convert_not_canonical_address(self, converter):
actual = converter.convert("0x0ffffffaaaaaaaabbbbbbb333337777eeeeeee00")
expected = "0x0fFFfffAaAaAaAaBBBbBbb333337777eeeeeEe00"
assert actual == expected
| TestHexAddressConverter |
python | huggingface__transformers | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | {
"start": 5985,
"end": 6098
} | class ____(SamVisionNeck):
def __init__(self, config):
super().__init__(config)
| DeepseekVLSamVisionNeck |
python | numba__numba | numba/core/typing/templates.py | {
"start": 612,
"end": 8018
} | class ____(object):
"""
The signature of a function call or operation, i.e. its argument types
and return type.
"""
# XXX Perhaps the signature should be a BoundArguments, instead
# of separate args and pysig...
__slots__ = '_return_type', '_args', '_recvr', '_pysig'
def __init__(self, return_type, args, recvr, pysig=None):
if isinstance(args, list):
args = tuple(args)
self._return_type = return_type
self._args = args
self._recvr = recvr
self._pysig = pysig
@property
def return_type(self):
return self._return_type
@property
def args(self):
return self._args
@property
def recvr(self):
return self._recvr
@property
def pysig(self):
return self._pysig
def replace(self, **kwargs):
"""Copy and replace the given attributes provided as keyword arguments.
Returns an updated copy.
"""
curstate = dict(return_type=self.return_type,
args=self.args,
recvr=self.recvr,
pysig=self.pysig)
curstate.update(kwargs)
return Signature(**curstate)
def __getstate__(self):
"""
Needed because of __slots__.
"""
return self._return_type, self._args, self._recvr, self._pysig
def __setstate__(self, state):
"""
Needed because of __slots__.
"""
self._return_type, self._args, self._recvr, self._pysig = state
def __hash__(self):
return hash((self.args, self.return_type))
def __eq__(self, other):
if isinstance(other, Signature):
return (self.args == other.args and
self.return_type == other.return_type and
self.recvr == other.recvr and
self.pysig == other.pysig)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s -> %s" % (self.args, self.return_type)
@property
def is_method(self):
"""
Whether this signature represents a bound method or a regular
function.
"""
return self.recvr is not None
def as_method(self):
"""
Convert this signature to a bound method signature.
"""
if self.recvr is not None:
return self
sig = signature(self.return_type, *self.args[1:],
recvr=self.args[0])
# Adjust the python signature
params = list(self.pysig.parameters.values())[1:]
sig = sig.replace(
pysig=utils.pySignature(
parameters=params,
return_annotation=self.pysig.return_annotation,
),
)
return sig
def as_function(self):
"""
Convert this signature to a regular function signature.
"""
if self.recvr is None:
return self
sig = signature(self.return_type, *((self.recvr,) + self.args))
return sig
def as_type(self):
"""
Convert this signature to a first-class function type.
"""
return types.FunctionType(self)
def __unliteral__(self):
return signature(types.unliteral(self.return_type),
*map(types.unliteral, self.args))
def dump(self, tab=''):
c = self.as_type()._code
print(f'{tab}DUMP {type(self).__name__} [type code: {c}]')
print(f'{tab} Argument types:')
for a in self.args:
a.dump(tab=tab + ' | ')
print(f'{tab} Return type:')
self.return_type.dump(tab=tab + ' | ')
print(f'{tab}END DUMP')
def is_precise(self):
for atype in self.args:
if not atype.is_precise():
return False
return self.return_type.is_precise()
def make_concrete_template(name, key, signatures):
baseclasses = (ConcreteTemplate,)
gvars = dict(key=key, cases=list(signatures))
return type(name, baseclasses, gvars)
def make_callable_template(key, typer, recvr=None):
"""
Create a callable template with the given key and typer function.
"""
def generic(self):
return typer
name = "%s_CallableTemplate" % (key,)
bases = (CallableTemplate,)
class_dict = dict(key=key, generic=generic, recvr=recvr)
return type(name, bases, class_dict)
def signature(return_type, *args, **kws):
recvr = kws.pop('recvr', None)
assert not kws
return Signature(return_type, args, recvr=recvr)
def fold_arguments(pysig, args, kws, normal_handler, default_handler,
stararg_handler):
"""
Given the signature *pysig*, explicit *args* and *kws*, resolve
omitted arguments and keyword arguments. A tuple of positional
arguments is returned.
Various handlers allow to process arguments:
- normal_handler(index, param, value) is called for normal arguments
- default_handler(index, param, default) is called for omitted arguments
- stararg_handler(index, param, values) is called for a "*args" argument
"""
if isinstance(kws, Sequence):
# Normalize dict kws
kws = dict(kws)
# deal with kwonly args
params = pysig.parameters
kwonly = []
for name, p in params.items():
if p.kind == p.KEYWORD_ONLY:
kwonly.append(name)
if kwonly:
bind_args = args[:-len(kwonly)]
else:
bind_args = args
bind_kws = kws.copy()
if kwonly:
for idx, n in enumerate(kwonly):
bind_kws[n] = args[len(kwonly) + idx]
# now bind
try:
ba = pysig.bind(*bind_args, **bind_kws)
except TypeError as e:
# The binding attempt can raise if the args don't match up, this needs
# to be converted to a TypingError so that e.g. partial type inference
# doesn't just halt.
msg = (f"Cannot bind 'args={bind_args} kws={bind_kws}' to "
f"signature '{pysig}' due to \"{type(e).__name__}: {e}\".")
raise TypingError(msg)
for i, param in enumerate(pysig.parameters.values()):
name = param.name
default = param.default
if param.kind == param.VAR_POSITIONAL:
# stararg may be omitted, in which case its "default" value
# is simply the empty tuple
if name in ba.arguments:
argval = ba.arguments[name]
# NOTE: avoid wrapping the tuple type for stararg in another
# tuple.
if (len(argval) == 1 and
isinstance(argval[0], (types.StarArgTuple,
types.StarArgUniTuple))):
argval = tuple(argval[0])
else:
argval = ()
out = stararg_handler(i, param, argval)
ba.arguments[name] = out
elif name in ba.arguments:
# Non-stararg, present
ba.arguments[name] = normal_handler(i, param, ba.arguments[name])
else:
# Non-stararg, omitted
assert default is not param.empty
ba.arguments[name] = default_handler(i, param, default)
# Collect args in the right order
args = tuple(ba.arguments[param.name]
for param in pysig.parameters.values())
return args
| Signature |
python | pytorch__pytorch | test/test_utils.py | {
"start": 35230,
"end": 35912
} | class ____(TestCase):
def test_import_imported(self):
self.assertIn("os", sys.modules)
os_module = try_import("os")
self.assertIs(os_module, os)
def test_import_existing(self):
self.assertNotIn("imaplib", sys.modules)
imaplib_module = try_import("imaplib")
self.assertIsNotNone(imaplib_module)
self.assertFalse(hasattr(imaplib_module, "not_attribute"))
self.assertTrue(hasattr(imaplib_module, "IMAP4"))
def test_import_missing(self):
missing_module = try_import("missing_module")
self.assertIsNone(missing_module)
@deprecated()
def _deprecated_api(x, y=15):
return x + y
| TestTryImport |
python | pydata__xarray | xarray/backends/zarr.py | {
"start": 5929,
"end": 20812
} | class ____(BackendArray):
__slots__ = ("_array", "dtype", "shape")
def __init__(self, zarr_array):
# some callers attempt to evaluate an array if an `array` property exists on the object.
# we prefix with _ to avoid this inference.
# TODO type hint this?
self._array = zarr_array
self.shape = self._array.shape
# preserve vlen string object dtype (GH 7328)
if (
not _zarr_v3()
and self._array.filters is not None
and any(filt.codec_id == "vlen-utf8" for filt in self._array.filters)
):
dtype = coding.strings.create_vlen_dtype(str)
else:
dtype = self._array.dtype
self.dtype = dtype
def get_array(self):
return self._array
def _oindex(self, key):
return self._array.oindex[key]
def _vindex(self, key):
return self._array.vindex[key]
def _getitem(self, key):
return self._array[key]
async def _async_getitem(self, key):
if not _zarr_v3():
raise NotImplementedError(
"For lazy basic async indexing with zarr, zarr-python=>v3.0.0 is required"
)
async_array = self._array._async_array
return await async_array.getitem(key)
async def _async_oindex(self, key):
if not has_zarr_async_index():
raise NotImplementedError(
"For lazy orthogonal async indexing with zarr, zarr-python=>v3.1.2 is required"
)
async_array = self._array._async_array
return await async_array.oindex.getitem(key)
async def _async_vindex(self, key):
if not has_zarr_async_index():
raise NotImplementedError(
"For lazy vectorized async indexing with zarr, zarr-python=>v3.1.2 is required"
)
async_array = self._array._async_array
return await async_array.vindex.getitem(key)
def __getitem__(self, key):
array = self._array
if isinstance(key, indexing.BasicIndexer):
method = self._getitem
elif isinstance(key, indexing.VectorizedIndexer):
method = self._vindex
elif isinstance(key, indexing.OuterIndexer):
method = self._oindex
return indexing.explicit_indexing_adapter(
key, array.shape, indexing.IndexingSupport.VECTORIZED, method
)
# if self.ndim == 0:
# could possibly have a work-around for 0d data here
async def async_getitem(self, key):
array = self._array
if isinstance(key, indexing.BasicIndexer):
method = self._async_getitem
elif isinstance(key, indexing.VectorizedIndexer):
method = self._async_vindex
elif isinstance(key, indexing.OuterIndexer):
method = self._async_oindex
return await indexing.async_explicit_indexing_adapter(
key, array.shape, indexing.IndexingSupport.VECTORIZED, method
)
def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name):
"""
Given encoding chunks (possibly None or []) and variable chunks
(possibly None or []).
"""
# zarr chunk spec:
# chunks : int or tuple of ints, optional
# Chunk shape. If not provided, will be guessed from shape and dtype.
# if there are no chunks in encoding and the variable data is a numpy
# array, then we let zarr use its own heuristics to pick the chunks
if not var_chunks and not enc_chunks:
return None
# if there are no chunks in encoding but there are dask chunks, we try to
# use the same chunks in zarr
# However, zarr chunks needs to be uniform for each array
# https://zarr-specs.readthedocs.io/en/latest/v2/v2.0.html#chunks
# while dask chunks can be variable sized
# https://dask.pydata.org/en/latest/array-design.html#chunks
if var_chunks and not enc_chunks:
if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks):
raise ValueError(
"Zarr requires uniform chunk sizes except for final chunk. "
f"Variable named {name!r} has incompatible dask chunks: {var_chunks!r}. "
"Consider rechunking using `chunk()`."
)
if any((chunks[0] < chunks[-1]) for chunks in var_chunks):
raise ValueError(
"Final chunk of Zarr array must be the same size or smaller "
f"than the first. Variable named {name!r} has incompatible Dask chunks {var_chunks!r}."
"Consider either rechunking using `chunk()` or instead deleting "
"or modifying `encoding['chunks']`."
)
# return the first chunk for each dimension
return tuple(chunk[0] for chunk in var_chunks)
# From here on, we are dealing with user-specified chunks in encoding
# zarr allows chunks to be an integer, in which case it uses the same chunk
# size on each dimension.
# Here we re-implement this expansion ourselves. That makes the logic of
# checking chunk compatibility easier
if isinstance(enc_chunks, integer_types):
enc_chunks_tuple = ndim * (enc_chunks,)
else:
enc_chunks_tuple = tuple(enc_chunks)
if len(enc_chunks_tuple) != ndim:
# throw away encoding chunks, start over
return _determine_zarr_chunks(
None,
var_chunks,
ndim,
name,
)
for x in enc_chunks_tuple:
if not isinstance(x, int):
raise TypeError(
"zarr chunk sizes specified in `encoding['chunks']` "
"must be an int or a tuple of ints. "
f"Instead found encoding['chunks']={enc_chunks_tuple!r} "
f"for variable named {name!r}."
)
# if there are chunks in encoding and the variable data is a numpy array,
# we use the specified chunks
if not var_chunks:
return enc_chunks_tuple
return enc_chunks_tuple
def _get_zarr_dims_and_attrs(zarr_obj, dimension_key, try_nczarr):
# Zarr V3 explicitly stores the dimension names in the metadata
try:
# if this exists, we are looking at a Zarr V3 array
# convert None to empty tuple
dimensions = zarr_obj.metadata.dimension_names or ()
except AttributeError:
# continue to old code path
pass
else:
attributes = dict(zarr_obj.attrs)
if len(zarr_obj.shape) != len(dimensions):
raise KeyError(
"Zarr object is missing the `dimension_names` metadata which is "
"required for xarray to determine variable dimensions."
)
return dimensions, attributes
# Zarr arrays do not have dimensions. To get around this problem, we add
# an attribute that specifies the dimension. We have to hide this attribute
# when we send the attributes to the user.
# zarr_obj can be either a zarr group or zarr array
try:
# Xarray-Zarr
dimensions = zarr_obj.attrs[dimension_key]
except KeyError as e:
if not try_nczarr:
raise KeyError(
f"Zarr object is missing the attribute `{dimension_key}`, which is "
"required for xarray to determine variable dimensions."
) from e
# NCZarr defines dimensions through metadata in .zarray
zarray_path = os.path.join(zarr_obj.path, ".zarray")
if _zarr_v3():
import asyncio
zarray_str = asyncio.run(zarr_obj.store.get(zarray_path)).to_bytes()
else:
zarray_str = zarr_obj.store.get(zarray_path)
zarray = json.loads(zarray_str)
try:
# NCZarr uses Fully Qualified Names
dimensions = [
os.path.basename(dim) for dim in zarray["_NCZARR_ARRAY"]["dimrefs"]
]
except KeyError as e:
raise KeyError(
f"Zarr object is missing the attribute `{dimension_key}` and the NCZarr metadata, "
"which are required for xarray to determine variable dimensions."
) from e
nc_attrs = [attr for attr in zarr_obj.attrs if attr.lower().startswith("_nc")]
attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key] + nc_attrs)
return dimensions, attributes
def extract_zarr_variable_encoding(
variable,
raise_on_invalid=False,
name=None,
*,
zarr_format: ZarrFormat,
):
"""
Extract zarr encoding dictionary from xarray Variable
Parameters
----------
variable : Variable
raise_on_invalid : bool, optional
name: str | Hashable, optional
zarr_format: Literal[2,3]
Returns
-------
encoding : dict
Zarr encoding for `variable`
"""
encoding = variable.encoding.copy()
safe_to_drop = {"source", "original_shape", "preferred_chunks"}
valid_encodings = {
"chunks",
"shards",
"compressor", # TODO: delete when min zarr >=3
"compressors",
"filters",
"serializer",
"cache_metadata",
"write_empty_chunks",
"chunk_key_encoding",
}
if zarr_format == 3:
valid_encodings.add("fill_value")
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if "fill_value" in invalid and zarr_format == 2:
msg = " Use `_FillValue` to set the Zarr array `fill_value`"
else:
msg = ""
if invalid:
raise ValueError(
f"unexpected encoding parameters for zarr backend: {invalid!r}." + msg
)
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
chunks = _determine_zarr_chunks(
enc_chunks=encoding.get("chunks"),
var_chunks=variable.chunks,
ndim=variable.ndim,
name=name,
)
if _zarr_v3() and chunks is None:
chunks = "auto"
encoding["chunks"] = chunks
return encoding
# Function below is copied from conventions.encode_cf_variable.
# The only change is to raise an error for object dtypes.
def encode_zarr_variable(var, needs_copy=True, name=None):
"""
Converts an Variable into an Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : Variable
A variable holding un-encoded data.
Returns
-------
out : Variable
A variable which has been encoded as described above.
"""
var = conventions.encode_cf_variable(var, name=name)
var = ensure_dtype_not_object(var, name=name)
# zarr allows unicode, but not variable-length strings, so it's both
# simpler and more compact to always encode as UTF-8 explicitly.
# TODO: allow toggling this explicitly via dtype in encoding.
# TODO: revisit this now that Zarr _does_ allow variable-length strings
coder = coding.strings.EncodedStringCoder(allows_unicode=True)
var = coder.encode(var, name=name)
var = coding.strings.ensure_fixed_length_bytes(var)
return var
def _validate_datatypes_for_zarr_append(vname, existing_var, new_var):
"""If variable exists in the store, confirm dtype of the data to append is compatible with
existing dtype.
"""
if (
np.issubdtype(new_var.dtype, np.number)
or np.issubdtype(new_var.dtype, np.datetime64)
or np.issubdtype(new_var.dtype, np.bool_)
or new_var.dtype == object
or (new_var.dtype.kind in ("S", "U") and existing_var.dtype == object)
):
# We can skip dtype equality checks under two conditions: (1) if the var to append is
# new to the dataset, because in this case there is no existing var to compare it to;
# or (2) if var to append's dtype is known to be easy-to-append, because in this case
# we can be confident appending won't cause problems. Examples of dtypes which are not
# easy-to-append include length-specified strings of type `|S*` or `<U*` (where * is a
# positive integer character length). For these dtypes, appending dissimilar lengths
# can result in truncation of appended data. Therefore, variables which already exist
# in the dataset, and with dtypes which are not known to be easy-to-append, necessitate
# exact dtype equality, as checked below.
pass
elif new_var.dtype != existing_var.dtype:
raise ValueError(
f"Mismatched dtypes for variable {vname} between Zarr store on disk "
f"and dataset to append. Store has dtype {existing_var.dtype} but "
f"dataset to append has dtype {new_var.dtype}."
)
def _validate_and_transpose_existing_dims(
var_name, new_var, existing_var, region, append_dim
):
if new_var.dims != existing_var.dims:
if set(existing_var.dims) == set(new_var.dims):
new_var = new_var.transpose(*existing_var.dims)
else:
raise ValueError(
f"variable {var_name!r} already exists with different "
f"dimension names {existing_var.dims} != "
f"{new_var.dims}, but changing variable "
f"dimensions is not supported by to_zarr()."
)
existing_sizes = {}
for dim, size in existing_var.sizes.items():
if region is not None and dim in region:
start, stop, stride = region[dim].indices(size)
assert stride == 1 # region was already validated
size = stop - start
if dim != append_dim:
existing_sizes[dim] = size
new_sizes = {dim: size for dim, size in new_var.sizes.items() if dim != append_dim}
if existing_sizes != new_sizes:
raise ValueError(
f"variable {var_name!r} already exists with different "
f"dimension sizes: {existing_sizes} != {new_sizes}. "
f"to_zarr() only supports changing dimension sizes when "
f"explicitly appending, but append_dim={append_dim!r}. "
f"If you are attempting to write to a subset of the "
f"existing store without changing dimension sizes, "
f"consider using the region argument in to_zarr()."
)
return new_var
def _put_attrs(zarr_obj, attrs):
"""Raise a more informative error message for invalid attrs."""
try:
zarr_obj.attrs.put(attrs)
except TypeError as e:
raise TypeError("Invalid attribute in Dataset.attrs.") from e
return zarr_obj
| ZarrArrayWrapper |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 9086,
"end": 17630
} | class ____(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
| Router |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/util_test.py | {
"start": 13917,
"end": 17426
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testSameDynamicShape(self):
with self.cached_session():
scalar = constant_op.constant(2.0)
scalar1 = array_ops.placeholder(dtype=dtypes.float32)
vector = [0.3, 0.4, 0.5]
vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
multidimensional = [[0.3, 0.4], [0.2, 0.6]]
multidimensional1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
multidimensional2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
# Scalar
self.assertTrue(
du.same_dynamic_shape(scalar, scalar1).eval({
scalar1: 2.0
}))
# Vector
self.assertTrue(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertTrue(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [2.0, 3.5, 6.0]
}))
# Multidimensional
self.assertTrue(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertTrue(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
}))
# Scalar, X
self.assertFalse(
du.same_dynamic_shape(scalar, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, vector1).eval({
scalar1: 2.0,
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
du.same_dynamic_shape(scalar, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(scalar1, multidimensional1).eval(
{
scalar1: 2.0,
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Vector, X
self.assertFalse(
du.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [6.0]
}))
self.assertFalse(
du.same_dynamic_shape(vector, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
du.same_dynamic_shape(vector1, multidimensional1).eval(
{
vector1: [2.0, 3.0, 4.0],
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Multidimensional, X
self.assertFalse(
du.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
self.assertFalse(
du.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
| DynamicShapeTest |
python | mlflow__mlflow | mlflow/gateway/app.py | {
"start": 7359,
"end": 8762
} | class ____(BaseModel):
endpoints: list[Endpoint]
next_page_token: str | None = None
model_config = ConfigDict(
json_schema_extra={
"example": {
"endpoints": [
{
"name": "openai-chat",
"endpoint_type": "llm/v1/chat",
"model": {
"name": "gpt-4o-mini",
"provider": "openai",
},
"limit": {"calls": 1, "key": None, "renewal_period": "minute"},
},
{
"name": "anthropic-completions",
"endpoint_type": "llm/v1/completions",
"model": {
"name": "claude-instant-100k",
"provider": "anthropic",
},
},
{
"name": "cohere-embeddings",
"endpoint_type": "llm/v1/embeddings",
"model": {
"name": "embed-english-v2.0",
"provider": "cohere",
},
},
],
"next_page_token": "eyJpbmRleCI6IDExfQ==",
}
}
)
| ListEndpointsResponse |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 10966,
"end": 13084
} | class ____:
"""Represent a registry of :py:class:`RendezvousHandler` backends."""
_registry: dict[str, RendezvousHandlerCreator]
def __init__(self) -> None:
self._registry = {}
def register(self, backend: str, creator: RendezvousHandlerCreator) -> None:
"""Register a new rendezvous backend.
Args:
backend:
The name of the backend.
creator:
The callback to invoke to construct the
:py:class:`RendezvousHandler`.
"""
if not backend:
raise ValueError("The rendezvous backend name must be a non-empty string.")
current_creator: RendezvousHandlerCreator | None
try:
current_creator = self._registry[backend]
except KeyError:
current_creator = None
if current_creator is not None and current_creator != creator:
raise ValueError(
f"The rendezvous backend '{backend}' cannot be registered with '{creator}' as it "
f"is already registered with '{current_creator}'."
)
self._registry[backend] = creator
def create_handler(self, params: RendezvousParameters) -> RendezvousHandler:
"""Create a new :py:class:`RendezvousHandler`."""
try:
creator = self._registry[params.backend]
except KeyError as e:
raise ValueError(
f"The rendezvous backend '{params.backend}' is not registered. Did you forget "
f"to call `{self.register.__name__}`?"
) from e
handler = creator(params)
# Do some sanity check.
if handler.get_backend() != params.backend:
raise RuntimeError(
f"The rendezvous backend '{handler.get_backend()}' does not match the requested "
f"backend '{params.backend}'."
)
return handler
# The default global registry instance used by launcher scripts to instantiate
# rendezvous handlers.
rendezvous_handler_registry = RendezvousHandlerRegistry()
| RendezvousHandlerRegistry |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/generic_utils.py | {
"start": 8190,
"end": 15503
} | class ____(object):
"""Keeps track of shared object configs when serializing."""
def __enter__(self):
if _shared_object_disabled():
return None
global SHARED_OBJECT_SAVING
# Serialization can happen at a number of layers for a number of reasons.
# We may end up with a case where we're opening a saving scope within
# another saving scope. In that case, we'd like to use the outermost scope
# available and ignore inner scopes, since there is not (yet) a reasonable
# use case for having these nested and distinct.
if _shared_object_saving_scope() is not None:
self._passthrough = True
return _shared_object_saving_scope()
else:
self._passthrough = False
SHARED_OBJECT_SAVING.scope = self
self._shared_objects_config = weakref.WeakKeyDictionary()
self._next_id = 0
return self
def get_config(self, obj):
"""Gets a `SharedObjectConfig` if one has already been seen for `obj`.
Args:
obj: The object for which to retrieve the `SharedObjectConfig`.
Returns:
The SharedObjectConfig for a given object, if already seen. Else,
`None`.
"""
try:
shared_object_config = self._shared_objects_config[obj]
except (TypeError, KeyError):
# If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
# that has not overridden `__hash__`), a `TypeError` will be thrown.
# We'll just continue on without shared object support.
return None
shared_object_config.increment_ref_count()
return shared_object_config
def create_config(self, base_config, obj):
"""Create a new SharedObjectConfig for a given object."""
shared_object_config = SharedObjectConfig(base_config, self._next_id)
self._next_id += 1
try:
self._shared_objects_config[obj] = shared_object_config
except TypeError:
# If the object is unhashable (e.g. a subclass of `AbstractBaseClass`
# that has not overridden `__hash__`), a `TypeError` will be thrown.
# We'll just continue on without shared object support.
pass
return shared_object_config
def __exit__(self, *args, **kwargs):
if not getattr(self, '_passthrough', False):
global SHARED_OBJECT_SAVING
SHARED_OBJECT_SAVING.scope = None
def serialize_keras_class_and_config(
cls_name, cls_config, obj=None, shared_object_id=None):
"""Returns the serialization of the class with the given config."""
base_config = {'class_name': cls_name, 'config': cls_config}
# We call `serialize_keras_class_and_config` for some branches of the load
# path. In that case, we may already have a shared object ID we'd like to
# retain.
if shared_object_id is not None:
base_config[SHARED_OBJECT_KEY] = shared_object_id
# If we have an active `SharedObjectSavingScope`, check whether we've already
# serialized this config. If so, just use that config. This will store an
# extra ID field in the config, allowing us to re-create the shared object
# relationship at load time.
if _shared_object_saving_scope() is not None and obj is not None:
shared_object_config = _shared_object_saving_scope().get_config(obj)
if shared_object_config is None:
return _shared_object_saving_scope().create_config(base_config, obj)
return shared_object_config
return base_config
def register_keras_serializable(package='Custom', name=None):
"""Registers an object with the Keras serialization framework.
This decorator injects the decorated class or function into the Keras custom
object dictionary, so that it can be serialized and deserialized without
needing an entry in the user-provided custom object dict. It also injects a
function that Keras will call to get the object's serializable string key.
Note that to be serialized and deserialized, classes must implement the
`get_config()` method. Functions do not have this requirement.
The object will be registered under the key 'package>name' where `name`,
defaults to the object name if not passed.
Args:
package: The package that this class belongs to.
name: The name to serialize this class under in this package. If None, the
class' name will be used.
Returns:
A decorator that registers the decorated class with the passed names.
"""
def decorator(arg):
"""Registers a class with the Keras serialization framework."""
class_name = name if name is not None else arg.__name__
registered_name = package + '>' + class_name
if tf_inspect.isclass(arg) and not hasattr(arg, 'get_config'):
raise ValueError(
'Cannot register a class that does not have a get_config() method.')
if registered_name in _GLOBAL_CUSTOM_OBJECTS:
raise ValueError(
'%s has already been registered to %s' %
(registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))
if arg in _GLOBAL_CUSTOM_NAMES:
raise ValueError('%s has already been registered to %s' %
(arg, _GLOBAL_CUSTOM_NAMES[arg]))
_GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
_GLOBAL_CUSTOM_NAMES[arg] = registered_name
return arg
return decorator
def get_registered_name(obj):
"""Returns the name registered to an object within the Keras framework.
This function is part of the Keras serialization and deserialization
framework. It maps objects to the string names associated with those objects
for serialization/deserialization.
Args:
obj: The object to look up.
Returns:
The name associated with the object, or the default Python name if the
object is not registered.
"""
if obj in _GLOBAL_CUSTOM_NAMES:
return _GLOBAL_CUSTOM_NAMES[obj]
else:
return obj.__name__
@tf_contextlib.contextmanager
def skip_failed_serialization():
global _SKIP_FAILED_SERIALIZATION
prev = _SKIP_FAILED_SERIALIZATION
try:
_SKIP_FAILED_SERIALIZATION = True
yield
finally:
_SKIP_FAILED_SERIALIZATION = prev
def get_registered_object(name, custom_objects=None, module_objects=None):
"""Returns the class associated with `name` if it is registered with Keras.
This function is part of the Keras serialization and deserialization
framework. It maps strings to the objects associated with them for
serialization/deserialization.
Example:
```
def from_config(cls, config, custom_objects=None):
if 'my_custom_object_name' in config:
config['hidden_cls'] = tf.keras.utils.get_registered_object(
config['my_custom_object_name'], custom_objects=custom_objects)
```
Args:
name: The name to look up.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, custom_objects is provided by the user.
module_objects: A dictionary of custom objects to look the name up in.
Generally, module_objects is provided by midlevel library implementers.
Returns:
An instantiable class associated with 'name', or None if no such class
exists.
"""
if name in _GLOBAL_CUSTOM_OBJECTS:
return _GLOBAL_CUSTOM_OBJECTS[name]
elif custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
# pylint: disable=g-bad-exception-name
| SharedObjectSavingScope |
python | pytorch__pytorch | tools/setup_helpers/cmake.py | {
"start": 1554,
"end": 19253
} | class ____:
"Manages cmake."
def __init__(self, build_dir: str = BUILD_DIR) -> None:
self._cmake_command = CMake._get_cmake_command()
self.build_dir = build_dir
@property
def _cmake_cache_file(self) -> str:
r"""Returns the path to CMakeCache.txt.
Returns:
string: The path to CMakeCache.txt.
"""
return os.path.join(self.build_dir, "CMakeCache.txt")
@property
def _ninja_build_file(self) -> str:
r"""Returns the path to build.ninja.
Returns:
string: The path to build.ninja.
"""
return os.path.join(self.build_dir, "build.ninja")
@staticmethod
def _get_cmake_command() -> str:
"""Returns cmake command."""
if IS_WINDOWS:
return "cmake"
cmake_versions: list[str] = []
valid_cmake_versions: dict[str, Version] = {}
for cmd in ("cmake", "cmake3"):
command = shutil.which(cmd)
ver = CMake._get_version(command)
if ver is not None:
eprint(f"Found {cmd} ({command}) version: {ver}", end="")
cmake_versions.append(f"{cmd}=={ver}")
# pyrefly: ignore [unsupported-operation]
if ver >= CMAKE_MINIMUM_VERSION:
eprint(f" (>={CMAKE_MINIMUM_VERSION})")
valid_cmake_versions[cmd] = ver
else:
eprint(f" (<{CMAKE_MINIMUM_VERSION})")
if not valid_cmake_versions:
raise RuntimeError(
f"no cmake or cmake3 with version >= {CMAKE_MINIMUM_VERSION}, "
f"found: {cmake_versions}"
)
return max(valid_cmake_versions, key=valid_cmake_versions.get) # type: ignore[arg-type]
@staticmethod
def _get_version(cmd: str | None) -> Version | None:
"""Returns cmake version."""
if cmd is None:
return None
try:
cmake_capabilities = json.loads(
check_output(
[cmd, "-E", "capabilities"],
stderr=DEVNULL,
text=True,
),
)
except (OSError, CalledProcessError, json.JSONDecodeError):
cmake_capabilities = {}
cmake_version = cmake_capabilities.get("version", {}).get("string")
if cmake_version is not None:
return Version(cmake_version)
raise RuntimeError(f"Failed to get CMake version from command: {cmd}")
def run(self, args: list[str], env: dict[str, str]) -> None:
"""Executes cmake with arguments and an environment."""
command = [self._cmake_command] + args
eprint(" ".join(command))
try:
check_call(command, cwd=self.build_dir, env=env)
except (CalledProcessError, KeyboardInterrupt):
# This error indicates that there was a problem with cmake, the
# Python backtrace adds no signal here so skip over it by catching
# the error and exiting manually
sys.exit(1)
@staticmethod
def defines(args: list[str], **kwargs: CMakeValue) -> None:
"""Adds definitions to a cmake argument list."""
for key, value in sorted(kwargs.items()):
if value is not None:
args.append(f"-D{key}={value}")
def get_cmake_cache_variables(self) -> dict[str, CMakeValue]:
r"""Gets values in CMakeCache.txt into a dictionary.
Returns:
dict: A ``dict`` containing the value of cached CMake variables.
"""
with open(self._cmake_cache_file) as f:
return get_cmake_cache_variables_from_file(f)
def generate(
self,
version: str | None,
cmake_python_library: str | None,
build_python: bool,
build_test: bool,
my_env: dict[str, str],
rerun: bool,
) -> None:
"""Runs cmake to generate native build files."""
if rerun and os.path.isfile(self._cmake_cache_file):
os.remove(self._cmake_cache_file)
cmake_cache_file_available = os.path.exists(self._cmake_cache_file)
if cmake_cache_file_available:
cmake_cache_variables = self.get_cmake_cache_variables()
make_program: str | None = cmake_cache_variables.get("CMAKE_MAKE_PROGRAM") # type: ignore[assignment]
if make_program and not shutil.which(make_program):
# CMakeCache.txt exists, but the make program (e.g., ninja) does not.
# See also: https://github.com/astral-sh/uv/issues/14269
# This can happen if building with PEP-517 build isolation, where `ninja` was
# installed in the isolated environment of the previous build run, but it has been
# removed. The `ninja` executable with an old absolute path not available anymore.
eprint(
"!!!WARNING!!!: CMakeCache.txt exists, "
f"but CMAKE_MAKE_PROGRAM ({make_program!r}) does not exist. "
"Clearing CMake cache."
)
self.clear_cache()
cmake_cache_file_available = False
if cmake_cache_file_available and (
not USE_NINJA or os.path.exists(self._ninja_build_file)
):
# Everything's in place. Do not rerun.
return
args = []
if USE_NINJA:
# Avoid conflicts in '-G' and the `CMAKE_GENERATOR`
os.environ["CMAKE_GENERATOR"] = "Ninja"
args.append("-GNinja")
elif IS_WINDOWS:
generator = os.getenv("CMAKE_GENERATOR", "Visual Studio 16 2019")
supported = ["Visual Studio 16 2019", "Visual Studio 17 2022"]
if generator not in supported:
eprint("Unsupported `CMAKE_GENERATOR`: " + generator)
eprint("Please set it to one of the following values: ")
eprint("\n".join(supported))
sys.exit(1)
args.append("-G" + generator)
toolset_dict = {}
toolset_version = os.getenv("CMAKE_GENERATOR_TOOLSET_VERSION")
if toolset_version is not None:
toolset_dict["version"] = toolset_version
curr_toolset = os.getenv("VCToolsVersion")
if curr_toolset is None:
eprint(
"When you specify `CMAKE_GENERATOR_TOOLSET_VERSION`, you must also "
"activate the vs environment of this version. Please read the notes "
"in the build steps carefully."
)
sys.exit(1)
if IS_64BIT:
if platform.machine() == "ARM64":
args.append("-A ARM64")
else:
args.append("-Ax64")
toolset_dict["host"] = "x64"
if toolset_dict:
toolset_expr = ",".join([f"{k}={v}" for k, v in toolset_dict.items()])
args.append("-T" + toolset_expr)
base_dir = str(Path(__file__).absolute().parents[2])
install_dir = os.path.join(base_dir, "torch")
_mkdir_p(install_dir)
_mkdir_p(self.build_dir)
# Store build options that are directly stored in environment variables
build_options: dict[str, CMakeValue] = {}
# Build options that do not start with "BUILD_", "USE_", or "CMAKE_" and are directly controlled by env vars.
# This is a dict that maps environment variables to the corresponding variable name in CMake.
additional_options = {
# Key: environment variable name. Value: Corresponding variable name to be passed to CMake. If you are
# adding a new build option to this block: Consider making these two names identical and adding this option
# in the block below.
"CUDNN_LIB_DIR": "CUDNN_LIBRARY",
"USE_CUDA_STATIC_LINK": "CAFFE2_STATIC_LINK_CUDA",
}
additional_options.update(
{
# Build options that have the same environment variable name and CMake variable name and that do not start
# with "BUILD_", "USE_", or "CMAKE_". If you are adding a new build option, also make sure you add it to
# CMakeLists.txt.
var: var
for var in (
"UBSAN_FLAGS",
"BLAS",
"WITH_BLAS",
"CUDA_HOST_COMPILER",
"CUDA_NVCC_EXECUTABLE",
"CUDA_SEPARABLE_COMPILATION",
"CUDNN_LIBRARY",
"CUDNN_INCLUDE_DIR",
"CUDNN_ROOT",
"EXPERIMENTAL_SINGLE_THREAD_POOL",
"INSTALL_TEST",
"JAVA_HOME",
"INTEL_MKL_DIR",
"INTEL_OMP_DIR",
"MKL_THREADING",
"MKLDNN_CPU_RUNTIME",
"MSVC_Z7_OVERRIDE",
"CAFFE2_USE_MSVC_STATIC_RUNTIME",
"Numa_INCLUDE_DIR",
"Numa_LIBRARIES",
"ONNX_ML",
"ONNX_NAMESPACE",
"ATEN_THREADING",
"WERROR",
"OPENSSL_ROOT_DIR",
"STATIC_DISPATCH_BACKEND",
"SELECTED_OP_LIST",
"TORCH_CUDA_ARCH_LIST",
"TORCH_XPU_ARCH_LIST",
"TRACING_BASED",
"PYTHON_LIB_REL_PATH",
)
}
)
# Aliases which are lower priority than their canonical option
low_priority_aliases = {
"CUDA_HOST_COMPILER": "CMAKE_CUDA_HOST_COMPILER",
"CUDAHOSTCXX": "CUDA_HOST_COMPILER",
"CMAKE_CUDA_HOST_COMPILER": "CUDA_HOST_COMPILER",
"CMAKE_CUDA_COMPILER": "CUDA_NVCC_EXECUTABLE",
"CUDACXX": "CUDA_NVCC_EXECUTABLE",
}
for var, val in my_env.items():
# We currently pass over all environment variables that start with "BUILD_", "USE_", and "CMAKE_". This is
# because we currently have no reliable way to get the list of all build options we have specified in
# CMakeLists.txt. (`cmake -L` won't print dependent options when the dependency condition is not met.) We
# will possibly change this in the future by parsing CMakeLists.txt ourselves (then additional_options would
# also not be needed to be specified here).
true_var = additional_options.get(var)
if true_var is not None:
build_options[true_var] = val
elif var.startswith(("BUILD_", "USE_", "CMAKE_")) or var.endswith(
("EXITCODE", "EXITCODE__TRYRUN_OUTPUT")
):
build_options[var] = val
if var in low_priority_aliases:
key = low_priority_aliases[var]
if key not in build_options:
build_options[key] = val
# The default value cannot be easily obtained in CMakeLists.txt. We set it here.
py_lib_path = sysconfig.get_path("purelib")
cmake_prefix_path = build_options.get("CMAKE_PREFIX_PATH")
if cmake_prefix_path:
build_options["CMAKE_PREFIX_PATH"] = (
py_lib_path + ";" + cast(str, cmake_prefix_path)
)
else:
build_options["CMAKE_PREFIX_PATH"] = py_lib_path
# Some options must be post-processed. Ideally, this list will be shrunk to only one or two options in the
# future, as CMake can detect many of these libraries pretty comfortably. We have them here for now before CMake
# integration is completed. They appear here not in the CMake.defines call below because they start with either
# "BUILD_" or "USE_" and must be overwritten here.
use_numpy = not check_negative_env_flag("USE_NUMPY")
build_options.update(
{
# Note: Do not add new build options to this dict if it is directly read from environment variable -- you
# only need to add one in `CMakeLists.txt`. All build options that start with "BUILD_", "USE_", or "CMAKE_"
# are automatically passed to CMake; For other options you can add to additional_options above.
"BUILD_PYTHON": build_python,
"BUILD_TEST": build_test,
# Most library detection should go to CMake script, except this one, which Python can do a much better job
# due to NumPy's inherent Pythonic nature.
"USE_NUMPY": use_numpy,
}
)
# Detect build dependencies from python lib path (in order to set *_HOME variables)
# NVSHMEM
nvshmem_py_dir = py_lib_path + "/nvidia/nvshmem"
if os.path.exists(nvshmem_py_dir):
build_options["NVSHMEM_PY_DIR"] = nvshmem_py_dir
# Options starting with CMAKE_
cmake__options = {
"CMAKE_INSTALL_PREFIX": install_dir,
}
# We set some CMAKE_* options in our Python build code instead of relying on the user's direct settings. Emit an
# error if the user also attempts to set these CMAKE options directly.
specified_cmake__options = set(build_options).intersection(cmake__options)
if len(specified_cmake__options) > 0:
eprint(
", ".join(specified_cmake__options)
+ " should not be specified in the environment variable. They are directly set by PyTorch build script."
)
sys.exit(1)
build_options.update(cmake__options)
if use_numpy:
try:
# This helps CMake find the correct include directory for NumPy
# This is especially useful in cross compiled environments
import numpy
Python_NumPy_INCLUDE_DIR = numpy.get_include()
build_options.update(
dict(Python_NumPy_INCLUDE_DIR=Python_NumPy_INCLUDE_DIR)
)
except ImportError:
# use_numpy is just a hint.... so we can fail silently here
pass
CMake.defines(
args,
Python_EXECUTABLE=sys.executable,
TORCH_BUILD_VERSION=version,
**build_options,
)
expected_wrapper = "/usr/local/opt/ccache/libexec"
if IS_DARWIN and os.path.exists(expected_wrapper):
if "CMAKE_C_COMPILER" not in build_options and "CC" not in os.environ:
CMake.defines(args, CMAKE_C_COMPILER=f"{expected_wrapper}/gcc")
if "CMAKE_CXX_COMPILER" not in build_options and "CXX" not in os.environ:
CMake.defines(args, CMAKE_CXX_COMPILER=f"{expected_wrapper}/g++")
for env_var_name in my_env:
if env_var_name.startswith("gh"):
# github env vars use utf-8, on windows, non-ascii code may
# cause problem, so encode first
try:
my_env[env_var_name] = str(my_env[env_var_name].encode("utf-8"))
except UnicodeDecodeError as e:
shex = ":".join(f"{ord(c):02x}" for c in my_env[env_var_name])
eprint(f"Invalid ENV[{env_var_name}] = {shex}")
eprint(e)
# According to the CMake manual, we should pass the arguments first,
# and put the directory as the last element. Otherwise, these flags
# may not be passed correctly.
# Reference:
# 1. https://cmake.org/cmake/help/latest/manual/cmake.1.html#synopsis
# 2. https://stackoverflow.com/a/27169347
args.append(base_dir)
self.run(args, env=my_env)
def build(self, my_env: dict[str, str]) -> None:
"""Runs cmake to build binaries."""
from .env import build_type
build_args = [
"--build",
".",
"--target",
"install",
"--config",
build_type.build_type_string,
]
# Determine the parallelism according to the following
# priorities:
# 1) MAX_JOBS environment variable
# 2) If using the Ninja build system, delegate decision to it.
# 3) Otherwise, fall back to the number of processors.
# Allow the user to set parallelism explicitly. If unset,
# we'll try to figure it out.
max_jobs = os.getenv("MAX_JOBS")
if max_jobs is not None or not USE_NINJA:
# Ninja is capable of figuring out the parallelism on its
# own: only specify it explicitly if we are not using
# Ninja.
# This lists the number of processors available on the
# machine. This may be an overestimate of the usable
# processors if CPU scheduling affinity limits it
# further. In the future, we should check for that with
# os.sched_getaffinity(0) on platforms that support it.
max_jobs = max_jobs or str(multiprocessing.cpu_count())
# CMake 3.12 provides a '-j' option.
build_args += ["-j", max_jobs]
self.run(build_args, my_env)
def clear_cache(self) -> None:
"""Clears the CMake cache."""
if os.path.isfile(self._cmake_cache_file):
os.remove(self._cmake_cache_file)
if os.path.isfile(self._ninja_build_file):
os.remove(self._ninja_build_file)
| CMake |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 225247,
"end": 228014
} | class ____(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
| NonblockConstantTest |
python | numpy__numpy | numpy/ma/core.py | {
"start": 27438,
"end": 27838
} | class ____:
"""
DomainGreaterEqual(v)(x) is True where x < v.
"""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less(x, self.critical_value)
| _DomainGreaterEqual |
python | scikit-learn__scikit-learn | sklearn/linear_model/_huber.py | {
"start": 4339,
"end": 12752
} | class ____(LinearModel, RegressorMixin, BaseEstimator):
"""L2-regularized linear regression model that is robust to outliers.
The Huber Regressor optimizes the squared loss for the samples where
``|(y - Xw - c) / sigma| < epsilon`` and the absolute loss for the samples
where ``|(y - Xw - c) / sigma| > epsilon``, where the model coefficients
``w``, the intercept ``c`` and the scale ``sigma`` are parameters
to be optimized. The parameter `sigma` makes sure that if `y` is scaled up
or down by a certain factor, one does not need to rescale `epsilon` to
achieve the same robustness. Note that this does not take into account
the fact that the different features of `X` may be of different scales.
The Huber loss function has the advantage of not being heavily influenced
by the outliers while not completely ignoring their effect.
Read more in the :ref:`User Guide <huber_regression>`
.. versionadded:: 0.18
Parameters
----------
epsilon : float, default=1.35
The parameter epsilon controls the number of samples that should be
classified as outliers. The smaller the epsilon, the more robust it is
to outliers. Epsilon must be in the range `[1, inf)`.
max_iter : int, default=100
Maximum number of iterations that
``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.
alpha : float, default=0.0001
Strength of the squared L2 regularization. Note that the penalty is
equal to ``alpha * ||w||^2``.
Must be in the range `[0, inf)`.
warm_start : bool, default=False
This is useful if the stored attributes of a previously used model
has to be reused. If set to False, then the coefficients will
be rewritten for every call to fit.
See :term:`the Glossary <warm_start>`.
fit_intercept : bool, default=True
Whether or not to fit the intercept. This can be set to False
if the data is already centered around the origin.
tol : float, default=1e-05
The iteration will stop when
``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
where pg_i is the i-th component of the projected gradient.
Attributes
----------
coef_ : array, shape (n_features,)
Features got by optimizing the L2-regularized Huber loss.
intercept_ : float
Bias.
scale_ : float
The value by which ``|y - Xw - c|`` is scaled down.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations that
``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
outliers_ : array, shape (n_samples,)
A boolean mask which is set to True where the samples are identified
as outliers.
See Also
--------
RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
References
----------
.. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
Concomitant scale estimates, p. 172
.. [2] Art B. Owen (2006), `A robust hybrid of lasso and ridge regression.
<https://artowen.su.domains/reports/hhu.pdf>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import HuberRegressor, LinearRegression
>>> from sklearn.datasets import make_regression
>>> rng = np.random.RandomState(0)
>>> X, y, coef = make_regression(
... n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
>>> X[:4] = rng.uniform(10, 20, (4, 2))
>>> y[:4] = rng.uniform(10, 20, 4)
>>> huber = HuberRegressor().fit(X, y)
>>> huber.score(X, y)
-7.284
>>> huber.predict(X[:1,])
array([806.7200])
>>> linear = LinearRegression().fit(X, y)
>>> print("True coefficients:", coef)
True coefficients: [20.4923... 34.1698...]
>>> print("Huber coefficients:", huber.coef_)
Huber coefficients: [17.7906... 31.0106...]
>>> print("Linear Regression coefficients:", linear.coef_)
Linear Regression coefficients: [-1.9221... 7.0226...]
"""
_parameter_constraints: dict = {
"epsilon": [Interval(Real, 1.0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"alpha": [Interval(Real, 0, None, closed="left")],
"warm_start": ["boolean"],
"fit_intercept": ["boolean"],
"tol": [Interval(Real, 0.0, None, closed="left")],
}
def __init__(
self,
*,
epsilon=1.35,
max_iter=100,
alpha=0.0001,
warm_start=False,
fit_intercept=True,
tol=1e-05,
):
self.epsilon = epsilon
self.max_iter = max_iter
self.alpha = alpha
self.warm_start = warm_start
self.fit_intercept = fit_intercept
self.tol = tol
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,)
Weight given to each sample.
Returns
-------
self : object
Fitted `HuberRegressor` estimator.
"""
X, y = validate_data(
self,
X,
y,
copy=False,
accept_sparse=["csr"],
y_numeric=True,
dtype=[np.float64, np.float32],
)
sample_weight = _check_sample_weight(sample_weight, X)
if self.warm_start and hasattr(self, "coef_"):
parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
else:
if self.fit_intercept:
parameters = np.zeros(X.shape[1] + 2)
else:
parameters = np.zeros(X.shape[1] + 1)
# Make sure to initialize the scale parameter to a strictly
# positive value:
parameters[-1] = 1
# Sigma or the scale factor should be non-negative.
# Setting it to be zero might cause undefined bounds hence we set it
# to a value close to zero.
bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
bounds[-1][0] = np.finfo(np.float64).eps * 10
opt_res = optimize.minimize(
_huber_loss_and_gradient,
parameters,
method="L-BFGS-B",
jac=True,
args=(X, y, self.epsilon, self.alpha, sample_weight),
options={
"maxiter": self.max_iter,
"gtol": self.tol,
**_get_additional_lbfgs_options_dict("iprint", -1),
},
bounds=bounds,
)
parameters = opt_res.x
if opt_res.status == 2:
raise ValueError(
"HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
% opt_res.message
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.scale_ = parameters[-1]
if self.fit_intercept:
self.intercept_ = parameters[-2]
else:
self.intercept_ = 0.0
self.coef_ = parameters[: X.shape[1]]
residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
self.outliers_ = residual > self.scale_ * self.epsilon
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| HuberRegressor |
python | PyCQA__pylint | tests/functional/r/recursion/recursion_error_crash_2683.py | {
"start": 124,
"end": 335
} | class ____:
def __init__(self):
self.count = 5
def method(self):
records = []
for _ in []:
records += []
records = records[:self.count]
records.sort()
| Cls |
python | django__django | tests/prefetch_related/tests.py | {
"start": 53245,
"end": 56435
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title="Poems", published_year=2010)
cls.book2 = BookWithYear.objects.create(title="More poems", published_year=2011)
cls.author1 = AuthorWithAge.objects.create(
name="Jane", first_book=cls.book1, age=50
)
cls.author2 = AuthorWithAge.objects.create(
name="Tom", first_book=cls.book1, age=49
)
cls.author3 = AuthorWithAge.objects.create(
name="Robert", first_book=cls.book2, age=48
)
cls.author_address = AuthorAddress.objects.create(
author=cls.author1, address="SomeStreet 1"
)
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes="review book1")
cls.br2 = BookReview.objects.create(book=cls.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related("addresses")
addresses = [
[str(address) for address in obj.addresses.all()] for obj in qs
]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related("book")
titles = [obj.book.title for obj in qs]
self.assertCountEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related("books_with_year")
with self.assertNumQueries(2):
lst = [
[str(book) for book in author.books_with_year.all()] for author in qs
]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related("aged_authors")
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related("author")]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [
a.authorwithage
for a in Author.objects.prefetch_related("authorwithage")
]
# Regression for #18090: the prefetching query must include an IN
# clause. Note that on Oracle the table name is upper case in the
# generated SQL, thus the .lower() call.
self.assertIn("authorwithage", connection.queries[-1]["sql"].lower())
self.assertIn(" IN ", connection.queries[-1]["sql"])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
| MultiTableInheritanceTest |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 48484,
"end": 70347
} | class ____(Qwen3VLMoePreTrainedModel):
base_model_prefix = "model"
_checkpoint_conversion_mapping = {}
# Reference: fix gemma3 grad acc #37208
accepts_loss_kwargs = False
config: Qwen3VLMoeConfig
_no_split_modules = ["Qwen3VLMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"]
def __init__(self, config):
super().__init__(config)
self.visual = Qwen3VLMoeVisionModel._from_config(config.vision_config)
self.language_model = Qwen3VLMoeTextModel._from_config(config.text_config)
self.rope_deltas = None # cache rope_deltas here
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_rope_index(
self,
input_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Different from the original implementation, Qwen3VLMoe use timestamps rather than absolute time position ids."""
# Since we use timestamps to separate videos, like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>, the video_grid_thw should also be split
if video_grid_thw is not None:
video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
video_grid_thw[:, 0] = 1
spatial_merge_size = self.config.vision_config.spatial_merge_size
image_token_id = self.config.image_token_id
video_token_id = self.config.video_token_id
vision_start_token_id = self.config.vision_start_token_id
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is None:
attention_mask = torch.ones_like(total_input_ids)
position_ids = torch.ones(
3,
input_ids.shape[0],
input_ids.shape[1],
dtype=input_ids.dtype,
device=input_ids.device,
)
image_index, video_index = 0, 0
attention_mask = attention_mask.to(total_input_ids.device)
for i, input_ids in enumerate(total_input_ids):
input_ids = input_ids[attention_mask[i] == 1]
image_nums, video_nums = 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = image_nums, video_nums
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (
video_grid_thw[video_index][0],
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
# t_index is always 0 because llm_grid_t is always 1 (we use timestamps to encode the temporal information for videos)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
return position_ids, mrope_position_deltas
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
else:
position_ids = (
torch.arange(input_ids.shape[1], device=input_ids.device)
.view(1, 1, -1)
.expand(3, input_ids.shape[0], -1)
)
mrope_position_deltas = torch.zeros(
[input_ids.shape[0], 1],
device=input_ids.device,
dtype=input_ids.dtype,
)
return position_ids, mrope_position_deltas
def get_video_features(
self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
):
"""
Encodes videos into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
"""
# Same implementation as for images
return self.get_image_features(pixel_values_videos, video_grid_thw)
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
"""
pixel_values = pixel_values.type(self.visual.dtype)
image_embeds, deepstack_image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
image_embeds = torch.split(image_embeds, split_sizes)
return image_embeds, deepstack_image_embeds
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: Optional[torch.FloatTensor] = None,
video_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
)
return special_image_mask, special_video_mask
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.Tensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Qwen3VLMoeModelOutputWithPast]:
r"""
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
image_mask = None
video_mask = None
if pixel_values is not None:
image_embeds, deepstack_image_embeds = self.get_image_features(pixel_values, image_grid_thw)
image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
video_embeds, deepstack_video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
_, video_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
visual_pos_masks = None
deepstack_visual_embeds = None
if image_mask is not None and video_mask is not None:
# aggregate visual_pos_masks and deepstack_visual_embeds
image_mask = image_mask[..., 0]
video_mask = video_mask[..., 0]
visual_pos_masks = image_mask | video_mask
deepstack_visual_embeds = []
image_mask_joint = image_mask[visual_pos_masks]
video_mask_joint = video_mask[visual_pos_masks]
for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds):
embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
embed_joint[image_mask_joint, :] = img_embed
embed_joint[video_mask_joint, :] = vid_embed
deepstack_visual_embeds.append(embed_joint)
elif image_mask is not None:
image_mask = image_mask[..., 0]
visual_pos_masks = image_mask
deepstack_visual_embeds = deepstack_image_embeds
elif video_mask is not None:
video_mask = video_mask[..., 0]
visual_pos_masks = video_mask
deepstack_visual_embeds = deepstack_video_embeds
if position_ids is None:
attention_mask_tensor = (
attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"]
)
if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2)
# Only apply conversion for floating point tensors (inverted masks)
if attention_mask_tensor.dtype.is_floating_point:
attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min
attention_mask_tensor = (1.0 - attention_mask_tensor).int()
# Calculate RoPE index once per generation in the pre-fill stage only.
# When compiling, we can't check tensor values thus we check only input length
# It is safe to assume that `length!=1` means we're in pre-fill because compiled
# models currently cannot do asssisted decoding
prefill_compiled_stage = is_torchdynamo_compiling() and (
(input_ids is not None and input_ids.shape[1] != 1)
or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
)
prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
(cache_position is not None and cache_position[0] == 0)
or (past_key_values is None or past_key_values.get_seq_length() == 0)
)
if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
position_ids, rope_deltas = self.get_rope_index(
input_ids,
image_grid_thw,
video_grid_thw,
attention_mask=attention_mask_tensor,
)
self.rope_deltas = rope_deltas
# then use the prev pre-calculated rope-deltas to get the correct position ids
else:
batch_size, seq_length, _ = inputs_embeds.shape
delta = (
(cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
if cache_position is not None
else 0
)
position_ids = torch.arange(seq_length, device=inputs_embeds.device)
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
if cache_position is not None: # otherwise `deltas` is an int `0`
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
position_ids = position_ids.add(delta)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
outputs = self.language_model(
input_ids=None,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
visual_pos_masks=visual_pos_masks,
deepstack_visual_embeds=deepstack_visual_embeds,
**kwargs,
)
return Qwen3VLMoeModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
rope_deltas=self.rope_deltas,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
| Qwen3VLMoeModel |
python | plotly__plotly.py | plotly/callbacks.py | {
"start": 45,
"end": 2856
} | class ____:
def __init__(
self, ctrl=None, alt=None, shift=None, meta=None, button=None, buttons=None, **_
):
self._ctrl = ctrl
self._alt = alt
self._meta = meta
self._shift = shift
self._button = button
self._buttons = buttons
def __repr__(self):
return """\
InputDeviceState(
ctrl={ctrl},
alt={alt},
shift={shift},
meta={meta},
button={button},
buttons={buttons})""".format(
ctrl=repr(self.ctrl),
alt=repr(self.alt),
meta=repr(self.meta),
shift=repr(self.shift),
button=repr(self.button),
buttons=repr(self.buttons),
)
@property
def alt(self):
"""
Whether alt key pressed
Returns
-------
bool
"""
return self._alt
@property
def ctrl(self):
"""
Whether ctrl key pressed
Returns
-------
bool
"""
return self._ctrl
@property
def shift(self):
"""
Whether shift key pressed
Returns
-------
bool
"""
return self._shift
@property
def meta(self):
"""
Whether meta key pressed
Returns
-------
bool
"""
return self._meta
@property
def button(self):
"""
Integer code for the button that was pressed on the mouse to trigger
the event
- 0: Main button pressed, usually the left button or the
un-initialized state
- 1: Auxiliary button pressed, usually the wheel button or the middle
button (if present)
- 2: Secondary button pressed, usually the right button
- 3: Fourth button, typically the Browser Back button
- 4: Fifth button, typically the Browser Forward button
Returns
-------
int
"""
return self._button
@property
def buttons(self):
"""
Integer code for which combination of buttons are pressed on the
mouse when the event is triggered.
- 0: No button or un-initialized
- 1: Primary button (usually left)
- 2: Secondary button (usually right)
- 4: Auxilary button (usually middle or mouse wheel button)
- 8: 4th button (typically the "Browser Back" button)
- 16: 5th button (typically the "Browser Forward" button)
Combinations of buttons are represented as the decimal form of the
bitmask of the values above.
For example, pressing both the primary (1) and auxilary (4) buttons
will result in a code of 5
Returns
-------
int
"""
return self._buttons
| InputDeviceState |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 80582,
"end": 86376
} | class ____:
def __init__(self, label):
self._counterVar = 0
self._label = label
def _set_counter(self, value):
"""
Sets counter to value.
"""
self._counterVar = value
@property
def _counter(self):
"""
What counter is currently at.
"""
return self._counterVar
def _next(self):
"""
Generates the next symbols and increments counter by 1.
"""
s = Symbol("%s%i" % (self._label, self._counterVar))
self._counterVar += 1
return s
_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label
@cacheit
def _get_contractions(string1, keep_only_fully_contracted=False):
"""
Returns Add-object with contracted terms.
Uses recursion to find all contractions. -- Internal helper function --
Will find nonzero contractions in string1 between indices given in
leftrange and rightrange.
"""
# Should we store current level of contraction?
if keep_only_fully_contracted and string1:
result = []
else:
result = [NO(Mul(*string1))]
for i in range(len(string1) - 1):
for j in range(i + 1, len(string1)):
c = contraction(string1[i], string1[j])
if c:
sign = (j - i + 1) % 2
if sign:
coeff = S.NegativeOne*c
else:
coeff = c
#
# Call next level of recursion
# ============================
#
# We now need to find more contractions among operators
#
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
#
# To prevent overcounting, we don't allow contractions
# we have already encountered. i.e. contractions between
# string1[:i] <---> string1[i+1:j]
# and string1[:i] <---> string1[j+1:].
#
# This leaves the case:
oplist = string1[i + 1:j] + string1[j + 1:]
if oplist:
result.append(coeff*NO(
Mul(*string1[:i])*_get_contractions( oplist,
keep_only_fully_contracted=keep_only_fully_contracted)))
else:
result.append(coeff*NO( Mul(*string1[:i])))
if keep_only_fully_contracted:
break # next iteration over i leaves leftmost operator string1[0] uncontracted
return Add(*result)
def wicks(e, **kw_args):
"""
Returns the normal ordered equivalent of an expression using Wicks Theorem.
Examples
========
>>> from sympy import symbols, Dummy
>>> from sympy.physics.secondquant import wicks, F, Fd
>>> p, q, r = symbols('p,q,r')
>>> wicks(Fd(p)*F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q) + NO(CreateFermion(p)*AnnihilateFermion(q))
By default, the expression is expanded:
>>> wicks(F(p)*(F(q)+F(r)))
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r))
With the keyword 'keep_only_fully_contracted=True', only fully contracted
terms are returned.
By request, the result can be simplified in the following order:
-- KroneckerDelta functions are evaluated
-- Dummy variables are substituted consistently across terms
>>> p, q, r = symbols('p q r', cls=Dummy)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True)
KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
"""
if not e:
return S.Zero
opts = {
'simplify_kronecker_deltas': False,
'expand': True,
'simplify_dummies': False,
'keep_only_fully_contracted': False
}
opts.update(kw_args)
# check if we are already normally ordered
if isinstance(e, (NO, FermionicOperator)):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
# break up any NO-objects, and evaluate commutators
e = e.doit(wicks=True)
# make sure we have only one term to consider
e = e.expand()
if isinstance(e, Add):
if opts['simplify_dummies']:
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
else:
return Add(*[ wicks(term, **kw_args) for term in e.args])
# For Mul-objects we can actually do something
if isinstance(e, Mul):
# we don't want to mess around with commuting part of Mul
# so we factorize it out before starting recursion
c_part = []
string1 = []
for factor in e.args:
if factor.is_commutative:
c_part.append(factor)
else:
string1.append(factor)
n = len(string1)
# catch trivial cases
if n == 0:
result = e
elif n == 1:
if opts['keep_only_fully_contracted']:
return S.Zero
else:
result = e
else: # non-trivial
if isinstance(string1[0], BosonicOperator):
raise NotImplementedError
string1 = tuple(string1)
# recursion over higher order contractions
result = _get_contractions(string1,
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
result = Mul(*c_part)*result
if opts['expand']:
result = result.expand()
if opts['simplify_kronecker_deltas']:
result = evaluate_deltas(result)
return result
# there was nothing to do
return e
| _SymbolFactory |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 16667,
"end": 16878
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ACTIVE", "PENDING_DELETION", "SUSPENDED")
| OauthApplicationCreateAuditEntryState |
python | pyqtgraph__pyqtgraph | tests/test_reload.py | {
"start": 299,
"end": 2977
} | class ____(pg.QtCore.QObject):
sig = pg.QtCore.Signal()
# https://www.riverbankcomputing.com/pipermail/pyqt/2024-August/045989.html
# @pg.QtCore.Slot()
def fn(self):
print("{msg}")
"""
def remove_cache(mod):
if os.path.isfile(mod+'c'):
os.remove(mod+'c')
cachedir = os.path.join(os.path.dirname(mod), '__pycache__')
if os.path.isdir(cachedir):
shutil.rmtree(cachedir)
@pytest.mark.skipif(
(
pg.Qt.QT_LIB.startswith("PySide") and
parse(pg.Qt.QtVersion) < Version('6.6.0') and # not sure when exactly fixed
platform != 'Darwin' # seems to work on macOS
),
reason="Unknown Issue"
)
# https://www.riverbankcomputing.com/pipermail/pyqt/2024-August/045989.html
@pytest.mark.qt_log_ignore("Registering dynamic slot")
@pytest.mark.usefixtures("tmp_module")
def test_reload(tmp_module):
# write a module
mod = os.path.join(tmp_module, 'reload_test_mod.py')
print("\nRELOAD FILE:", mod)
with open(mod, "w") as file_:
file_.write(code.format(path_repr=pgpath_repr, msg="C.fn() Version1"))
# import the new module
import reload_test_mod
print("RELOAD MOD:", reload_test_mod.__file__)
c = reload_test_mod.C()
c.sig.connect(c.fn)
v1 = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
# write again and reload
with open(mod, "w") as file_:
file_.write(code.format(path_repr=pgpath_repr, msg="C.fn() Version 2"))
time.sleep(1.1)
#remove_cache(mod)
_ = pg.reload.reloadAll(tmp_module, debug=True)
v2 = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
oldcfn = pg.reload.getPreviousVersion(c.fn)
if oldcfn is None:
# Function did not reload; are we using pytest's assertion rewriting?
raise Exception("Function did not reload. (This can happen when using py.test"
" with assertion rewriting; use --assert=plain for this test.)")
assert oldcfn.__func__ is v1[2]
assert oldcfn.__self__ is c
# write again and reload
with open(mod, "w") as file_:
file_.write(code.format(path_repr=pgpath_repr, msg="C.fn() Version2"))
time.sleep(1.1)
# remove_cache(mod)
_ = pg.reload.reloadAll(tmp_module, debug=True)
_ = (reload_test_mod.C, reload_test_mod.C.sig, reload_test_mod.C.fn, c.sig, c.fn, c.fn.__func__)
cfn1 = pg.reload.getPreviousVersion(c.fn)
cfn2 = pg.reload.getPreviousVersion(cfn1)
assert cfn1.__func__ is v2[2]
assert cfn2.__func__ is v1[2]
assert cfn1.__self__ is c
assert cfn2.__self__ is c
pg.functions.disconnect(c.sig, c.fn)
| C |
python | rq__rq | tests/test_worker.py | {
"start": 1369,
"end": 1406
} | class ____(Queue):
pass
| CustomQueue |
python | getsentry__sentry | tests/apidocs/endpoints/integration_platform/test_sentry_app_external_issues.py | {
"start": 314,
"end": 1534
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.project = self.create_project(organization=self.org)
self.group = self.create_group(project=self.project)
self.sentry_app = self.create_sentry_app(
name="Hellboy App", published=True, organization=self.org
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.install = SentryAppInstallation(
sentry_app=self.sentry_app, organization_id=self.org.id
)
self.install.save()
self.url = reverse(
"sentry-api-0-sentry-app-installation-external-issues",
kwargs={"uuid": self.install.uuid},
)
self.login_as(user=self.user)
def test_post(self) -> None:
data = {
"issueId": self.group.id,
"webUrl": "https://somerandom.io/project/issue-id",
"project": "ExternalProj",
"identifier": "issue-1",
}
response = self.client.post(self.url, data)
request = RequestFactory().post(self.url, data)
self.validate_schema(request, response)
| SentryAppDocsTest |
python | optuna__optuna | optuna/samplers/_cmaes.py | {
"start": 1314,
"end": 27102
} | class ____(BaseSampler):
"""A sampler using `cmaes <https://github.com/CyberAgentAILab/cmaes>`__ as the backend.
Example:
Optimize a simple quadratic function by using :class:`~optuna.samplers.CmaEsSampler`.
.. code-block:: console
$ pip install cmaes
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -1, 1)
y = trial.suggest_int("y", -1, 1)
return x**2 + y
sampler = optuna.samplers.CmaEsSampler()
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=20)
Please note that this sampler does not support CategoricalDistribution.
However, :class:`~optuna.distributions.FloatDistribution` with ``step``,
(:func:`~optuna.trial.Trial.suggest_float`) and
:class:`~optuna.distributions.IntDistribution` (:func:`~optuna.trial.Trial.suggest_int`)
are supported.
If your search space contains categorical parameters, I recommend you
to use :class:`~optuna.samplers.TPESampler` instead.
Furthermore, there is room for performance improvements in parallel
optimization settings. This sampler cannot use some trials for updating
the parameters of multivariate normal distribution.
For further information about CMA-ES algorithm, please refer to the following papers:
- `N. Hansen, The CMA Evolution Strategy: A Tutorial. arXiv:1604.00772, 2016.
<https://arxiv.org/abs/1604.00772>`__
- `A. Auger and N. Hansen. A restart CMA evolution strategy with increasing population
size. In Proceedings of the IEEE Congress on Evolutionary Computation (CEC 2005),
pages 1769–1776. IEEE Press, 2005. <https://doi.org/10.1109/CEC.2005.1554902>`__
- `N. Hansen. Benchmarking a BI-Population CMA-ES on the BBOB-2009 Function Testbed.
GECCO Workshop, 2009. <https://doi.org/10.1145/1570256.1570333>`__
- `Raymond Ros, Nikolaus Hansen. A Simple Modification in CMA-ES Achieving Linear Time and
Space Complexity. 10th International Conference on Parallel Problem Solving From Nature,
Sep 2008, Dortmund, Germany. inria-00287367. <https://doi.org/10.1007/978-3-540-87700-4_30>`__
- `Masahiro Nomura, Shuhei Watanabe, Youhei Akimoto, Yoshihiko Ozaki, Masaki Onishi.
Warm Starting CMA-ES for Hyperparameter Optimization, AAAI. 2021.
<https://doi.org/10.1609/aaai.v35i10.17109>`__
- `R. Hamano, S. Saito, M. Nomura, S. Shirakawa. CMA-ES with Margin: Lower-Bounding Marginal
Probability for Mixed-Integer Black-Box Optimization, GECCO. 2022.
<https://doi.org/10.1145/3512290.3528827>`__
- `M. Nomura, Y. Akimoto, I. Ono. CMA-ES with Learning Rate Adaptation: Can CMA-ES with
Default Population Size Solve Multimodal and Noisy Problems?, GECCO. 2023.
<https://doi.org/10.1145/3583131.3590358>`__
.. seealso::
You can also use `optuna_integration.PyCmaSampler <https://optuna-integration.readthedocs.io/en/stable/reference/generated/optuna_integration.PyCmaSampler.html#optuna_integration.PyCmaSampler>`__ which is a sampler using cma
library as the backend.
Args:
x0:
A dictionary of an initial parameter values for CMA-ES. By default, the mean of ``low``
and ``high`` for each distribution is used. Note that ``x0`` is sampled uniformly
within the search space domain for each restart if you specify ``restart_strategy``
argument.
sigma0:
Initial standard deviation of CMA-ES. By default, ``sigma0`` is set to
``min_range / 6``, where ``min_range`` denotes the minimum range of the distributions
in the search space.
seed:
A random seed for CMA-ES.
n_startup_trials:
The independent sampling is used instead of the CMA-ES algorithm until the given number
of trials finish in the same study.
independent_sampler:
A :class:`~optuna.samplers.BaseSampler` instance that is used for independent
sampling. The parameters not contained in the relative search space are sampled
by this sampler.
The search space for :class:`~optuna.samplers.CmaEsSampler` is determined by
:func:`~optuna.search_space.intersection_search_space()`.
If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used
as the default.
.. seealso::
:class:`optuna.samplers` module provides built-in independent samplers
such as :class:`~optuna.samplers.RandomSampler` and
:class:`~optuna.samplers.TPESampler`.
warn_independent_sampling:
If this is :obj:`True`, a warning message is emitted when
the value of a parameter is sampled by using an independent sampler.
Note that the parameters of the first trial in a study are always sampled
via an independent sampler, so no warning messages are emitted in this case.
restart_strategy:
Strategy for restarting CMA-ES optimization when converges to a local minimum.
If :obj:`None` is given, CMA-ES will not restart (default).
If 'ipop' is given, CMA-ES will restart with increasing population size.
if 'bipop' is given, CMA-ES will restart with the population size
increased or decreased.
Please see also ``inc_popsize`` parameter.
.. warning::
Deprecated in v4.4.0. ``restart_strategy`` argument will be removed in the future.
The removal of this feature is currently scheduled for v6.0.0,
but this schedule is subject to change.
From v4.4.0 onward, ``restart_strategy`` automatically falls back to ``None``, and
``restart_strategy`` will be supported in OptunaHub.
See https://github.com/optuna/optuna/releases/tag/v4.4.0.
popsize:
A population size of CMA-ES.
inc_popsize:
Multiplier for increasing population size before each restart.
This argument will be used when ``restart_strategy = 'ipop'``
or ``restart_strategy = 'bipop'`` is specified.
.. warning::
Deprecated in v4.4.0. ``inc_popsize`` argument will be removed in the future.
The removal of this feature is currently scheduled for v6.0.0,
but this schedule is subject to change.
From v4.4.0 onward, ``inc_popsize`` is no longer utilized within Optuna, and
``inc_popsize`` will be supported in OptunaHub.
See https://github.com/optuna/optuna/releases/tag/v4.4.0.
consider_pruned_trials:
If this is :obj:`True`, the PRUNED trials are considered for sampling.
.. note::
Added in v2.0.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.0.0.
.. note::
It is suggested to set this flag :obj:`False` when the
:class:`~optuna.pruners.MedianPruner` is used. On the other hand, it is suggested
to set this flag :obj:`True` when the :class:`~optuna.pruners.HyperbandPruner` is
used. Please see `the benchmark result
<https://github.com/optuna/optuna/pull/1229>`__ for the details.
use_separable_cma:
If this is :obj:`True`, the covariance matrix is constrained to be diagonal.
Due to reduce the model complexity, the learning rate for the covariance matrix
is increased. Consequently, this algorithm outperforms CMA-ES on separable functions.
.. note::
Added in v2.6.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.6.0.
with_margin:
If this is :obj:`True`, CMA-ES with margin is used. This algorithm prevents samples in
each discrete distribution (:class:`~optuna.distributions.FloatDistribution` with
``step`` and :class:`~optuna.distributions.IntDistribution`) from being fixed to a single
point.
Currently, this option cannot be used with ``use_separable_cma=True``.
.. note::
Added in v3.1.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v3.1.0.
lr_adapt:
If this is :obj:`True`, CMA-ES with learning rate adaptation is used.
This algorithm focuses on working well on multimodal and/or noisy problems
with default settings.
Currently, this option cannot be used with ``use_separable_cma=True`` or
``with_margin=True``.
.. note::
Added in v3.3.0 or later, as an experimental feature.
The interface may change in newer versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v3.3.0.
source_trials:
This option is for Warm Starting CMA-ES, a method to transfer prior knowledge on
similar HPO tasks through the initialization of CMA-ES. This method estimates a
promising distribution from ``source_trials`` and generates the parameter of
multivariate gaussian distribution. Please note that it is prohibited to use
``x0``, ``sigma0``, or ``use_separable_cma`` argument together.
.. note::
Added in v2.6.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.6.0.
""" # NOQA: E501
def __init__(
self,
x0: dict[str, Any] | None = None,
sigma0: float | None = None,
n_startup_trials: int = 1,
independent_sampler: BaseSampler | None = None,
warn_independent_sampling: bool = True,
seed: int | None = None,
*,
consider_pruned_trials: bool = False,
restart_strategy: str | None = None,
popsize: int | None = None,
inc_popsize: int = -1,
use_separable_cma: bool = False,
with_margin: bool = False,
lr_adapt: bool = False,
source_trials: list[FrozenTrial] | None = None,
) -> None:
if restart_strategy is not None or inc_popsize != -1:
msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format(
name="`restart_strategy`", d_ver="4.4.0", r_ver="6.0.0"
)
optuna_warn(
f"{msg} From v4.4.0 onward, `restart_strategy` automatically falls back to "
"`None`. `restart_strategy` will be supported in OptunaHub.",
FutureWarning,
)
self._x0 = x0
self._sigma0 = sigma0
self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed)
self._n_startup_trials = n_startup_trials
self._warn_independent_sampling = warn_independent_sampling
self._cma_rng = LazyRandomState(seed)
self._search_space = IntersectionSearchSpace()
self._consider_pruned_trials = consider_pruned_trials
self._popsize = popsize
self._use_separable_cma = use_separable_cma
self._with_margin = with_margin
self._lr_adapt = lr_adapt
self._source_trials = source_trials
if self._use_separable_cma:
self._attr_prefix = "sepcma:"
elif self._with_margin:
self._attr_prefix = "cmawm:"
else:
self._attr_prefix = "cma:"
if self._consider_pruned_trials:
warn_experimental_argument("consider_pruned_trials")
if self._use_separable_cma:
warn_experimental_argument("use_separable_cma")
if self._source_trials is not None:
warn_experimental_argument("source_trials")
if self._with_margin:
warn_experimental_argument("with_margin")
if self._lr_adapt:
warn_experimental_argument("lr_adapt")
if source_trials is not None and (x0 is not None or sigma0 is not None):
raise ValueError(
"It is prohibited to pass `source_trials` argument when x0 or sigma0 is specified."
)
# TODO(c-bata): Support WS-sep-CMA-ES.
if source_trials is not None and use_separable_cma:
raise ValueError(
"It is prohibited to pass `source_trials` argument when using separable CMA-ES."
)
if lr_adapt and (use_separable_cma or with_margin):
raise ValueError(
"It is prohibited to pass `use_separable_cma` or `with_margin` argument when "
"using `lr_adapt`."
)
# TODO(knshnb): Support sep-CMA-ES with margin.
if self._use_separable_cma and self._with_margin:
raise ValueError(
"Currently, we do not support `use_separable_cma=True` and `with_margin=True`."
)
def reseed_rng(self) -> None:
# _cma_rng doesn't require reseeding because the relative sampling reseeds in each trial.
self._independent_sampler.reseed_rng()
def infer_relative_search_space(
self, study: "optuna.Study", trial: "optuna.trial.FrozenTrial"
) -> dict[str, BaseDistribution]:
search_space: dict[str, BaseDistribution] = {}
for name, distribution in self._search_space.calculate(study).items():
if distribution.single():
# `cma` cannot handle distributions that contain just a single value, so we skip
# them. Note that the parameter values for such distributions are sampled in
# `Trial`.
continue
if not isinstance(distribution, (FloatDistribution, IntDistribution)):
# Categorical distribution is unsupported.
continue
search_space[name] = distribution
return search_space
def sample_relative(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
search_space: dict[str, BaseDistribution],
) -> dict[str, Any]:
self._raise_error_if_multi_objective(study)
if len(search_space) == 0:
return {}
completed_trials = self._get_trials(study)
if len(completed_trials) < self._n_startup_trials:
return {}
# When `with_margin=True`, bounds in discrete dimensions are handled inside `CMAwM`.
trans = _SearchSpaceTransform(
search_space, transform_step=not self._with_margin, transform_0_1=True
)
optimizer = self._restore_optimizer(completed_trials)
if optimizer is None:
optimizer = self._init_optimizer(trans, study.direction)
if optimizer.dim != len(trans.bounds):
if self._warn_independent_sampling:
ind_sampler_name = self._independent_sampler.__class__.__name__
_logger.warning(
"`CmaEsSampler` does not support dynamic search space. "
f"`{ind_sampler_name}` is used instead of `CmaEsSampler`."
)
self._warn_independent_sampling = False
return {}
# TODO(c-bata): Reduce the number of wasted trials during parallel optimization.
# See https://github.com/optuna/optuna/pull/920#discussion_r385114002 for details.
solution_trials = self._get_solution_trials(completed_trials, optimizer.generation)
if len(solution_trials) >= optimizer.population_size:
solutions: list[tuple[np.ndarray, float]] = []
for t in solution_trials[: optimizer.population_size]:
assert t.value is not None, "completed trials must have a value"
if isinstance(optimizer, cmaes.CMAwM):
x = np.array(t.system_attrs["x_for_tell"])
else:
x = trans.transform(t.params)
y = t.value if study.direction == StudyDirection.MINIMIZE else -t.value
solutions.append((x, y))
optimizer.tell(solutions)
# Store optimizer.
optimizer_str = pickle.dumps(optimizer).hex()
optimizer_attrs = self._split_optimizer_str(optimizer_str)
for key in optimizer_attrs:
study._storage.set_trial_system_attr(trial._trial_id, key, optimizer_attrs[key])
# Caution: optimizer should update its seed value.
seed = self._cma_rng.rng.randint(1, 2**16) + trial.number
optimizer._rng.seed(seed)
if isinstance(optimizer, cmaes.CMAwM):
params, x_for_tell = optimizer.ask()
study._storage.set_trial_system_attr(
trial._trial_id, "x_for_tell", x_for_tell.tolist()
)
else:
params = optimizer.ask()
generation_attr_key = self._attr_key_generation
study._storage.set_trial_system_attr(
trial._trial_id, generation_attr_key, optimizer.generation
)
external_values = trans.untransform(params)
return external_values
@property
def _attr_key_generation(self) -> str:
return self._attr_prefix + "generation"
@property
def _attr_key_optimizer(self) -> str:
return self._attr_prefix + "optimizer"
def _concat_optimizer_attrs(self, optimizer_attrs: dict[str, str]) -> str:
return "".join(
optimizer_attrs[f"{self._attr_key_optimizer}:{i}"] for i in range(len(optimizer_attrs))
)
def _split_optimizer_str(self, optimizer_str: str) -> dict[str, str]:
optimizer_len = len(optimizer_str)
attrs = {}
for i in range(math.ceil(optimizer_len / _SYSTEM_ATTR_MAX_LENGTH)):
start = i * _SYSTEM_ATTR_MAX_LENGTH
end = min((i + 1) * _SYSTEM_ATTR_MAX_LENGTH, optimizer_len)
attrs[f"{self._attr_key_optimizer}:{i}"] = optimizer_str[start:end]
return attrs
def _restore_optimizer(
self,
completed_trials: "list[optuna.trial.FrozenTrial]",
) -> "CmaClass" | None:
# Restore a previous CMA object.
for trial in reversed(completed_trials):
optimizer_attrs = {
key: value
for key, value in trial.system_attrs.items()
if key.startswith(self._attr_key_optimizer)
}
if len(optimizer_attrs) == 0:
continue
optimizer_str = self._concat_optimizer_attrs(optimizer_attrs)
return pickle.loads(bytes.fromhex(optimizer_str))
return None
def _init_optimizer(
self,
trans: _SearchSpaceTransform,
direction: StudyDirection,
) -> "CmaClass":
lower_bounds = trans.bounds[:, 0]
upper_bounds = trans.bounds[:, 1]
n_dimension = len(trans.bounds)
if self._source_trials is None:
if self._x0 is None:
mean = lower_bounds + (upper_bounds - lower_bounds) / 2
else:
# `self._x0` is external representations.
mean = trans.transform(self._x0)
if self._sigma0 is None:
sigma0 = np.min((upper_bounds - lower_bounds) / 6)
else:
sigma0 = self._sigma0
cov = None
else:
expected_states = [TrialState.COMPLETE]
if self._consider_pruned_trials:
expected_states.append(TrialState.PRUNED)
# TODO(c-bata): Filter parameters by their values instead of checking search space.
sign = 1 if direction == StudyDirection.MINIMIZE else -1
source_solutions = [
(trans.transform(t.params), sign * cast(float, t.value))
for t in self._source_trials
if t.state in expected_states
and _is_compatible_search_space(trans, t.distributions)
]
if len(source_solutions) == 0:
raise ValueError("No compatible source_trials")
# TODO(c-bata): Add options to change prior parameters (alpha and gamma).
mean, sigma0, cov = cmaes.get_warm_start_mgd(source_solutions)
# Avoid ZeroDivisionError in cmaes.
sigma0 = max(sigma0, _EPS)
if self._use_separable_cma:
if len(trans.bounds) == 1:
optuna_warn(
"Separable CMA-ES does not operate meaningfully on single-dimensional "
"search spaces. The setting `use_separable_cma=True` will be ignored.",
UserWarning,
)
else:
return cmaes.SepCMA(
mean=mean,
sigma=sigma0,
bounds=trans.bounds,
seed=self._cma_rng.rng.randint(1, 2**31 - 2),
n_max_resampling=10 * n_dimension,
population_size=self._popsize,
)
if self._with_margin:
steps = np.empty(len(trans._search_space), dtype=float)
for i, dist in enumerate(trans._search_space.values()):
assert isinstance(dist, (IntDistribution, FloatDistribution))
# Set step 0.0 for continuous search space.
if dist.step is None or dist.log:
steps[i] = 0.0
elif dist.low == dist.high:
steps[i] = 1.0
else:
steps[i] = dist.step / (dist.high - dist.low)
return cmaes.CMAwM(
mean=mean,
sigma=sigma0,
bounds=trans.bounds,
steps=steps,
cov=cov,
seed=self._cma_rng.rng.randint(1, 2**31 - 2),
n_max_resampling=10 * n_dimension,
population_size=self._popsize,
)
return cmaes.CMA(
mean=mean,
sigma=sigma0,
cov=cov,
bounds=trans.bounds,
seed=self._cma_rng.rng.randint(1, 2**31 - 2),
n_max_resampling=10 * n_dimension,
population_size=self._popsize,
lr_adapt=self._lr_adapt,
)
def sample_independent(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
self._raise_error_if_multi_objective(study)
if self._warn_independent_sampling:
complete_trials = self._get_trials(study)
if len(complete_trials) >= self._n_startup_trials:
self._log_independent_sampling(trial, param_name)
return self._independent_sampler.sample_independent(
study, trial, param_name, param_distribution
)
def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None:
_logger.warning(
_INDEPENDENT_SAMPLING_WARNING_TEMPLATE.format(
param_name=param_name,
trial_number=trial.number,
independent_sampler_name=self._independent_sampler.__class__.__name__,
sampler_name=self.__class__.__name__,
fallback_reason=(
"dynamic search space and `CategoricalDistribution` are not supported "
"by `CmaEsSampler`"
),
)
)
def _get_trials(self, study: "optuna.Study") -> list[FrozenTrial]:
complete_trials = []
for t in study._get_trials(deepcopy=False, use_cache=True):
if t.state == TrialState.COMPLETE:
complete_trials.append(t)
elif (
t.state == TrialState.PRUNED
and len(t.intermediate_values) > 0
and self._consider_pruned_trials
):
_, value = max(t.intermediate_values.items())
if value is None:
continue
# We rewrite the value of the trial `t` for sampling, so we need a deepcopy.
copied_t = copy.deepcopy(t)
copied_t.value = value
complete_trials.append(copied_t)
return complete_trials
def _get_solution_trials(
self, trials: list[FrozenTrial], generation: int
) -> list[FrozenTrial]:
generation_attr_key = self._attr_key_generation
return [t for t in trials if generation == t.system_attrs.get(generation_attr_key, -1)]
def before_trial(self, study: optuna.Study, trial: FrozenTrial) -> None:
self._independent_sampler.before_trial(study, trial)
def after_trial(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
state: TrialState,
values: Sequence[float] | None,
) -> None:
self._independent_sampler.after_trial(study, trial, state, values)
def _is_compatible_search_space(
trans: _SearchSpaceTransform, search_space: dict[str, BaseDistribution]
) -> bool:
intersection_size = len(set(trans._search_space.keys()).intersection(search_space.keys()))
return intersection_size == len(trans._search_space) == len(search_space)
| CmaEsSampler |
python | arrow-py__arrow | arrow/locales.py | {
"start": 77602,
"end": 80895
} | class ____(Locale):
names = ["he", "he-il"]
past = "לפני {0}"
future = "בעוד {0}"
and_word = "ו"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "הרגע",
"second": "שנייה",
"seconds": "{0} שניות",
"minute": "דקה",
"minutes": "{0} דקות",
"hour": "שעה",
"hours": {"2": "שעתיים", "ten": "{0} שעות", "higher": "{0} שעות"},
"day": "יום",
"days": {"2": "יומיים", "ten": "{0} ימים", "higher": "{0} יום"},
"week": "שבוע",
"weeks": {"2": "שבועיים", "ten": "{0} שבועות", "higher": "{0} שבועות"},
"month": "חודש",
"months": {"2": "חודשיים", "ten": "{0} חודשים", "higher": "{0} חודשים"},
"year": "שנה",
"years": {"2": "שנתיים", "ten": "{0} שנים", "higher": "{0} שנה"},
}
meridians = {
"am": 'לפנ"צ',
"pm": 'אחר"צ',
"AM": "לפני הצהריים",
"PM": "אחרי הצהריים",
}
month_names = [
"",
"ינואר",
"פברואר",
"מרץ",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר",
]
month_abbreviations = [
"",
"ינו׳",
"פבר׳",
"מרץ",
"אפר׳",
"מאי",
"יוני",
"יולי",
"אוג׳",
"ספט׳",
"אוק׳",
"נוב׳",
"דצמ׳",
]
day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"]
day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"]
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, Mapping):
if delta == 2:
form = form["2"]
elif delta == 0 or 2 < delta <= 10:
form = form["ten"]
else:
form = form["higher"]
return form.format(delta)
def describe_multi(
self,
timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]],
only_distance: bool = False,
) -> str:
"""Describes a delta within multiple timeframes in plain language.
In Hebrew, the and word behaves a bit differently.
:param timeframes: a list of string, quantity pairs each representing a timeframe and delta.
:param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords
"""
humanized = ""
for index, (timeframe, delta) in enumerate(timeframes):
last_humanized = self._format_timeframe(timeframe, trunc(delta))
if index == 0:
humanized = last_humanized
elif index == len(timeframes) - 1: # Must have at least 2 items
humanized += " " + self.and_word
if last_humanized[0].isdecimal():
humanized += "־"
humanized += last_humanized
else: # Don't add for the last one
humanized += ", " + last_humanized
if not only_distance:
humanized = self._format_relative(humanized, timeframe, trunc(delta))
return humanized
| HebrewLocale |
python | huggingface__transformers | src/transformers/utils/dummy_mistral_common_objects.py | {
"start": 129,
"end": 309
} | class ____(metaclass=DummyObject):
_backends = ["mistral-common"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["mistral-common"])
| MistralCommonBackend |
python | langchain-ai__langchain | libs/partners/mistralai/tests/integration_tests/test_chat_models.py | {
"start": 1873,
"end": 1935
} | class ____(BaseModel):
name: str
authors: list[str]
| Book |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 69848,
"end": 70305
} | class ____(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
| HTTPResponse |
python | getsentry__sentry | tests/sentry/auth/test_email.py | {
"start": 235,
"end": 2800
} | class ____(TestCase):
def setUp(self) -> None:
self.user1 = self.create_user()
self.user2 = self.create_user()
def test_no_match(self) -> None:
result = resolve_email_to_user("no_one@example.com")
assert result is None
def test_single_match(self) -> None:
result = resolve_email_to_user(self.user1.email)
assert result == self.user1
@mock.patch("sentry.auth.email.metrics")
def test_ambiguous_match(self, mock_metrics: mock.MagicMock) -> None:
for user in (self.user1, self.user2):
self.create_useremail(user=user, email="me@example.com")
with pytest.raises(AmbiguousUserFromEmail) as excinfo:
resolve_email_to_user("me@example.com")
assert set(excinfo.value.users) == {self.user1, self.user2}
assert mock_metrics.incr.call_args.args == ("auth.email_resolution.no_resolution",)
@mock.patch("sentry.auth.email.metrics")
def test_prefers_verified_email(self, mock_metrics: mock.MagicMock) -> None:
org = self.create_organization()
self.create_useremail(user=self.user1, email="me@example.com", is_verified=True)
self.create_useremail(user=self.user2, email="me@example.com", is_verified=False)
self.create_member(organization=org, user_id=self.user2.id)
result = resolve_email_to_user("me@example.com", organization=org)
assert result == self.user1
assert mock_metrics.incr.call_args.args == ("auth.email_resolution.by_verification",)
@mock.patch("sentry.auth.email.metrics")
def test_prefers_org_member(self, mock_metrics: mock.MagicMock) -> None:
org = self.create_organization()
self.create_useremail(user=self.user1, email="me@example.com", is_verified=True)
self.create_useremail(user=self.user2, email="me@example.com", is_verified=True)
self.create_member(organization=org, user_id=self.user2.id)
result = resolve_email_to_user("me@example.com", organization=org)
assert result == self.user2
assert mock_metrics.incr.call_args.args == ("auth.email_resolution.by_org_membership",)
@mock.patch("sentry.auth.email.metrics")
def test_prefers_primary_email(self, mock_metrics: mock.MagicMock) -> None:
self.create_useremail(user=self.user1, email=self.user2.email, is_verified=True)
result = resolve_email_to_user(self.user2.email)
assert result == self.user2
assert mock_metrics.incr.call_args.args == ("auth.email_resolution.by_primary_email",)
| EmailResolverTest |
python | pandas-dev__pandas | pandas/tests/reductions/test_reductions.py | {
"start": 45200,
"end": 47311
} | class ____:
# Note: the name TestCategoricalSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_unordered_raises(self, function):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
msg = f"Categorical is not ordered for operation {function}"
with pytest.raises(TypeError, match=msg):
getattr(cat, function)()
@pytest.mark.parametrize(
"values, categories",
[
(list("abc"), list("abc")),
(list("abc"), list("cba")),
(list("abc") + [np.nan], list("cba")),
([1, 2, 3], [3, 2, 1]),
([1, 2, 3, np.nan], [3, 2, 1]),
],
)
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_ordered(self, values, categories, function):
# GH 25303
cat = Series(Categorical(values, categories=categories, ordered=True))
result = getattr(cat, function)(skipna=True)
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_ordered_with_nan_only(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Series(Categorical([np.nan], categories=[1, 2], ordered=True))
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_skipna(self, function, skipna):
cat = Series(
Categorical(["a", "b", np.nan, "a"], categories=["b", "a"], ordered=True)
)
result = getattr(cat, function)(skipna=skipna)
if skipna is True:
expected = "b" if function == "min" else "a"
assert result == expected
else:
assert result is np.nan
| TestCategoricalSeriesReductions |
python | wandb__wandb | wandb/sdk/launch/registry/abstract.py | {
"start": 106,
"end": 1146
} | class ____(ABC):
"""Abstract base class for registries."""
uri: str
async def get_username_password(self) -> Tuple[str, str]:
"""Get the username and password for the registry.
Returns:
(str, str): The username and password.
"""
raise NotImplementedError
@abstractmethod
async def get_repo_uri(self) -> str:
"""Get the URI for a repository.
Returns:
str: The URI.
"""
raise NotImplementedError
@abstractmethod
async def check_image_exists(self, image_uri: str) -> bool:
"""Check if an image exists in the registry.
Arguments:
image_uri (str): The URI of the image.
Returns:
bool: True if the image exists.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def from_config(
cls,
config: dict,
) -> "AbstractRegistry":
"""Create a registry from a config."""
raise NotImplementedError
| AbstractRegistry |
python | pytorch__pytorch | test/inductor/test_aot_inductor.py | {
"start": 279282,
"end": 280734
} | class ____(LoggingTestCase):
@make_logging_test(dynamic=logging.DEBUG)
def test_shape_env_reuse(self, records):
# make sure ShapeEnv is only created once and reused afterwards
class Foo(torch.nn.Module):
def forward(self, x):
return x + 2
inputs = (torch.randn(4, 4),)
dynamic_shapes = {
"x": {0: Dim.AUTO, 1: Dim.AUTO},
}
ep = export(Foo(), inputs, dynamic_shapes=dynamic_shapes, strict=False)
with torch.no_grad():
torch._inductor.aot_compile(ep.module(), inputs)
self.assertEqual([r.msg == "create_env" for r in records].count(True), 1)
@make_logging_test(dynamic=logging.DEBUG)
def test_shape_env_reuse_zero_consts_use_consts_asm_false(self, records):
# make sure ShapeEnv is only created once and reused afterwards
class Foo(torch.nn.Module):
def forward(self, x):
return x + 2
inputs = (torch.randn(4, 4),)
dynamic_shapes = {
"x": {0: Dim.AUTO, 1: Dim.AUTO},
}
ep = export(Foo(), inputs, dynamic_shapes=dynamic_shapes, strict=False)
with (
torch.no_grad(),
config.patch({"aot_inductor.use_consts_asm_build": False}),
):
torch._inductor.aot_compile(ep.module(), inputs)
self.assertEqual([r.msg == "create_env" for r in records].count(True), 1)
| AOTInductorLoggingTest |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 98201,
"end": 99726
} | class ____:
def test_unsupported_storage(self):
settings = {
"FEEDS": {
"unsupported://uri": {},
},
}
crawler = get_crawler(settings_dict=settings)
with pytest.raises(NotConfigured):
FeedExporter.from_crawler(crawler)
def test_unsupported_format(self):
settings = {
"FEEDS": {
"file://path": {
"format": "unsupported_format",
},
},
}
crawler = get_crawler(settings_dict=settings)
with pytest.raises(NotConfigured):
FeedExporter.from_crawler(crawler)
def test_absolute_pathlib_as_uri(self):
with tempfile.NamedTemporaryFile(suffix="json") as tmp:
settings = {
"FEEDS": {
Path(tmp.name).resolve(): {
"format": "json",
},
},
}
crawler = get_crawler(settings_dict=settings)
exporter = FeedExporter.from_crawler(crawler)
assert isinstance(exporter, FeedExporter)
def test_relative_pathlib_as_uri(self):
settings = {
"FEEDS": {
Path("./items.json"): {
"format": "json",
},
},
}
crawler = get_crawler(settings_dict=settings)
exporter = FeedExporter.from_crawler(crawler)
assert isinstance(exporter, FeedExporter)
| TestFeedExportInit |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {
"start": 2620,
"end": 4028
} | class ____(AwsBaseWaiterTrigger):
"""
Asynchronously poll the boto3 API and wait for the JobFlow to finish executing.
:param job_flow_id: The id of the job flow to wait for.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_flow_id: str,
aws_conn_id: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
):
super().__init__(
serialized_fields={"job_flow_id": job_flow_id},
waiter_name="job_flow_waiting",
waiter_args={"ClusterId": job_flow_id},
failure_message="JobFlow creation failed",
status_message="JobFlow creation in progress",
status_queries=[
"Cluster.Status.State",
"Cluster.Status.StateChangeReason",
"Cluster.Status.ErrorDetails",
],
return_key="job_flow_id",
return_value=job_flow_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
| EmrCreateJobFlowTrigger |
python | Lightning-AI__lightning | src/lightning/pytorch/serve/servable_module_validator.py | {
"start": 800,
"end": 7264
} | class ____(Callback):
"""The ServableModuleValidator validates to validate a model correctly implement the ServableModule API.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Arguments:
optimization: The format in which the model should be tested while being served.
server: The library used to evaluate the model serving. The default is FastAPI.
host: The host address associated with the server.
port: The port associated with the server.
timeout: Timeout period in seconds, that the process should wait for the server to start.
exit_on_failure: Whether to exit the process on failure.
"""
def __init__(
self,
optimization: Optional[Literal["trace", "script", "onnx", "tensorrt"]] = None,
server: Literal["fastapi", "ml_server", "torchserve", "sagemaker"] = "fastapi",
host: str = "127.0.0.1",
port: int = 8080,
timeout: int = 20,
exit_on_failure: bool = True,
):
super().__init__()
fastapi_installed = RequirementCache("fastapi")
if not fastapi_installed:
raise ModuleNotFoundError(fastapi_installed.message)
uvicorn_installed = RequirementCache("uvicorn")
if not uvicorn_installed:
raise ModuleNotFoundError(uvicorn_installed.message)
# TODO: Add support for the other options
if optimization is not None:
raise NotImplementedError(f"The optimization {optimization} is currently not supported.")
# TODO: Add support for testing with those server services
if server != "fastapi":
raise NotImplementedError("Only the fastapi server is currently supported.")
self.optimization = optimization
self.host = host
self.port = port
self.server = server
self.timeout = timeout
self.exit_on_failure = exit_on_failure
self.resp: Optional[requests.Response] = None
@override
@rank_zero_only
def on_train_start(self, trainer: "pl.Trainer", servable_module: "pl.LightningModule") -> None:
if isinstance(trainer.strategy, _NOT_SUPPORTED_STRATEGIES):
raise Exception(
f"The current strategy {trainer.strategy.__class__.__qualname__} used "
"by the trainer isn't supported for sanity serving yet."
)
if not isinstance(servable_module, ServableModule):
raise TypeError(f"The provided model should be subclass of {ServableModule.__qualname__}.")
if not is_overridden("configure_payload", servable_module, ServableModule):
raise NotImplementedError("The `configure_payload` method needs to be overridden.")
if not is_overridden("configure_serialization", servable_module, ServableModule):
raise NotImplementedError("The `configure_serialization` method needs to be overridden.")
if not is_overridden("serve_step", servable_module, ServableModule):
raise NotImplementedError("The `serve_step` method needs to be overridden.")
# Note: The Trainer needs to be detached from the pl_module before starting the process.
# This would fail during the deepcopy with DDP.
servable_module.trainer = None
process = Process(target=self._start_server, args=(servable_module, self.host, self.port, self.optimization))
process.start()
servable_module.trainer = trainer
ready = False
t0 = time.time()
while not ready:
with contextlib.suppress(requests.exceptions.ConnectionError):
resp = requests.get(f"http://{self.host}:{self.port}/ping")
ready = resp.status_code == 200
if time.time() - t0 > self.timeout:
process.kill()
raise Exception(f"The server didn't start within {self.timeout} seconds.")
time.sleep(0.1)
payload = servable_module.configure_payload()
if "body" not in payload:
raise Exception(f'Your provided payload {payload} should have a field named "body".')
self.resp = requests.post(f"http://{self.host}:{self.port}/serve", json=payload)
process.kill()
if is_overridden("configure_response", servable_module, ServableModule):
response = servable_module.configure_response()
if self.resp.json() != response:
raise Exception(f"The expected response {response} doesn't match the generated one {self.resp.json()}.")
if self.exit_on_failure and not self.successful:
raise MisconfigurationException("The model isn't servable. Investigate the traceback and try again.")
if self.successful:
_logger.info(f"Your model is servable and the received payload was {self.resp.json()}.")
@property
def successful(self) -> Optional[bool]:
"""Returns whether the model was successfully served."""
return self.resp.status_code == 200 if self.resp else None
@override
def state_dict(self) -> dict[str, Any]:
return {"successful": self.successful, "optimization": self.optimization, "server": self.server}
@staticmethod
def _start_server(servable_model: ServableModule, host: str, port: int, _: bool) -> None:
"""This method starts a server with a serve and ping endpoints."""
from fastapi import Body, FastAPI
from uvicorn import run
app = FastAPI()
deserializers, serializers = servable_model.configure_serialization()
# Note: This isn't the original version, but a copy.
servable_model.eval()
@app.get("/ping")
def ping() -> bool:
return True
@app.post("/serve")
async def serve(payload: dict = Body(...)) -> dict[str, Any]:
body = payload["body"]
for key, deserializer in deserializers.items():
body[key] = deserializer(body[key])
with torch.no_grad():
output = servable_model.serve_step(**body)
if not isinstance(output, dict):
raise Exception(f"Please, return your outputs as a dictionary. Found {output}")
for key, serializer in serializers.items():
output[key] = serializer(output[key])
return output
run(app, host=host, port=port, log_level="error")
| ServableModuleValidator |
python | pytorch__pytorch | torch/__init__.py | {
"start": 86628,
"end": 87574
} | class ____(_TorchCompileInductorWrapper):
compiler_name = "aotinductor"
def __init__(self, mode, options, dynamic):
super().__init__(mode, options, dynamic)
self.apply_options({"cpp_wrapper": True})
self.apply_options({"aot_inductor.package": True})
def __call__(self, model_, inputs_):
from contextlib import nullcontext
from unittest import mock
from torch._guards import detect_fake_mode
from torch._inductor.virtualized import V
fake_mode = detect_fake_mode(inputs_)
ctx = (
mock.patch.object(fake_mode, "allow_non_fake_inputs", True)
if fake_mode
else nullcontext()
)
with (
V.set_aot_compilation(True),
ctx,
torch._inductor.config.patch("enable_autograd_for_aot", True),
):
return super().__call__(model_, inputs_)
| _TorchCompileAOTInductorWrapper |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/adam.py | {
"start": 1325,
"end": 10709
} | class ____(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use, The
learning rate. Defaults to 0.001.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to 0.9.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use, The
exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond". Defaults to `False`.
name: Optional name for the operations created when applying gradients.
Defaults to `"Adam"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
gradients by value.
Usage:
>>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> # The first step is `-learning_rate*sign(grad)`
>>> var1.numpy()
9.9
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
Notes:
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
name='Adam',
**kwargs):
super(Adam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
if self.amsgrad:
for var in var_list:
self.add_slot(var, 'vhat')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Adam, self)._prepare_local(var_device, var_dtype, apply_state)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_power = math_ops.pow(beta_2_t, local_step)
lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
(math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype
),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t,
)
)
def set_weights(self, weights):
params = self.weights
# If the weights are generated by Keras V1 optimizer, it includes vhats
# even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(Adam, self).set_weights(weights)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
if not self.amsgrad:
return gen_training_ops.ResourceApplyAdam(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=coefficients['lr_t'],
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
else:
vhat = self.get_slot(var, 'vhat')
return gen_training_ops.ResourceApplyAdamWithAmsgrad(
var=var.handle,
m=m.handle,
v=v.handle,
vhat=vhat.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=coefficients['lr_t'],
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if not self.amsgrad:
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
else:
v_hat = self.get_slot(var, 'vhat')
v_hat_t = math_ops.maximum(v_hat, v_t)
with ops.control_dependencies([v_hat_t]):
v_hat_t = state_ops.assign(
v_hat, v_hat_t, use_locking=self._use_locking)
v_hat_sqrt = math_ops.sqrt(v_hat_t)
var_update = state_ops.assign_sub(
var,
coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])
def get_config(self):
config = super(Adam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad,
})
return config
| Adam |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 14152,
"end": 14841
} | class ____(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(
Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error",
"moves.urllib.error",
)
| Module_six_moves_urllib_error |
python | mlflow__mlflow | examples/flower_classifier/train.py | {
"start": 3214,
"end": 9361
} | class ____(Callback):
"""
Keras callback for logging metrics and final model with MLflow.
Metrics are logged after every epoch. The logger keeps track of the best model based on the
validation metric. At the end of the training, the best model is logged with MLflow.
"""
def __init__(self, model, x_train, y_train, x_valid, y_valid, **kwargs):
self._model = model
self._best_val_loss = math.inf
self._train = (x_train, y_train)
self._valid = (x_valid, y_valid)
self._pyfunc_params = kwargs
self._best_weights = None
def on_epoch_end(self, epoch, logs=None):
"""
Log Keras metrics with MLflow. Update the best model if the model improved on the validation
data.
"""
if not logs:
return
for name, value in logs.items():
name = "valid_" + name[4:] if name.startswith("val_") else "train_" + name
mlflow.log_metric(name, value)
val_loss = logs["val_loss"]
if val_loss < self._best_val_loss:
# Save the "best" weights
self._best_val_loss = val_loss
self._best_weights = [x.copy() for x in self._model.get_weights()]
def on_train_end(self, *args, **kwargs):
"""
Log the best model with MLflow and evaluate it on the train and validation data so that the
metrics stored with MLflow reflect the logged model.
"""
self._model.set_weights(self._best_weights)
x, y = self._train
train_res = self._model.evaluate(x=x, y=y)
for name, value in zip(self._model.metrics_names, train_res):
mlflow.log_metric(f"train_{name}", value)
x, y = self._valid
valid_res = self._model.evaluate(x=x, y=y)
for name, value in zip(self._model.metrics_names, valid_res):
mlflow.log_metric(f"valid_{name}", value)
signature = infer_signature(x, y)
log_model(keras_model=self._model, signature=signature, **self._pyfunc_params)
def _imagenet_preprocess_tf(x):
return (x / 127.5) - 1
def _create_model(input_shape, classes):
image = Input(input_shape)
lambda_layer = Lambda(_imagenet_preprocess_tf)
preprocessed_image = lambda_layer(image)
model = vgg16.VGG16(
classes=classes, input_tensor=preprocessed_image, weights=None, include_top=False
)
x = Flatten(name="flatten")(model.output)
x = Dense(4096, activation="relu", name="fc1")(x)
x = Dense(4096, activation="relu", name="fc2")(x)
x = Dense(classes, activation="softmax", name="predictions")(x)
return Model(inputs=model.input, outputs=x)
def train(
image_files,
labels,
domain,
image_width=224,
image_height=224,
epochs=1,
batch_size=16,
test_ratio=0.2,
seed=None,
):
"""
Train VGG16 model on provided image files. This will create a new MLflow run and log all
parameters, metrics and the resulting model with MLflow. The resulting model is an instance
of KerasImageClassifierPyfunc - a custom python function model that embeds all necessary
preprocessing together with the VGG16 Keras model. The resulting model can be applied
directly to image base64 encoded image data.
Args:
image_files: List of image files to be used for training.
labels: List of labels for the image files.
domain: Dictionary representing the domain of the response.
Provides mapping label-name -> label-id.
image_width: Width of the input image in pixels.
image_height: Height of the input image in pixels.
epochs: Number of epochs to train the model for.
batch_size: Batch size used during training.
test_ratio: Fraction of dataset to be used for validation. This data will not be used
during training.
seed: Random seed. Used e.g. when splitting the dataset into train / validation.
"""
assert len(set(labels)) == len(domain)
input_shape = (image_width, image_height, 3)
with mlflow.start_run():
mlflow.log_param("epochs", str(epochs))
mlflow.log_param("batch_size", str(batch_size))
mlflow.log_param("validation_ratio", str(test_ratio))
if seed:
mlflow.log_param("seed", str(seed))
def _read_image(filename):
with open(filename, "rb") as f:
return f.read()
with tf.Graph().as_default() as g:
with tf.compat.v1.Session(graph=g).as_default():
dims = input_shape[:2]
x = np.array([decode_and_resize_image(_read_image(x), dims) for x in image_files])
y = np_utils.to_categorical(np.array(labels), num_classes=len(domain))
train_size = 1 - test_ratio
x_train, x_valid, y_train, y_valid = train_test_split(
x, y, random_state=seed, train_size=train_size
)
model = _create_model(input_shape=input_shape, classes=len(domain))
model.compile(
optimizer=keras.optimizers.SGD(decay=1e-5, nesterov=True, momentum=0.9),
loss=keras.losses.categorical_crossentropy,
metrics=["accuracy"],
)
sorted_domain = sorted(domain.keys(), key=lambda x: domain[x])
model.fit(
x=x_train,
y=y_train,
validation_data=(x_valid, y_valid),
epochs=epochs,
batch_size=batch_size,
callbacks=[
MlflowLogger(
model=model,
x_train=x_train,
y_train=y_train,
x_valid=x_valid,
y_valid=y_valid,
artifact_path="model",
domain=sorted_domain,
image_dims=input_shape,
)
],
)
if __name__ == "__main__":
run()
| MlflowLogger |
python | huggingface__transformers | src/transformers/pipelines/fill_mask.py | {
"start": 972,
"end": 11064
} | class ____(Pipeline):
_load_processor = False
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
"""
Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling
examples](../task_summary#masked-language-modeling) for more information.
Example:
```python
>>> from transformers import pipeline
>>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
>>> fill_masker("This is a simple [MASK].")
[{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"fill-mask"`.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library. See the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=fill-mask).
<Tip>
This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple
masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect
joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)).
</Tip>
<Tip>
This pipeline now supports tokenizer_kwargs. For example try:
```python
>>> from transformers import pipeline
>>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
>>> tokenizer_kwargs = {"truncation": True}
>>> fill_masker(
... "This is a simple [MASK]. " + "...with a large amount of repeated text appended. " * 100,
... tokenizer_kwargs=tokenizer_kwargs,
... )
```
</Tip>
"""
def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
return masked_index
def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
masked_index = self.get_masked_index(input_ids)
numel = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
if isinstance(model_inputs, list):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(input_ids)
def preprocess(
self, inputs, return_tensors=None, tokenizer_kwargs=None, **preprocess_parameters
) -> dict[str, GenericTensor]:
if return_tensors is None:
return_tensors = "pt"
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
self.ensure_exactly_one_mask_token(model_inputs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
model_outputs["input_ids"] = model_inputs["input_ids"]
return model_outputs
def postprocess(self, model_outputs, top_k=5, target_ids=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
top_k = target_ids.shape[0]
input_ids = model_outputs["input_ids"][0]
outputs = model_outputs["logits"]
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
logits = outputs[0, masked_index, :]
probs = logits.softmax(dim=-1)
if target_ids is not None:
probs = probs[..., target_ids]
values, predictions = probs.topk(top_k)
result = []
single_mask = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
row = []
for v, p in zip(_values, _predictions):
# Copy is important since we're going to modify this array in place
tokens = input_ids.numpy().copy()
if target_ids is not None:
p = target_ids[p].tolist()
tokens[masked_index[i]] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
row.append(proposition)
result.append(row)
if single_mask:
return result[0]
return result
def get_target_ids(self, targets):
if isinstance(targets, str):
targets = [targets]
try:
vocab = self.tokenizer.get_vocab()
except Exception:
vocab = {}
target_ids = []
for target in targets:
id_ = vocab.get(target)
if id_ is None:
input_ids = self.tokenizer(
target,
add_special_tokens=False,
return_attention_mask=False,
return_token_type_ids=False,
max_length=1,
truncation=True,
)["input_ids"]
if len(input_ids) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"We cannot replace it with anything meaningful, ignoring it"
)
continue
id_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
)
target_ids.append(id_)
target_ids = list(set(target_ids))
if len(target_ids) == 0:
raise ValueError("At least one target must be provided when passed.")
target_ids = np.array(target_ids)
return target_ids
def _sanitize_parameters(self, top_k=None, targets=None, tokenizer_kwargs=None):
preprocess_params = {}
if tokenizer_kwargs is not None:
preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs
postprocess_params = {}
if targets is not None:
target_ids = self.get_target_ids(targets)
postprocess_params["target_ids"] = target_ids
if top_k is not None:
postprocess_params["top_k"] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
)
return preprocess_params, {}, postprocess_params
@overload
def __call__(self, inputs: str, **kwargs: Any) -> list[dict[str, Any]]: ...
@overload
def __call__(self, inputs: list[str], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(self, inputs: str | list[str], **kwargs: Any) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
"""
Fill the masked token in the text(s) given as inputs.
Args:
inputs (`str` or `list[str]`):
One or several texts (or one list of prompts) with masked tokens.
targets (`str` or `list[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
resulting token will be used (with a warning, and that might be slower).
top_k (`int`, *optional*):
When passed, overrides the number of predictions to return.
Return:
A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys:
- **sequence** (`str`) -- The corresponding input with the mask token prediction.
- **score** (`float`) -- The corresponding probability.
- **token** (`int`) -- The predicted token id (to replace the masked one).
- **token_str** (`str`) -- The predicted token (to replace the masked one).
"""
outputs = super().__call__(inputs, **kwargs)
if isinstance(inputs, list) and len(inputs) == 1:
return outputs[0]
return outputs
| FillMaskPipeline |
python | pypa__hatch | src/hatch/env/plugin/interface.py | {
"start": 662,
"end": 36280
} | class ____(ABC):
"""
Example usage:
```python tab="plugin.py"
from hatch.env.plugin.interface import EnvironmentInterface
class SpecialEnvironment(EnvironmentInterface):
PLUGIN_NAME = "special"
...
```
```python tab="hooks.py"
from hatchling.plugin import hookimpl
from .plugin import SpecialEnvironment
@hookimpl
def hatch_register_environment():
return SpecialEnvironment
```
"""
PLUGIN_NAME = ""
"""The name used for selection."""
def __init__(
self,
root,
metadata,
name,
config,
matrix_variables,
data_directory,
isolated_data_directory,
platform,
verbosity,
app,
):
self.__root = root
self.__metadata = metadata
self.__name = name
self.__config = config
self.__matrix_variables = matrix_variables
self.__data_directory = data_directory
self.__isolated_data_directory = isolated_data_directory
self.__platform = platform
self.__verbosity = verbosity
self.__app = app
self.additional_dependencies = []
@property
def matrix_variables(self):
return self.__matrix_variables
@property
def app(self):
"""
An instance of [Application](../utilities.md#hatchling.bridge.app.Application).
"""
return self.__app
@cached_property
def context(self):
return self.get_context()
@property
def verbosity(self):
return self.__verbosity
@property
def root(self):
"""
The root of the local project tree as a path-like object.
"""
return self.__root
@property
def metadata(self):
return self.__metadata
@property
def name(self) -> str:
"""
The name of the environment.
"""
return self.__name
@property
def platform(self):
"""
An instance of [Platform](../utilities.md#hatch.utils.platform.Platform).
"""
return self.__platform
@property
def data_directory(self):
"""
The [directory](../../config/hatch.md#environments) this plugin should use for storage as a path-like object.
If the user has not configured one then this will be the same as the
[isolated data directory](reference.md#hatch.env.plugin.interface.EnvironmentInterface.isolated_data_directory).
"""
return self.__data_directory
@property
def isolated_data_directory(self):
"""
The default [directory](../../config/hatch.md#environments) reserved exclusively for this plugin as a path-like
object.
"""
return self.__isolated_data_directory
@property
def config(self) -> dict:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
```
"""
return self.__config
@cached_property
def project_root(self) -> str:
"""
The root of the project tree as a string. If the environment is not running locally,
this should be the remote path to the project.
"""
return str(self.root)
@cached_property
def sep(self) -> str:
"""
The character used to separate directories in paths. By default, this is `\\` on Windows and `/` otherwise.
"""
return os.sep
@cached_property
def pathsep(self) -> str:
"""
The character used to separate paths. By default, this is `;` on Windows and `:` otherwise.
"""
return os.pathsep
@cached_property
def system_python(self) -> str:
system_python = os.environ.get(AppEnvVars.PYTHON)
if system_python == "self":
system_python = sys.executable
system_python = (
system_python
or self.platform.modules.shutil.which("python")
or self.platform.modules.shutil.which("python3")
or sys.executable
)
if not isabs(system_python):
system_python = self.platform.modules.shutil.which(system_python)
return system_python
@cached_property
def env_vars(self) -> dict[str, str]:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>.env-vars]
```
"""
env_vars = self.config.get("env-vars", {})
if not isinstance(env_vars, dict):
message = f"Field `tool.hatch.envs.{self.name}.env-vars` must be a mapping"
raise TypeError(message)
for key, value in env_vars.items():
if not isinstance(value, str):
message = (
f"Environment variable `{key}` of field `tool.hatch.envs.{self.name}.env-vars` must be a string"
)
raise TypeError(message)
new_env_vars = {}
with self.metadata.context.apply_context(self.context):
for key, value in env_vars.items():
new_env_vars[key] = self.metadata.context.format(value)
new_env_vars[AppEnvVars.ENV_ACTIVE] = self.name
return new_env_vars
@cached_property
def env_include(self) -> list[str]:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
env-include = [...]
```
"""
env_include = self.config.get("env-include", [])
if not isinstance(env_include, list):
message = f"Field `tool.hatch.envs.{self.name}.env-include` must be an array"
raise TypeError(message)
for i, pattern in enumerate(env_include, 1):
if not isinstance(pattern, str):
message = f"Pattern #{i} of field `tool.hatch.envs.{self.name}.env-include` must be a string"
raise TypeError(message)
return ["HATCH_BUILD_*", *env_include] if env_include else env_include
@cached_property
def env_exclude(self) -> list[str]:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
env-exclude = [...]
```
"""
env_exclude = self.config.get("env-exclude", [])
if not isinstance(env_exclude, list):
message = f"Field `tool.hatch.envs.{self.name}.env-exclude` must be an array"
raise TypeError(message)
for i, pattern in enumerate(env_exclude, 1):
if not isinstance(pattern, str):
message = f"Pattern #{i} of field `tool.hatch.envs.{self.name}.env-exclude` must be a string"
raise TypeError(message)
return env_exclude
@cached_property
def environment_dependencies_complex(self) -> list[Dependency]:
from hatch.dep.core import Dependency, InvalidDependencyError
dependencies_complex: list[Dependency] = []
with self.apply_context():
for option in ("dependencies", "extra-dependencies"):
dependencies = self.config.get(option, [])
if not isinstance(dependencies, list):
message = f"Field `tool.hatch.envs.{self.name}.{option}` must be an array"
raise TypeError(message)
for i, entry in enumerate(dependencies, 1):
if not isinstance(entry, str):
message = f"Dependency #{i} of field `tool.hatch.envs.{self.name}.{option}` must be a string"
raise TypeError(message)
try:
dependencies_complex.append(Dependency(self.metadata.context.format(entry)))
except InvalidDependencyError as e:
message = f"Dependency #{i} of field `tool.hatch.envs.{self.name}.{option}` is invalid: {e}"
raise ValueError(message) from None
return dependencies_complex
@cached_property
def environment_dependencies(self) -> list[str]:
"""
The list of all [environment dependencies](../../config/environment/overview.md#dependencies).
"""
return [str(dependency) for dependency in self.environment_dependencies_complex]
@cached_property
def project_dependencies_complex(self) -> list[Dependency]:
workspace_dependencies = self.workspace.get_dependencies()
if self.skip_install and not self.features and not self.dependency_groups and not workspace_dependencies:
return []
from hatch.dep.core import Dependency
from hatch.utils.dep import get_complex_dependencies, get_complex_dependency_group, get_complex_features
all_dependencies_complex = list(map(Dependency, workspace_dependencies))
dependencies, optional_dependencies = self.app.project.get_dependencies()
# Format dependencies with context before creating Dependency objects
with self.apply_context():
formatted_dependencies = [self.metadata.context.format(dep) for dep in dependencies]
formatted_optional_dependencies = {
feature: [self.metadata.context.format(dep) for dep in deps]
for feature, deps in optional_dependencies.items()
}
dependencies_complex = get_complex_dependencies(formatted_dependencies)
optional_dependencies_complex = get_complex_features(formatted_optional_dependencies)
if not self.skip_install:
all_dependencies_complex.extend(dependencies_complex.values())
for feature in self.features:
if feature not in optional_dependencies_complex:
message = (
f"Feature `{feature}` of field `tool.hatch.envs.{self.name}.features` is not "
f"defined in the dynamic field `project.optional-dependencies`"
)
raise ValueError(message)
all_dependencies_complex.extend([
dep if isinstance(dep, Dependency) else Dependency(str(dep))
for dep in optional_dependencies_complex[feature]
])
for dependency_group in self.dependency_groups:
all_dependencies_complex.extend(
get_complex_dependency_group(self.app.project.dependency_groups, dependency_group)
)
return all_dependencies_complex
@cached_property
def project_dependencies(self) -> list[str]:
"""
The list of all [project dependencies](../../config/metadata.md#dependencies) (if
[installed](../../config/environment/overview.md#skip-install)), selected
[optional dependencies](../../config/environment/overview.md#features), and
workspace dependencies.
"""
return [str(dependency) for dependency in self.project_dependencies_complex]
@cached_property
def local_dependencies_complex(self) -> list[Dependency]:
from hatch.dep.core import Dependency
local_dependencies_complex = []
if not self.skip_install:
local_dependencies_complex.append(
Dependency(f"{self.metadata.name} @ {self.root.as_uri()}", editable=self.dev_mode)
)
if self.workspace.members:
local_dependencies_complex.extend(
Dependency(f"{member.project.metadata.name} @ {member.project.location.as_uri()}", editable=True)
for member in self.workspace.members
)
return local_dependencies_complex
@cached_property
def dependencies_complex(self) -> list[Dependency]:
from hatch.dep.core import Dependency
all_dependencies_complex = list(self.environment_dependencies_complex)
# Convert additional_dependencies to Dependency objects
for dep in self.additional_dependencies:
if isinstance(dep, Dependency):
all_dependencies_complex.append(dep)
else:
all_dependencies_complex.append(Dependency(str(dep)))
if self.builder:
from hatch.project.constants import BuildEnvVars
# Convert build requirements to Dependency objects
for req in self.metadata.build.requires_complex:
if isinstance(req, Dependency):
all_dependencies_complex.append(req)
else:
all_dependencies_complex.append(Dependency(str(req)))
for target in os.environ.get(BuildEnvVars.REQUESTED_TARGETS, "").split():
target_config = self.app.project.config.build.target(target)
all_dependencies_complex.extend(map(Dependency, target_config.dependencies))
return all_dependencies_complex
# Ensure these are checked last to speed up initial environment creation since
# they will already be installed along with the project
if self.dev_mode:
all_dependencies_complex.extend(self.project_dependencies_complex)
return all_dependencies_complex
@cached_property
def dependencies(self) -> list[str]:
"""
The list of all
[project dependencies](reference.md#hatch.env.plugin.interface.EnvironmentInterface.project_dependencies)
(if in [dev mode](../../config/environment/overview.md#dev-mode)) and
[environment dependencies](../../config/environment/overview.md#dependencies).
"""
return [str(dependency) for dependency in self.dependencies_complex]
@cached_property
def all_dependencies_complex(self) -> list[Dependency]:
from hatch.dep.core import Dependency
local_deps = list(self.local_dependencies_complex)
other_deps = list(self.dependencies_complex)
# Create workspace member name set for conflict detection
workspace_names = {dep.name.lower() for dep in local_deps}
# Filter out conflicting dependencies, keeping only workspace versions
filtered_deps = [
dep if isinstance(dep, Dependency) else Dependency(str(dep))
for dep in other_deps
if dep.name.lower() not in workspace_names
]
# Workspace members first to ensure precedence
return local_deps + filtered_deps
@cached_property
def all_dependencies(self) -> list[str]:
return [str(dependency) for dependency in self.all_dependencies_complex]
@cached_property
def platforms(self) -> list[str]:
"""
All names are stored as their lower-cased version.
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
platforms = [...]
```
"""
platforms = self.config.get("platforms", [])
if not isinstance(platforms, list):
message = f"Field `tool.hatch.envs.{self.name}.platforms` must be an array"
raise TypeError(message)
for i, command in enumerate(platforms, 1):
if not isinstance(command, str):
message = f"Platform #{i} of field `tool.hatch.envs.{self.name}.platforms` must be a string"
raise TypeError(message)
return [platform.lower() for platform in platforms]
@cached_property
def skip_install(self) -> bool:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
skip-install = ...
```
"""
skip_install = self.config.get("skip-install", not self.metadata.has_project_file())
if not isinstance(skip_install, bool):
message = f"Field `tool.hatch.envs.{self.name}.skip-install` must be a boolean"
raise TypeError(message)
return skip_install
@cached_property
def dev_mode(self) -> bool:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
dev-mode = ...
```
"""
dev_mode = self.config.get("dev-mode", True)
if not isinstance(dev_mode, bool):
message = f"Field `tool.hatch.envs.{self.name}.dev-mode` must be a boolean"
raise TypeError(message)
return dev_mode
@cached_property
def builder(self) -> bool:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
builder = ...
```
"""
builder = self.config.get("builder", False)
if not isinstance(builder, bool):
message = f"Field `tool.hatch.envs.{self.name}.builder` must be a boolean"
raise TypeError(message)
return builder
@cached_property
def features(self):
from hatch.utils.metadata import normalize_project_name
features = self.config.get("features", [])
if not isinstance(features, list):
message = f"Field `tool.hatch.envs.{self.name}.features` must be an array of strings"
raise TypeError(message)
all_features = set()
for i, feature in enumerate(features, 1):
if not isinstance(feature, str):
message = f"Feature #{i} of field `tool.hatch.envs.{self.name}.features` must be a string"
raise TypeError(message)
if not feature:
message = f"Feature #{i} of field `tool.hatch.envs.{self.name}.features` cannot be an empty string"
raise ValueError(message)
normalized_feature = (
feature if self.metadata.hatch.metadata.allow_ambiguous_features else normalize_project_name(feature)
)
if (
not self.metadata.hatch.metadata.hook_config
and normalized_feature not in self.metadata.core.optional_dependencies
):
message = (
f"Feature `{normalized_feature}` of field `tool.hatch.envs.{self.name}.features` is not "
f"defined in field `project.optional-dependencies`"
)
raise ValueError(message)
all_features.add(normalized_feature)
return sorted(all_features)
@cached_property
def dependency_groups(self):
from hatch.utils.metadata import normalize_project_name
dependency_groups = self.config.get("dependency-groups", [])
if not isinstance(dependency_groups, list):
message = f"Field `tool.hatch.envs.{self.name}.dependency-groups` must be an array of strings"
raise TypeError(message)
all_dependency_groups = set()
for i, dependency_group in enumerate(dependency_groups, 1):
if not isinstance(dependency_group, str):
message = (
f"Dependency Group #{i} of field `tool.hatch.envs.{self.name}.dependency-groups` must be a string"
)
raise TypeError(message)
if not dependency_group:
message = f"Dependency Group #{i} of field `tool.hatch.envs.{self.name}.dependency-groups` cannot be an empty string"
raise ValueError(message)
normalized_dependency_group = normalize_project_name(dependency_group)
if (
not self.metadata.hatch.metadata.hook_config
and normalized_dependency_group not in self.app.project.dependency_groups
):
message = (
f"Dependency Group `{normalized_dependency_group}` of field `tool.hatch.envs.{self.name}.dependency-groups` is not "
f"defined in field `dependency-groups`"
)
raise ValueError(message)
all_dependency_groups.add(normalized_dependency_group)
return sorted(all_dependency_groups)
@cached_property
def description(self) -> str:
"""
```toml config-example
[tool.hatch.envs.<ENV_NAME>]
description = ...
```
"""
description = self.config.get("description", "")
if not isinstance(description, str):
message = f"Field `tool.hatch.envs.{self.name}.description` must be a string"
raise TypeError(message)
return description
@cached_property
def scripts(self):
config = {}
# Extra scripts should come first to give less precedence
for field in ("extra-scripts", "scripts"):
script_config = self.config.get(field, {})
if not isinstance(script_config, dict):
message = f"Field `tool.hatch.envs.{self.name}.{field}` must be a table"
raise TypeError(message)
for name, data in script_config.items():
if " " in name:
message = (
f"Script name `{name}` in field `tool.hatch.envs.{self.name}.{field}` must not contain spaces"
)
raise ValueError(message)
commands = []
if isinstance(data, str):
commands.append(data)
elif isinstance(data, list):
for i, command in enumerate(data, 1):
if not isinstance(command, str):
message = (
f"Command #{i} in field `tool.hatch.envs.{self.name}.{field}.{name}` must be a string"
)
raise TypeError(message)
commands.append(command)
else:
message = (
f"Field `tool.hatch.envs.{self.name}.{field}.{name}` must be a string or an array of strings"
)
raise TypeError(message)
config[name] = commands
seen = {}
active = []
for script_name, commands in config.items():
commands[:] = expand_script_commands(self.name, script_name, commands, config, seen, active)
return config
@cached_property
def pre_install_commands(self):
pre_install_commands = self.config.get("pre-install-commands", [])
if not isinstance(pre_install_commands, list):
message = f"Field `tool.hatch.envs.{self.name}.pre-install-commands` must be an array"
raise TypeError(message)
for i, command in enumerate(pre_install_commands, 1):
if not isinstance(command, str):
message = f"Command #{i} of field `tool.hatch.envs.{self.name}.pre-install-commands` must be a string"
raise TypeError(message)
return list(pre_install_commands)
@cached_property
def post_install_commands(self):
post_install_commands = self.config.get("post-install-commands", [])
if not isinstance(post_install_commands, list):
message = f"Field `tool.hatch.envs.{self.name}.post-install-commands` must be an array"
raise TypeError(message)
for i, command in enumerate(post_install_commands, 1):
if not isinstance(command, str):
message = f"Command #{i} of field `tool.hatch.envs.{self.name}.post-install-commands` must be a string"
raise TypeError(message)
return list(post_install_commands)
@cached_property
def workspace(self) -> Workspace:
env_config = self.config.get("workspace", {})
if not isinstance(env_config, dict):
message = f"Field `tool.hatch.envs.{self.name}.workspace` must be a table"
raise TypeError(message)
return Workspace(self, env_config)
def activate(self):
"""
A convenience method called when using the environment as a context manager:
```python
with environment:
...
```
"""
def deactivate(self):
"""
A convenience method called after using the environment as a context manager:
```python
with environment:
...
```
"""
@abstractmethod
def find(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should return information about how to locate the environment or represent its ID in
some way. Additionally, this is expected to return something even if the environment is
[incompatible](reference.md#hatch.env.plugin.interface.EnvironmentInterface.check_compatibility).
"""
@abstractmethod
def create(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should perform the necessary steps to set up the environment.
"""
@abstractmethod
def remove(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should perform the necessary steps to completely remove the environment from the system and will only
be triggered manually by users with the [`env remove`](../../cli/reference.md#hatch-env-remove) or
[`env prune`](../../cli/reference.md#hatch-env-prune) commands.
"""
@abstractmethod
def exists(self) -> bool:
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should indicate whether or not the environment has already been created.
"""
@abstractmethod
def install_project(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should install the project in the environment.
"""
@abstractmethod
def install_project_dev_mode(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should install the project in the environment such that the environment
always reflects the current state of the project.
"""
@abstractmethod
def dependencies_in_sync(self) -> bool:
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should indicate whether or not the environment is compatible with the current
[dependencies](reference.md#hatch.env.plugin.interface.EnvironmentInterface.dependencies).
"""
@abstractmethod
def sync_dependencies(self):
"""
:material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right:
This should install the
[dependencies](reference.md#hatch.env.plugin.interface.EnvironmentInterface.dependencies)
in the environment.
"""
def dependency_hash(self):
"""
This should return a hash of the environment's
[dependencies](reference.md#hatch.env.plugin.interface.EnvironmentInterface.dependencies)
and any other data that is handled by the
[sync_dependencies](reference.md#hatch.env.plugin.interface.EnvironmentInterface.sync_dependencies)
and
[dependencies_in_sync](reference.md#hatch.env.plugin.interface.EnvironmentInterface.dependencies_in_sync)
methods.
"""
from hatch.utils.dep import hash_dependencies
return hash_dependencies(self.all_dependencies_complex)
@contextmanager
def app_status_creation(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
with self.app.status(f"Creating environment: {self.name}"):
yield
@contextmanager
def app_status_pre_installation(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
with self.app.status("Running pre-installation commands"):
yield
@contextmanager
def app_status_post_installation(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
with self.app.status("Running post-installation commands"):
yield
@contextmanager
def app_status_project_installation(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
if self.dev_mode:
with self.app.status("Installing project in development mode"):
yield
else:
with self.app.status("Installing project"):
yield
@contextmanager
def app_status_dependency_state_check(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
if not self.skip_install and (
"dependencies" in self.metadata.dynamic or "optional-dependencies" in self.metadata.dynamic
):
with self.app.status("Polling dependency state"):
yield
else:
yield
@contextmanager
def app_status_dependency_installation_check(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
with self.app.status("Checking dependencies"):
yield
@contextmanager
def app_status_dependency_synchronization(self):
"""
See the [life cycle of environments](reference.md#life-cycle).
"""
with self.app.status("Syncing dependencies"):
yield
@contextmanager
def fs_context(self) -> Generator[FileSystemContext, None, None]:
"""
A context manager that must yield a subclass of
[FileSystemContext](../utilities.md#hatch.env.plugin.interface.FileSystemContext).
"""
from hatch.utils.fs import temp_directory
with temp_directory() as temp_dir:
yield FileSystemContext(self, local_path=temp_dir, env_path=str(temp_dir))
def enter_shell(
self,
name: str, # noqa: ARG002
path: str,
args: Iterable[str],
):
"""
Spawn a [shell](../../config/hatch.md#shell) within the environment.
This should either use
[command_context](reference.md#hatch.env.plugin.interface.EnvironmentInterface.command_context)
directly or provide the same guarantee.
"""
with self.command_context():
self.platform.exit_with_command([path, *args])
def run_shell_command(self, command: str, **kwargs):
"""
This should return the standard library's
[subprocess.CompletedProcess](https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess)
and will always be called when the
[command_context](reference.md#hatch.env.plugin.interface.EnvironmentInterface.command_context)
is active, with the expectation of providing the same guarantee.
"""
kwargs.setdefault("shell", True)
return self.platform.run_command(command, **kwargs)
@contextmanager
def command_context(self):
"""
A context manager that when active should make executed shell commands reflect any
[environment variables](reference.md#hatch.env.plugin.interface.EnvironmentInterface.get_env_vars)
the user defined either currently or at the time of
[creation](reference.md#hatch.env.plugin.interface.EnvironmentInterface.create).
For an example, open the default implementation below:
"""
with self.get_env_vars():
yield
def resolve_commands(self, commands: list[str]):
"""
This expands each command into one or more commands based on any
[scripts](../../config/environment/overview.md#scripts) that the user defined.
"""
for command in commands:
yield from self.expand_command(command)
def expand_command(self, command):
possible_script, args, _ignore_exit_code = parse_script_command(command)
# Indicate undefined
if not args:
args = None
with self.apply_context():
if possible_script in self.scripts:
if args is not None:
args = self.metadata.context.format(args)
for cmd in self.scripts[possible_script]:
yield self.metadata.context.format(cmd, args=args).strip()
else:
yield self.metadata.context.format(command, args=args).strip()
def construct_pip_install_command(self, args: list[str]):
"""
A convenience method for constructing a [`pip install`](https://pip.pypa.io/en/stable/cli/pip_install/)
command with the given verbosity. The default verbosity is set to one less than Hatch's verbosity.
"""
command = ["python", "-u", "-m", "pip", "install", "--disable-pip-version-check"]
# Default to -1 verbosity
add_verbosity_flag(command, self.verbosity, adjustment=-1)
command.extend(args)
return command
def join_command_args(self, args: list[str]):
"""
This is used by the [`run`](../../cli/reference.md#hatch-run) command to construct the root command string
from the received arguments.
"""
return self.platform.join_command_args(args)
def apply_features(self, requirement: str):
"""
A convenience method that applies any user defined [features](../../config/environment/overview.md#features)
to the given requirement.
"""
if self.features:
features = ",".join(self.features)
return f"{requirement}[{features}]"
return requirement
def check_compatibility(self):
"""
This raises an exception if the environment is not compatible with the user's setup. The default behavior
checks for [platform compatibility](../../config/environment/overview.md#supported-platforms)
and any method override should keep this check.
This check is never performed if the environment has been
[created](reference.md#hatch.env.plugin.interface.EnvironmentInterface.create).
"""
if self.platforms and self.platform.name not in self.platforms:
message = "unsupported platform"
raise OSError(message)
def get_env_vars(self) -> EnvVars:
"""
Returns a mapping of environment variables that should be available to the environment. The object can
be used as a context manager to temporarily apply the environment variables to the current process.
!!! note
The environment variable `HATCH_ENV_ACTIVE` will always be set to the name of the environment.
"""
return EnvVars(self.env_vars, self.env_include, self.env_exclude)
def get_env_var_option(self, option: str) -> str:
"""
Returns the value of the upper-cased environment variable `HATCH_ENV_TYPE_<PLUGIN_NAME>_<option>`.
"""
return get_env_var_option(plugin_name=self.PLUGIN_NAME, option=option)
def get_context(self):
"""
Returns a subclass of
[EnvironmentContextFormatter](../utilities.md#hatch.env.context.EnvironmentContextFormatter).
"""
from hatch.env.context import EnvironmentContextFormatter
return EnvironmentContextFormatter(self)
@staticmethod
def get_option_types() -> dict:
"""
Returns a mapping of supported options to their respective types so that they can be used by
[overrides](../../config/environment/advanced.md#option-overrides).
"""
return {}
@contextmanager
def apply_context(self):
with self.get_env_vars(), self.metadata.context.apply_context(self.context):
yield
def __enter__(self):
self.activate()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.deactivate()
| EnvironmentInterface |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py | {
"start": 2317,
"end": 9497
} | class ____(AwsBaseOperator[SageMakerHook]):
"""
This is the base operator for all SageMaker operators.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param config: The configuration necessary to start a training job (templated)
"""
aws_hook_class = SageMakerHook
template_fields: Sequence[str] = aws_template_fields(
"config",
)
template_ext: Sequence[str] = ()
template_fields_renderers: ClassVar[dict] = {"config": "json"}
ui_color: str = "#ededed"
integer_fields: list[list[Any]] = []
def __init__(self, *, config: dict, **kwargs):
super().__init__(**kwargs)
self.config = config
def parse_integer(self, config: dict | list, field: list[str] | str) -> None:
"""Recursive method for parsing string fields holding integer values to integers."""
if len(field) == 1:
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
head = field[0]
if head in config:
config[head] = int(config[head])
return
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
(head, tail) = (field[0], field[1:])
if head in config:
self.parse_integer(config[head], tail)
return
def parse_config_integers(self) -> None:
"""Parse the integer fields to ints in case the config is rendered by Jinja and all fields are str."""
for field in self.integer_fields:
self.parse_integer(self.config, field)
def expand_role(self) -> None:
"""Call boto3's `expand_role`, which expands an IAM role name into an ARN."""
def preprocess_config(self) -> None:
"""Process the config into a usable form."""
self._create_integer_fields()
self.log.info("Preprocessing the config and doing required s3_operations")
self.hook.configure_s3_resources(self.config)
self.parse_config_integers()
self.expand_role()
self.log.info(
"After preprocessing the config is:\n %s",
json.dumps(self.config, sort_keys=True, indent=4, separators=(",", ": ")),
)
def _create_integer_fields(self) -> None:
"""
Set fields which should be cast to integers.
Child classes should override this method if they need integer fields parsed.
"""
self.integer_fields = []
def _get_unique_job_name(
self, proposed_name: str, fail_if_exists: bool, describe_func: Callable[[str], Any]
) -> str:
"""
Return the proposed name if it doesn't already exist, otherwise returns it with a timestamp suffix.
:param proposed_name: Base name.
:param fail_if_exists: Will throw an error if a job with that name already exists
instead of finding a new name.
:param describe_func: The `describe_` function for that kind of job.
We use it as an O(1) way to check if a job exists.
"""
return self._get_unique_name(
proposed_name, fail_if_exists, describe_func, self._check_if_job_exists, "job"
)
def _get_unique_name(
self,
proposed_name: str,
fail_if_exists: bool,
describe_func: Callable[[str], Any],
check_exists_func: Callable[[str, Callable[[str], Any]], bool],
resource_type: str,
) -> str:
"""
Return the proposed name if it doesn't already exist, otherwise returns it with a timestamp suffix.
:param proposed_name: Base name.
:param fail_if_exists: Will throw an error if a resource with that name already exists
instead of finding a new name.
:param check_exists_func: The function to check if the resource exists.
It should take the resource name and a describe function as arguments.
:param resource_type: Type of the resource (e.g., "model", "job").
"""
self._check_resource_type(resource_type)
name = proposed_name
while check_exists_func(name, describe_func):
# this while should loop only once in most cases, just setting it this way to regenerate a name
# in case there is collision.
if fail_if_exists:
raise AirflowException(f"A SageMaker {resource_type} with name {name} already exists.")
max_name_len = 63
timestamp = str(time.time_ns() // 1000000000) # only keep the relevant datetime (first 10 digits)
name = f"{proposed_name[: max_name_len - len(timestamp) - 1]}-{timestamp}" # we subtract one to make provision for the dash between the truncated name and timestamp
self.log.info("Changed %s name to '%s' to avoid collision.", resource_type, name)
return name
def _check_resource_type(self, resource_type: str):
"""Raise exception if resource type is not 'model' or 'job'."""
if resource_type not in ("model", "job"):
raise AirflowException(
f"Argument resource_type accepts only 'model' and 'job'. Provided value: '{resource_type}'."
)
def _check_if_job_exists(self, job_name: str, describe_func: Callable[[str], Any]) -> bool:
"""Return True if job exists, False otherwise."""
return self._check_if_resource_exists(job_name, "job", describe_func)
def _check_if_resource_exists(
self, resource_name: str, resource_type: str, describe_func: Callable[[str], Any]
) -> bool:
"""Return True if resource exists, False otherwise."""
self._check_resource_type(resource_type)
try:
describe_func(resource_name)
self.log.info("Found existing %s with name '%s'.", resource_type, resource_name)
return True
except ClientError as e:
if e.response["Error"]["Code"] == "ValidationException":
return False # ValidationException is thrown when the resource could not be found
raise e
def execute(self, context: Context):
raise NotImplementedError("Please implement execute() in sub class!")
@staticmethod
def path_to_s3_dataset(path) -> Dataset:
from airflow.providers.common.compat.openlineage.facet import Dataset
path = path.replace("s3://", "")
split_path = path.split("/")
return Dataset(namespace=f"s3://{split_path[0]}", name="/".join(split_path[1:]), facets={})
| SageMakerBaseOperator |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/asserts.py | {
"start": 2815,
"end": 4935
} | class ____(FormatChecker):
@staticmethod
def check_datetime(value: str) -> bool:
valid_format = timestamp_regex.match(value)
try:
pendulum.parse(value, strict=False)
except ValueError:
valid_time = False
else:
valid_time = True
return valid_format and valid_time
def check(self, instance, format):
if instance is not None and format == "date-time":
if not self.check_datetime(instance):
raise FormatError(f"{instance} has invalid datetime format")
else:
return super().check(instance, format)
def verify_records_schema(
records: List[AirbyteRecordMessage], catalog: ConfiguredAirbyteCatalog
) -> Mapping[str, Mapping[str, ValidationError]]:
"""Check records against their schemas from the catalog, yield error messages.
Only first record with error will be yielded for each stream.
"""
stream_validators = {}
for stream in catalog.streams:
schema_to_validate_against = stream.stream.json_schema
# We will be disabling strict `NoAdditionalPropertiesValidator` until we have a better plan for schema validation. The consequence
# is that we will lack visibility on new fields that are not added on the root level (root level is validated by Datadog)
# validator = NoAdditionalPropertiesValidator if fail_on_extra_columns else Draft7ValidatorWithStrictInteger
validator = Draft7ValidatorWithStrictInteger
stream_validators[stream.stream.name] = validator(schema_to_validate_against, format_checker=CustomFormatChecker())
stream_errors = defaultdict(dict)
for record in records:
validator = stream_validators.get(record.stream)
if not validator:
logging.error(f"Received record from the `{record.stream}` stream, which is not in the catalog.")
continue
errors = list(validator.iter_errors(record.data))
for error in errors:
stream_errors[record.stream][str(error.schema_path)] = error
return stream_errors
| CustomFormatChecker |
python | protocolbuffers__protobuf | python/google/protobuf/internal/reflection_test.py | {
"start": 95923,
"end": 96397
} | class ____(unittest.TestCase):
def testEqualityWithMutualRecursion(self):
first_proto = unittest_pb2.TestMutualRecursionA()
second_proto = unittest_pb2.TestMutualRecursionA()
self.assertEqual(first_proto, second_proto)
first_proto.bb.a.bb.optional_int32 = 23
self.assertNotEqual(first_proto, second_proto)
second_proto.bb.a.bb.optional_int32 = 23
self.assertEqual(first_proto, second_proto)
@testing_refleaks.TestCase
| MutualRecursionEqualityTest |
python | Netflix__metaflow | metaflow/plugins/pypi/conda_decorator.py | {
"start": 293,
"end": 10418
} | class ____(StepDecorator):
"""
Specifies the Conda environment for the step.
Information in this decorator will augment any
attributes set in the `@conda_base` flow-level decorator. Hence,
you can use `@conda_base` to set packages required by all
steps and use `@conda` to specify step-specific overrides.
Parameters
----------
packages : Dict[str, str], default {}
Packages to use for this step. The key is the name of the package
and the value is the version to use.
libraries : Dict[str, str], default {}
Supported for backward compatibility. When used with packages, packages will take precedence.
python : str, optional, default None
Version of Python to use, e.g. '3.7.4'. A default value of None implies
that the version used will correspond to the version of the Python interpreter used to start the run.
disabled : bool, default False
If set to True, disables @conda.
"""
name = "conda"
defaults = {
"packages": {},
"libraries": {}, # Deprecated! Use packages going forward
"python": None,
"disabled": None,
}
_metaflow_home = None
_addl_env_vars = None
# To define conda channels for the whole solve, users can specify
# CONDA_CHANNELS in their environment. For pinning specific packages to specific
# conda channels, users can specify channel::package as the package name.
def __init__(self, attributes=None, statically_defined=False, inserted_by=None):
self._attributes_with_user_values = (
set(attributes.keys()) if attributes is not None else set()
)
super(CondaStepDecorator, self).__init__(
attributes, statically_defined, inserted_by
)
def init(self):
# Support legacy 'libraries=' attribute for the decorator.
self.attributes["packages"] = {
**self.attributes["libraries"],
**self.attributes["packages"],
}
# Keep because otherwise make_decorator_spec will fail
self.attributes["libraries"] = {}
if self.attributes["packages"]:
self._attributes_with_user_values.add("packages")
def is_attribute_user_defined(self, name):
return name in self._attributes_with_user_values
def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
# The init_environment hook for Environment creates the relevant virtual
# environments. The step_init hook sets up the relevant state for that hook to
# do it's magic.
self.flow = flow
self.step = step
self.environment = environment
self.datastore = flow_datastore
# Support flow-level decorator.
if "conda_base" in self.flow._flow_decorators:
conda_base = self.flow._flow_decorators["conda_base"][0]
super_attributes = conda_base.attributes
self.attributes["packages"] = {
**super_attributes["packages"],
**self.attributes["packages"],
}
self._attributes_with_user_values.update(
conda_base._attributes_with_user_values
)
self.attributes["python"] = (
self.attributes["python"] or super_attributes["python"]
)
self.attributes["disabled"] = (
self.attributes["disabled"]
if self.attributes["disabled"] is not None
else super_attributes["disabled"]
)
# Set default for `disabled` argument.
if not self.attributes["disabled"]:
self.attributes["disabled"] = False
# Set Python interpreter to user's Python if necessary.
if not self.attributes["python"]:
self.attributes["python"] = platform.python_version() # CPython!
# @conda uses a conda environment to create a virtual environment.
# The conda environment can be created through micromamba.
_supported_virtual_envs = ["conda"]
# To placate people who don't want to see a shred of conda in UX, we symlink
# --environment=pypi to --environment=conda
_supported_virtual_envs.extend(["pypi"])
# TODO: Hardcoded for now to support the fast bakery environment.
# We should introduce a more robust mechanism for appending supported environments, for example from within extensions.
_supported_virtual_envs.extend(["fast-bakery"])
# The --environment= requirement ensures that valid virtual environments are
# created for every step to execute it, greatly simplifying the @conda
# implementation.
if environment.TYPE not in _supported_virtual_envs:
raise InvalidEnvironmentException(
"@%s decorator requires %s"
% (
self.name,
" or ".join(
["--environment=%s" % env for env in _supported_virtual_envs]
),
)
)
# At this point, the list of 32 bit instance types is shrinking quite rapidly.
# We can worry about supporting them when there is a need.
# TODO: This code snippet can be done away with by altering the constructor of
# MetaflowEnvironment. A good first-task exercise.
# Avoid circular import
from metaflow.plugins.datastores.local_storage import LocalStorage
environment.set_local_root(LocalStorage.get_datastore_root_from_config(logger))
self.disabled = self.environment.is_disabled(
next(step for step in self.flow if step.name == self.step)
)
def runtime_init(self, flow, graph, package, run_id):
if self.disabled:
return
# We need to make all the code package available to the user code in
# a temporary directory which will be added to the PYTHONPATH.
if self.__class__._metaflow_home is None:
# Do this ONCE per flow
self.__class__._metaflow_home = tempfile.TemporaryDirectory(dir="/tmp")
package.extract_into(
self.__class__._metaflow_home.name, ContentType.ALL_CONTENT
)
self.__class__._addl_env_vars = package.get_post_extract_env_vars(
package.package_metadata, self.__class__._metaflow_home.name
)
# # Also install any environment escape overrides directly here to enable
# # the escape to work even in non metaflow-created subprocesses
# from ..env_escape import generate_trampolines
# generate_trampolines(self.metaflow_dir.name)
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
if self.disabled:
return
self.interpreter = (
self.environment.interpreter(self.step)
if not any(
decorator.name
in ["batch", "kubernetes", "nvidia", "snowpark", "slurm", "nvct"]
for decorator in next(
step for step in self.flow if step.name == self.step
).decorators
)
else None
)
def task_pre_step(
self,
step_name,
task_datastore,
meta,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
if self.disabled:
return
# Add Python interpreter's parent to the path to ensure that any non-pythonic
# dependencies in the virtual environment are visible to the user code.
# sys.executable points to the Python interpreter in the virtual environment
# since we are already inside the task context.
os.environ["PATH"] = os.pathsep.join(
filter(
None,
(
os.path.dirname(os.path.realpath(sys.executable)),
os.environ.get("PATH"),
),
)
)
# Infer environment prefix from Python interpreter
match = re.search(
r"(?:.*\/)(metaflow\/[^/]+\/[^/]+)(?=\/bin\/python)", sys.executable
)
if match:
meta.register_metadata(
run_id,
step_name,
task_id,
[
MetaDatum(
field="conda_env_prefix",
value=match.group(1),
type="conda_env_prefix",
tags=["attempt_id:{0}".format(retry_count)],
)
],
)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if self.disabled:
return
# Ensure local installation of Metaflow is visible to user code
python_path = self.__class__._metaflow_home.name
addl_env_vars = {}
if self.__class__._addl_env_vars:
for key, value in self.__class__._addl_env_vars.items():
if key.endswith(":"):
addl_env_vars[key[:-1]] = value
elif key == "PYTHONPATH":
addl_env_vars[key] = os.pathsep.join([value, python_path])
else:
addl_env_vars[key] = value
cli_args.env.update(addl_env_vars)
if self.interpreter:
# https://github.com/conda/conda/issues/7707
# Also ref - https://github.com/Netflix/metaflow/pull/178
cli_args.env["PYTHONNOUSERSITE"] = "1"
# The executable is already in place for the user code to execute against
cli_args.entrypoint[0] = self.interpreter
def runtime_finished(self, exception):
if self.disabled:
return
if self.__class__._metaflow_home is not None:
self.__class__._metaflow_home.cleanup()
self.__class__._metaflow_home = None
| CondaStepDecorator |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 69719,
"end": 73286
} | class ____:
def __init__(
self,
naming_authority: NamingAuthority | None,
profession_items: Iterable[str],
profession_oids: Iterable[ObjectIdentifier] | None,
registration_number: str | None,
add_profession_info: bytes | None,
) -> None:
if naming_authority is not None and not isinstance(
naming_authority, NamingAuthority
):
raise TypeError("naming_authority must be a NamingAuthority")
profession_items = list(profession_items)
if not all(isinstance(item, str) for item in profession_items):
raise TypeError(
"Every item in the profession_items list must be a str"
)
if profession_oids is not None:
profession_oids = list(profession_oids)
if not all(
isinstance(oid, ObjectIdentifier) for oid in profession_oids
):
raise TypeError(
"Every item in the profession_oids list must be an "
"ObjectIdentifier"
)
if registration_number is not None and not isinstance(
registration_number, str
):
raise TypeError("registration_number must be a str")
if add_profession_info is not None and not isinstance(
add_profession_info, bytes
):
raise TypeError("add_profession_info must be bytes")
self._naming_authority = naming_authority
self._profession_items = profession_items
self._profession_oids = profession_oids
self._registration_number = registration_number
self._add_profession_info = add_profession_info
@property
def naming_authority(self) -> NamingAuthority | None:
return self._naming_authority
@property
def profession_items(self) -> list[str]:
return self._profession_items
@property
def profession_oids(self) -> list[ObjectIdentifier] | None:
return self._profession_oids
@property
def registration_number(self) -> str | None:
return self._registration_number
@property
def add_profession_info(self) -> bytes | None:
return self._add_profession_info
def __repr__(self) -> str:
return (
f"<ProfessionInfo(naming_authority={self.naming_authority}, "
f"profession_items={self.profession_items}, "
f"profession_oids={self.profession_oids}, "
f"registration_number={self.registration_number}, "
f"add_profession_info={self.add_profession_info!r})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, ProfessionInfo):
return NotImplemented
return (
self.naming_authority == other.naming_authority
and self.profession_items == other.profession_items
and self.profession_oids == other.profession_oids
and self.registration_number == other.registration_number
and self.add_profession_info == other.add_profession_info
)
def __hash__(self) -> int:
if self.profession_oids is not None:
profession_oids = tuple(self.profession_oids)
else:
profession_oids = None
return hash(
(
self.naming_authority,
tuple(self.profession_items),
profession_oids,
self.registration_number,
self.add_profession_info,
)
)
| ProfessionInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 2608,
"end": 2849
} | class ____(TypedDict):
a: int
td10: TD10 = {"a": 0}
n1: TD8 = td10
# This should generate an error because "a" is writable
# and required in TD10 but writable and not required in
# TD9, which means it can be deleted.
n2: TD9 = td10
| TD10 |
python | doocs__leetcode | solution/3600-3699/3607.Power Grid Maintenance/Solution.py | {
"start": 563,
"end": 1285
} | class ____:
def processQueries(
self, c: int, connections: List[List[int]], queries: List[List[int]]
) -> List[int]:
uf = UnionFind(c + 1)
for u, v in connections:
uf.union(u, v)
st = [SortedList() for _ in range(c + 1)]
for i in range(1, c + 1):
st[uf.find(i)].add(i)
ans = []
for a, x in queries:
root = uf.find(x)
if a == 1:
if x in st[root]:
ans.append(x)
elif len(st[root]):
ans.append(st[root][0])
else:
ans.append(-1)
else:
st[root].discard(x)
return ans
| Solution |
python | pallets__click | src/click/utils.py | {
"start": 5591,
"end": 16109
} | class ____:
def __init__(self, file: t.IO[t.Any]) -> None:
self._file: t.IO[t.Any] = file
def __getattr__(self, name: str) -> t.Any:
return getattr(self._file, name)
def __enter__(self) -> KeepOpenFile:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
tb: TracebackType | None,
) -> None:
pass
def __repr__(self) -> str:
return repr(self._file)
def __iter__(self) -> cabc.Iterator[t.AnyStr]:
return iter(self._file)
def echo(
message: t.Any | None = None,
file: t.IO[t.Any] | None = None,
nl: bool = True,
err: bool = False,
color: bool | None = None,
) -> None:
"""Print a message and newline to stdout or a file. This should be
used instead of :func:`print` because it provides better support
for different data, files, and environments.
Compared to :func:`print`, this does the following:
- Ensures that the output encoding is not misconfigured on Linux.
- Supports Unicode in the Windows console.
- Supports writing to binary outputs, and supports writing bytes
to text outputs.
- Supports colors and styles on Windows.
- Removes ANSI color and style codes if the output does not look
like an interactive terminal.
- Always flushes the output.
:param message: The string or bytes to output. Other objects are
converted to strings.
:param file: The file to write to. Defaults to ``stdout``.
:param err: Write to ``stderr`` instead of ``stdout``.
:param nl: Print a newline after the message. Enabled by default.
:param color: Force showing or hiding colors and other styles. By
default Click will remove color if the output does not look like
an interactive terminal.
.. versionchanged:: 6.0
Support Unicode output on the Windows console. Click does not
modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
will still not support Unicode.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionadded:: 3.0
Added the ``err`` parameter.
.. versionchanged:: 2.0
Support colors on Windows if colorama is installed.
"""
if file is None:
if err:
file = _default_text_stderr()
else:
file = _default_text_stdout()
# There are no standard streams attached to write to. For example,
# pythonw on Windows.
if file is None:
return
# Convert non bytes/text into the native string type.
if message is not None and not isinstance(message, (str, bytes, bytearray)):
out: str | bytes | bytearray | None = str(message)
else:
out = message
if nl:
out = out or ""
if isinstance(out, str):
out += "\n"
else:
out += b"\n"
if not out:
file.flush()
return
# If there is a message and the value looks like bytes, we manually
# need to find the binary stream and write the message in there.
# This is done separately so that most stream types will work as you
# would expect. Eg: you can write to StringIO for other cases.
if isinstance(out, (bytes, bytearray)):
binary_file = _find_binary_writer(file)
if binary_file is not None:
file.flush()
binary_file.write(out)
binary_file.flush()
return
# ANSI style code support. For no message or bytes, nothing happens.
# When outputting to a file instead of a terminal, strip codes.
else:
color = resolve_color_default(color)
if should_strip_ansi(file, color):
out = strip_ansi(out)
elif WIN:
if auto_wrap_for_ansi is not None:
file = auto_wrap_for_ansi(file, color) # type: ignore
elif not color:
out = strip_ansi(out)
file.write(out) # type: ignore
file.flush()
def get_binary_stream(name: t.Literal["stdin", "stdout", "stderr"]) -> t.BinaryIO:
"""Returns a system stream for byte processing.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener()
def get_text_stream(
name: t.Literal["stdin", "stdout", "stderr"],
encoding: str | None = None,
errors: str | None = "strict",
) -> t.TextIO:
"""Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts for already
correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
"""
opener = text_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener(encoding, errors)
def open_file(
filename: str | os.PathLike[str],
mode: str = "r",
encoding: str | None = None,
errors: str | None = "strict",
lazy: bool = False,
atomic: bool = False,
) -> t.IO[t.Any]:
"""Open a file, with extra behavior to handle ``'-'`` to indicate
a standard stream, lazy open on write, and atomic write. Similar to
the behavior of the :class:`~click.File` param type.
If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is
wrapped so that using it in a context manager will not close it.
This makes it possible to use the function without accidentally
closing a standard stream:
.. code-block:: python
with open_file(filename) as f:
...
:param filename: The name or Path of the file to open, or ``'-'`` for
``stdin``/``stdout``.
:param mode: The mode in which to open the file.
:param encoding: The encoding to decode or encode a file opened in
text mode.
:param errors: The error handling mode.
:param lazy: Wait to open the file until it is accessed. For read
mode, the file is temporarily opened to raise access errors
early, then closed until it is read again.
:param atomic: Write to a temporary file and replace the given file
on close.
.. versionadded:: 3.0
"""
if lazy:
return t.cast(
"t.IO[t.Any]", LazyFile(filename, mode, encoding, errors, atomic=atomic)
)
f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
if not should_close:
f = t.cast("t.IO[t.Any]", KeepOpenFile(f))
return f
def format_filename(
filename: str | bytes | os.PathLike[str] | os.PathLike[bytes],
shorten: bool = False,
) -> str:
"""Format a filename as a string for display. Ensures the filename can be
displayed by replacing any invalid bytes or surrogate escapes in the name
with the replacement character ``�``.
Invalid bytes or surrogate escapes will raise an error when written to a
stream with ``errors="strict"``. This will typically happen with ``stdout``
when the locale is something like ``en_GB.UTF-8``.
Many scenarios *are* safe to write surrogates though, due to PEP 538 and
PEP 540, including:
- Writing to ``stderr``, which uses ``errors="backslashreplace"``.
- The system has ``LANG=C.UTF-8``, ``C``, or ``POSIX``. Python opens
stdout and stderr with ``errors="surrogateescape"``.
- None of ``LANG/LC_*`` are set. Python assumes ``LANG=C.UTF-8``.
- Python is started in UTF-8 mode with ``PYTHONUTF8=1`` or ``-X utf8``.
Python opens stdout and stderr with ``errors="surrogateescape"``.
:param filename: formats a filename for UI display. This will also convert
the filename into unicode without failing.
:param shorten: this optionally shortens the filename to strip of the
path that leads up to it.
"""
if shorten:
filename = os.path.basename(filename)
else:
filename = os.fspath(filename)
if isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(), "replace")
else:
filename = filename.encode("utf-8", "surrogateescape").decode(
"utf-8", "replace"
)
return filename
def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
To give you an idea, for an app called ``"Foo Bar"``, something like
the following folders could be returned:
Mac OS X:
``~/Library/Application Support/Foo Bar``
Mac OS X (POSIX):
``~/.foo-bar``
Unix:
``~/.config/foo-bar``
Unix (POSIX):
``~/.foo-bar``
Windows (roaming):
``C:\Users\<user>\AppData\Roaming\Foo Bar``
Windows (not roaming):
``C:\Users\<user>\AppData\Local\Foo Bar``
.. versionadded:: 2.0
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
Has no effect otherwise.
:param force_posix: if this is set to `True` then on any POSIX system the
folder will be stored in the home folder with a leading
dot instead of the XDG config home or darwin's
application support folder.
"""
if WIN:
key = "APPDATA" if roaming else "LOCALAPPDATA"
folder = os.environ.get(key)
if folder is None:
folder = os.path.expanduser("~")
return os.path.join(folder, app_name)
if force_posix:
return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
if sys.platform == "darwin":
return os.path.join(
os.path.expanduser("~/Library/Application Support"), app_name
)
return os.path.join(
os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
_posixify(app_name),
)
| KeepOpenFile |
python | google__jax | jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_kernel.py | {
"start": 72372,
"end": 78591
} | class ____:
def __init__(
self,
fwd_mask_info: mask_info_lib.MaskInfo,
dq_mask_info: mask_info_lib.MaskInfo | None,
dkv_mask_info: mask_info_lib.MaskInfo | None,
**kwargs,
):
self.kwargs = kwargs
self.fwd_mask_info = fwd_mask_info
self.dq_mask_info = dq_mask_info
self.dkv_mask_info = dkv_mask_info
def __call__(self, *args, **kwargs) -> SplashCustomReturnType:
return _splash_attention(
self.fwd_mask_info,
self.dq_mask_info,
self.dkv_mask_info,
*args,
**kwargs,
**self.kwargs,
)
def manual_sharding_spec(self, sharding: jax.sharding.NamedSharding):
"""Returns a value that can be used as a shard_map partition spec for the kernel."""
if self.fwd_mask_info.data_next is not None:
block_mask_shape = self.fwd_mask_info.data_next.shape
try:
shard_shape = sharding.shard_shape(block_mask_shape)
except ValueError as exc:
raise ValueError(
"The sharding must divide the mask blocks evenly between devices"
) from exc
if block_mask_shape[-1] != shard_shape[-1]:
raise ValueError("Sharding the kv sequence dimension is not supported")
spec = sharding.spec
assert len(spec) == 2
replicated = jax.sharding.PartitionSpec()
partial_mask_blocks_spec = (
spec if self.fwd_mask_info.is_dynamic_mask else replicated
)
# Shard q_sequence over the sequence dimension only.
q_sequence_spec = jax.sharding.PartitionSpec(spec[1])
mask_info_specs = mask_info_lib.MaskInfo( # pytype: disable=wrong-arg-types
data_next=spec if self.fwd_mask_info.data_next is not None else None,
mask_next=spec if self.fwd_mask_info.mask_next is not None else None,
block_mask=spec if self.fwd_mask_info.block_mask is not None else None,
partial_mask_blocks=partial_mask_blocks_spec
if self.fwd_mask_info.partial_mask_blocks is not None
else None,
q_sequence=q_sequence_spec
if self.fwd_mask_info.q_sequence is not None
else None,
)
return SplashAttentionKernel(
mask_info_specs,
mask_info_specs if self.dq_mask_info is not None else None,
mask_info_specs if self.dkv_mask_info is not None else None,
**self.kwargs,
)
def tree_flatten(self):
return (
(self.fwd_mask_info, self.dq_mask_info, self.dkv_mask_info),
self.kwargs,
)
@classmethod
def tree_unflatten(cls, kwargs, values):
fwd_mask_info, dq_mask_info, dkv_mask_info = values
# NamedTuples are not preserved during pytree serialization.
dq_mask_info = (
mask_info_lib.MaskInfo(*dq_mask_info)
if dq_mask_info is not None
else None
)
dkv_mask_info = (
mask_info_lib.MaskInfo(*dkv_mask_info)
if dkv_mask_info is not None
else None
)
return SplashAttentionKernel(
mask_info_lib.MaskInfo(*fwd_mask_info),
dq_mask_info,
dkv_mask_info,
**kwargs,
)
def _make_splash_attention(
mask: np.ndarray | jax.Array | mask_lib.MultiHeadMask,
*,
block_sizes: BlockSizes | None = None,
is_mqa: bool,
save_residuals: bool = False,
mask_value: float = DEFAULT_MASK_VALUE,
attn_logits_soft_cap: float | None = None,
downcast_smem_data: bool = True,
head_shards: int,
q_seq_shards: int,
residual_checkpoint_name: str | None = None,
interpret: bool = False,
):
if len(mask.shape) != 3:
raise ValueError(f'Unexpected mask shape: {mask.shape}')
if isinstance(mask, np.ndarray):
mask = mask_lib.MultiHeadMask(
[mask_lib.NumpyMask(head_mask) for head_mask in mask]
)
if block_sizes is None:
block_sizes = BlockSizes.get_default()
process_mask_fn = (
mask_info_lib.process_dynamic_mask
if isinstance(mask, jax.Array)
else mask_info_lib.process_mask
)
process_mask_dvk_fn = (
mask_info_lib.process_dynamic_mask_dkv
if isinstance(mask, jax.Array)
else mask_info_lib.process_mask_dkv
)
fwd_mask_info, mask_function_fwd = process_mask_fn(
mask,
(block_sizes.block_q, block_sizes.block_kv),
downcast_smem_data=downcast_smem_data,
head_shards=head_shards,
q_seq_shards=q_seq_shards,
)
fwd_mask_info = tree_util.tree_map(jnp.array, fwd_mask_info)
dq_mask_info = None
dkv_mask_info = None
if block_sizes.has_backward_blocks:
if block_sizes.use_fused_bwd_kernel:
dq_mask_info = None
else:
bq_dq, bkv_dq = block_sizes.block_q_dq, block_sizes.block_kv_dq
dq_mask_info, mask_function_dq = process_mask_fn(
mask,
(bq_dq, bkv_dq),
downcast_smem_data=downcast_smem_data,
head_shards=head_shards,
q_seq_shards=q_seq_shards,
)
assert (mask_function_fwd is None) == (mask_function_dq is None)
dq_mask_info = tree_util.tree_map(jnp.array, dq_mask_info)
bq_dkv, bkv_dkv = block_sizes.block_q_dkv, block_sizes.block_kv_dkv
dkv_mask_info, mask_function_dkv = process_mask_dvk_fn(
mask,
(bq_dkv, bkv_dkv),
downcast_smem_data=downcast_smem_data,
head_shards=head_shards,
q_seq_shards=q_seq_shards,
shrink_grid=not block_sizes.use_fused_bwd_kernel,
)
assert (mask_function_fwd is None) == (mask_function_dkv is None)
dkv_mask_info = tree_util.tree_map(jnp.array, dkv_mask_info)
return SplashAttentionKernel(
fwd_mask_info,
dq_mask_info,
dkv_mask_info,
block_sizes=block_sizes,
is_mqa=is_mqa,
save_residuals=save_residuals,
mask_value=mask_value,
attn_logits_soft_cap=attn_logits_soft_cap,
residual_checkpoint_name=residual_checkpoint_name,
mask_function=mask_function_fwd,
interpret=interpret,
)
make_splash_mha = partial(_make_splash_attention, is_mqa=False)
make_splash_mqa = partial(_make_splash_attention, is_mqa=True)
make_splash_mha_single_device = partial(
make_splash_mha, is_mqa=False, head_shards=1, q_seq_shards=1
)
make_splash_mqa_single_device = partial(
make_splash_mha, is_mqa=True, head_shards=1, q_seq_shards=1
)
| SplashAttentionKernel |
python | getsentry__sentry | src/sentry/integrations/utils/metrics.py | {
"start": 16552,
"end": 17331
} | class ____(EventLifecycleMetric):
"""An instance to be recorded of a integration proxy event."""
interaction_type: IntegrationProxyEventType
def get_metrics_domain(self) -> str:
return "integration_proxy"
def get_interaction_type(self) -> str:
return str(self.interaction_type)
def get_metric_key(self, outcome: EventLifecycleOutcome) -> str:
tokens = (self.get_metrics_domain(), self.interaction_type, str(outcome))
return ".".join(tokens)
def get_metric_tags(self) -> Mapping[str, str]:
return {
"interaction_type": self.interaction_type,
}
def get_extras(self) -> Mapping[str, Any]:
return {
"interaction_type": self.interaction_type,
}
| IntegrationProxyEvent |
python | zostera__django-bootstrap4 | example/app/forms.py | {
"start": 2732,
"end": 2772
} | class ____(TestForm):
pass
| ContactForm |
python | great-expectations__great_expectations | great_expectations/data_context/store/gx_cloud_store_backend.py | {
"start": 1176,
"end": 2275
} | class ____(str, Enum):
V0 = "V0"
V1 = "V1"
V2 = "V2"
def get_user_friendly_error_message(
http_exc: requests.exceptions.HTTPError, log_level: int = logging.WARNING
) -> str:
# TODO: define a GeCloud service/client for this & other related behavior
support_message = []
response: requests.Response = http_exc.response
logger.log(log_level, f"{http_exc.__class__.__name__}:{http_exc} - {response}")
request_id = response.headers.get("request-id", "")
if request_id:
support_message.append(f"Request-Id: {request_id}")
try:
error_json: ErrorPayload = http_exc.response.json()
if isinstance(error_json, list):
errors = error_json
else:
errors = error_json.get("errors")
if errors:
support_message.append(json.dumps(errors))
else:
support_message.append(json.dumps(error_json))
except json.JSONDecodeError:
support_message.append(f"Please contact the Great Expectations team at {SUPPORT_EMAIL}")
return " ".join(support_message)
| EndpointVersion |
python | lazyprogrammer__machine_learning_examples | rl3/es_mnist.py | {
"start": 1076,
"end": 3695
} | class ____:
def __init__(self, D, M, K):
self.D = D
self.M = M
self.K = K
def init(self):
D, M, K = self.D, self.M, self.K
self.W1 = np.random.randn(D, M) / np.sqrt(D)
self.b1 = np.zeros(M)
self.W2 = np.random.randn(M, K) / np.sqrt(M)
self.b2 = np.zeros(K)
def forward(self, X):
Z = np.tanh(X.dot(self.W1) + self.b1)
return softmax(Z.dot(self.W2) + self.b2)
def score(self, X, Y):
P = np.argmax(self.forward(X), axis=1)
return np.mean(Y == P)
def get_params(self):
# return a flat array of parameters
return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2])
def set_params(self, params):
# params is a flat list
# unflatten into individual weights
D, M, K = self.D, self.M, self.K
self.W1 = params[:D * M].reshape(D, M)
self.b1 = params[D * M:D * M + M]
self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K)
self.b2 = params[-K:]
def evolution_strategy(
f,
population_size,
sigma,
lr,
initial_params,
num_iters):
# assume initial params is a 1-D array
num_params = len(initial_params)
reward_per_iteration = np.zeros(num_iters)
params = initial_params
for t in range(num_iters):
t0 = datetime.now()
N = np.random.randn(population_size, num_params)
# ### slow way
# R = np.zeros(population_size) # stores the reward
# # loop through each "offspring"
# for j in range(population_size):
# params_try = params + sigma*N[j]
# R[j] = f(params_try)
### fast way
R = pool.map(f, [params + sigma*N[j] for j in range(population_size)])
R = np.array(R)
m = R.mean()
A = (R - m) / R.std()
reward_per_iteration[t] = m
params = params + lr/(population_size*sigma) * np.dot(N.T, A)
print("Iter:", t, "Avg Reward:", m, "Duration:", (datetime.now() - t0))
return params, reward_per_iteration
def reward_function(params):
model = ANN(D, M, K)
model.set_params(params)
# Ptrain = model.forward(Xtrain)
# return log_likelihood(Ytrain, Ptrain)
return model.score(Xtrain, Ytrain)
if __name__ == '__main__':
model = ANN(D, M, K)
model.init()
params = model.get_params()
best_params, rewards = evolution_strategy(
f=reward_function,
population_size=50,
sigma=0.1,
lr=0.2,
initial_params=params,
num_iters=600,
)
# plot the rewards per iteration
plt.plot(rewards)
plt.show()
# final train and test accuracy
model.set_params(best_params)
print("Train score:", model.score(Xtrain, Ytrain))
print("Test score:", model.score(Xtest, Ytest))
| ANN |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/data_version.py | {
"start": 9102,
"end": 9219
} | class ____(Enum):
MISSING = "MISSING"
STALE = "STALE"
FRESH = "FRESH"
@functools.total_ordering
| StaleStatus |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_required_Numpy.py | {
"start": 1309,
"end": 1762
} | class ____:
"""test_ignores_non_property_return_type_numpy
Example of a class function trying to use `type` as return
documentation in a numpy style docstring
"""
def foo_method(self): # [missing-return-doc, missing-return-type-doc]
"""int: docstring ...
Raises
------
RuntimeError
Always
"""
print(self)
raise RuntimeError()
return 10 # [unreachable]
| Foo |
python | pytorch__pytorch | test/inductor/test_torchinductor.py | {
"start": 10459,
"end": 10562
} | class ____(torch.nn.Module):
def forward(self, x):
return (x,)
@dataclasses.dataclass
| ToTuple |
python | tensorflow__tensorflow | configure.py | {
"start": 1651,
"end": 49467
} | class ____(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_s390x():
return platform.machine() == 's390x'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env {}="{}"'.format(var_name, str(var)))
def write_repo_env_to_bazelrc(config_name, var_name, var):
write_to_bazelrc(
'build:{} --repo_env {}="{}"'.format(config_name, var_name, str(var))
)
def run_shell(cmd, allow_non_zero=False, stderr=None):
if stderr is None:
stderr = sys.stdout
if allow_non_zero:
try:
output = subprocess.check_output(cmd, stderr=stderr)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd, stderr=stderr)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
stderr = open(os.devnull, 'wb')
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
],
stderr=stderr).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path,
'-c',
'import sysconfig;print(sysconfig.get_path("purelib")',
])
]
all_paths = set(python_paths + library_paths)
# Sort set so order is deterministic
all_paths = sorted(all_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'{}]: ').format(default_python_bin_path)
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: {} cannot be found.'.format(python_bin_path))
else:
print('{} is not executable. Is it the python binary?'.format(
python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [{}]\n'.format(python_lib_paths[0]))
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"{}"'.format(python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# If chosen python_lib_path is from a path specified in the PYTHONPATH
# variable, need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="{}"'.format(python_bin_path))
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with {} support?'.format(
query_item)
if not yes_reply:
yes_reply = '{} support will be enabled for TensorFlow.'.format(query_item)
if not no_reply:
no_reply = 'No {}'.format(yes_reply)
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: {}'.format(user_input_origin))
return var
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
def retrieve_bazel_version():
"""Retrieve installed bazel version (or bazelisk).
Returns:
The bazel version detected.
"""
bazel_executable = shutil.which('bazel')
if bazel_executable is None:
bazel_executable = shutil.which('bazelisk')
if bazel_executable is None:
print('Cannot find bazel. Please install bazel/bazelisk.')
sys.exit(1)
stderr = open(os.devnull, 'wb')
curr_version = run_shell([bazel_executable, '--version'],
allow_non_zero=True,
stderr=stderr)
if curr_version.startswith('bazel '):
curr_version = curr_version.split('bazel ')[1]
curr_version_int = convert_version_to_int(curr_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
return curr_version
print('You have bazel %s installed.' % curr_version)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc..
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
# On all other platforms, no longer use `-march=native` as this can result
# in instructions that are too modern being generated. Users that want
# maximum performance should compile TF in their environment and can pass
# `-march=native` there.
# See https://github.com/tensorflow/tensorflow/issues/45744 and duplicates
default_cc_opt_flags = '-Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
write_to_bazelrc('build:opt --host_copt=%s' % opt)
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
True,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang',
)
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
# an intentionally empty value in the
# environment is not the same as no value
if var is None:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
if resolve_symlinks:
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
# Upon clang 19 drop the check for 16
default_clang_path = '/usr/lib/llvm-18/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = '/usr/lib/llvm-17/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = '/usr/lib/llvm-16/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = shutil.which('clang') or ''
clang_cuda_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='CLANG_CUDA_COMPILER_PATH',
var_default=default_clang_path,
ask_for_var='Please specify clang path that to be used as host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid clang path. %s cannot be found.',
)
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
return clang_cuda_compiler_path
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path.
"""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
write_action_env_to_bazelrc('ANDROID_NDK_VERSION', ndk_version)
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
meta = open(os.path.join(android_ndk_home_path, 'meta/platforms.json'))
platforms = json.load(meta)
meta.close()
aliases = platforms['aliases']
api_levels = sorted(list(set([aliases[i] for i in aliases])))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='21', # 21 is required for ARM64 support.
ask_for_var=(
'Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]'
)
% api_levels,
check_success=(lambda *_: True),
error_msg='Android-%s is not present in the NDK path.',
)
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = shutil.which('gcc') or ''
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host '
'compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def choose_compiler(environ_cp):
question = 'Do you want to use Clang to build TensorFlow?'
yes_reply = 'Clang will be used to compile TensorFlow.'
no_reply = 'GCC will be used to compile TensorFlow.'
var = int(
get_var(
environ_cp, 'TF_NEED_CLANG', None, True, question, yes_reply, no_reply
)
)
return var
def choose_compiler_Win(environ_cp):
question = 'Do you want to use Clang to build TensorFlow?'
yes_reply = 'Add "--config=win_clang" to compile TensorFlow with CLANG.'
no_reply = 'MSVC will be used to compile TensorFlow.'
var = int(
get_var(
environ_cp, 'TF_NEED_CLANG', None, True, question, yes_reply, no_reply
)
)
return var
def set_clang_compiler_path(environ_cp):
"""Set CLANG_COMPILER_PATH and environment variables.
Loop over user prompts for clang path until receiving a valid response.
Default is used if no input is given. Set CLANG_COMPILER_PATH and write
environment variables CC and BAZEL_COMPILER to .bazelrc.
Args:
environ_cp: (Dict) copy of the os.environ.
Returns:
string value for clang_compiler_path.
"""
# Default path if clang-18 is installed by using apt-get install
# remove 16 logic upon release of 19
default_clang_path = '/usr/lib/llvm-18/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = '/usr/lib/llvm-17/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = '/usr/lib/llvm-16/bin/clang'
if not os.path.exists(default_clang_path):
default_clang_path = shutil.which('clang') or ''
clang_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='CLANG_COMPILER_PATH',
var_default=default_clang_path,
ask_for_var='Please specify the path to clang executable.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg=(
'Invalid clang path. %s cannot be found. Note that TensorFlow now'
' requires clang to compile. You may override this behavior by'
' setting TF_NEED_CLANG=0'
),
)
write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)
write_to_bazelrc('build --repo_env=CC=%s' % clang_compiler_path)
write_to_bazelrc('build --repo_env=BAZEL_COMPILER=%s' % clang_compiler_path)
return clang_compiler_path
def set_clang_compiler_path_win(environ_cp):
"""Set CLANG_COMPILER_PATH and environment variables.
Loop over user prompts for clang path until receiving a valid response.
Default is used if no input is given. Set CLANG_COMPILER_PATH and write
environment variables CC and BAZEL_COMPILER to .bazelrc.
Args:
environ_cp: (Dict) copy of the os.environ.
Returns:
string value for clang_compiler_path.
"""
# Default path if clang-16 is installed by using apt-get install
default_clang_path = 'C:/Program Files/LLVM/bin/clang.exe'
if not os.path.exists(default_clang_path):
default_clang_path = shutil.which('clang') or ''
clang_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='CLANG_COMPILER_PATH',
var_default=default_clang_path,
ask_for_var='Please specify the path to clang executable.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg=(
'Invalid clang path. %s cannot be found. Note that Clang is now'
'preferred compiler. You may use MSVC by removing --config=win_clang'
),
)
write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)
write_to_bazelrc(f'build --repo_env=CC="{clang_compiler_path}"')
write_to_bazelrc(f'build --repo_env=BAZEL_COMPILER="{clang_compiler_path}"')
return clang_compiler_path
def retrieve_clang_version(clang_executable):
"""Retrieve installed clang version.
Args:
clang_executable: (String) path to clang executable
Returns:
The clang version detected.
"""
stderr = open(os.devnull, 'wb')
curr_version = run_shell([clang_executable, '--version'],
allow_non_zero=True,
stderr=stderr)
curr_version_split = curr_version.lower().split('clang version ')
if len(curr_version_split) > 1:
curr_version = curr_version_split[1].split()[0].split('git')
if len(curr_version) > 1:
print('WARNING: current clang installation is not a release version.\n')
curr_version = curr_version[0]
curr_version_int = convert_version_to_int(curr_version)
# Check if current clang version can be detected properly.
if not curr_version_int:
print('WARNING: current clang installation version unknown.\n')
return None
print('You have Clang %s installed.\n' % curr_version)
return curr_version
# Disable clang extension that rejects type definitions within offsetof.
# This was added in clang-16 by https://reviews.llvm.org/D133574.
# Still required for clang-17.
# Can be removed once upb is updated, since a type definition is used within
# offset of in the current version of ubp. See
# https://github.com/protocolbuffers/upb/blob/9effcbcb27f0a665f9f345030188c0b291e32482/upb/upb.c#L183.
def disable_clang_offsetof_extension(clang_version):
if int(clang_version.split('.')[0]) in (16, 17):
write_to_bazelrc('build --copt=-Wno-gnu-offsetof-extensions')
def set_hermetic_cuda_version(environ_cp):
"""Set HERMETIC_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the hermetic CUDA version you want to use '
'or leave empty to use the default version. '
)
hermetic_cuda_version = get_from_env_or_user_or_default(
environ_cp, 'HERMETIC_CUDA_VERSION', ask_cuda_version, None
)
if hermetic_cuda_version:
environ_cp['HERMETIC_CUDA_VERSION'] = hermetic_cuda_version
write_repo_env_to_bazelrc(
'cuda', 'HERMETIC_CUDA_VERSION', hermetic_cuda_version
)
def set_hermetic_cudnn_version(environ_cp):
"""Set HERMETIC_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the hermetic cuDNN version you want to use '
'or leave empty to use the default version. '
)
hermetic_cudnn_version = get_from_env_or_user_or_default(
environ_cp, 'HERMETIC_CUDNN_VERSION', ask_cudnn_version, None
)
if hermetic_cudnn_version:
environ_cp['HERMETIC_CUDNN_VERSION'] = hermetic_cudnn_version
write_repo_env_to_bazelrc(
'cuda', 'HERMETIC_CUDNN_VERSION', hermetic_cudnn_version
)
def set_hermetic_cuda_compute_capabilities(environ_cp):
"""Set HERMETIC_CUDA_COMPUTE_CAPABILITIES."""
while True:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated CUDA compute capabilities '
'you want to build with.\nYou can find the compute capability of your '
'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
'can be specified as "x.y" or "compute_xy" to include both virtual and'
' binary GPU code, or as "sm_xy" to only include the binary '
'code.\nPlease note that each additional compute capability '
'significantly increases your build time and binary size, and that '
'TensorFlow only supports compute capabilities >= 3.5 [Default is: '
'%s]: ' % default_cuda_compute_capabilities)
hermetic_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp,
'HERMETIC_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities,
default_cuda_compute_capabilities,
)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
hermetic_cuda_compute_capabilities = ''.join(
hermetic_cuda_compute_capabilities.split()
)
for compute_capability in hermetic_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
# We now support sm_35,sm_50,sm_60,compute_70.
sm_compute_match = re.match('(sm|compute)_?([0-9]+[0-9]+)',
compute_capability)
if not sm_compute_match:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(sm_compute_match.group(2))
if ver < 30:
print(
'ERROR: TensorFlow only supports small CUDA compute'
' capabilities of sm_30 and higher. Please re-specify the list'
' of compute capabilities excluding version %s.' % ver)
all_valid = False
if ver < 35:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than sm_35. Disable XLA when running on older GPUs.')
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['HERMETIC_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set HERMETIC_CUDA_COMPUTE_CAPABILITIES
environ_cp['HERMETIC_CUDA_COMPUTE_CAPABILITIES'] = (
hermetic_cuda_compute_capabilities
)
write_repo_env_to_bazelrc(
'cuda',
'HERMETIC_CUDA_COMPUTE_CAPABILITIES',
hermetic_cuda_compute_capabilities,
)
def set_cuda_local_path(environ_cp, dist_name, env_var):
  """Ask for a local distribution path and persist it when one is given.

  Args:
    environ_cp: copy of the os.environ.
    dist_name: human-readable distribution name (e.g. 'CUDA', 'CUDNN').
    env_var: name of the environment variable holding the local path.
  """
  prompt = (
      'Please specify the local {} path you want to use '
      'or leave empty to use the default version. '
  ).format(dist_name)
  chosen_path = get_from_env_or_user_or_default(
      environ_cp, env_var, prompt, None
  )
  # An empty answer means "use the default hermetic version": nothing to do.
  if not chosen_path:
    return
  environ_cp[env_var] = chosen_path
  write_repo_env_to_bazelrc('cuda', env_var, chosen_path)
def set_other_cuda_vars(environ_cp):
  """Set other CUDA related variables."""
  # With CUDA enabled the build always targets the GPU; select the
  # clang-specific config when clang was chosen as the CUDA compiler.
  use_cuda_clang = environ_cp.get('TF_CUDA_CLANG') == '1'
  config = 'cuda_clang' if use_cuda_clang else 'cuda'
  write_to_bazelrc('build --config=%s' % config)
def system_specific_test_config(environ_cp):
  """Add default build and test flags required for TF tests to bazelrc.

  Args:
    environ_cp: copy of the os.environ.
  """
  write_to_bazelrc('test --test_size_filters=small,medium')

  # Each instance of --test_tag_filters or --build_tag_filters overrides all
  # previous instances, so we need to build up a complete list and write a
  # single list of filters for the .bazelrc file.

  # Filters to use with both --test_tag_filters and --build_tag_filters
  test_and_build_filters = ['-benchmark-test', '-no_oss', '-oss_excluded']
  # Additional filters for --test_tag_filters beyond those in
  # test_and_build_filters
  test_only_filters = ['-oss_serial']
  # True when either GPU backend was requested. Previously this exact
  # expression was duplicated in the Windows and Linux branches below.
  gpu_build = (environ_cp.get('TF_NEED_CUDA', None) == '1' or
               environ_cp.get('TF_NEED_ROCM', None) == '1')
  if is_windows():
    test_and_build_filters += ['-no_windows', '-windows_excluded']
    if gpu_build:
      test_and_build_filters += ['-no_windows_gpu', '-no_gpu']
    else:
      test_and_build_filters.append('-gpu')
  elif is_macos():
    test_and_build_filters += ['-gpu', '-nomac', '-no_mac', '-mac_excluded']
  elif is_linux():
    if gpu_build:
      test_and_build_filters.append('-no_gpu')
      write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
    else:
      test_and_build_filters.append('-gpu')

  # Disable tests with "v1only" tag in "v2" Bazel config, but not in "v1" config
  write_to_bazelrc('test:v1 --test_tag_filters=%s' %
                   ','.join(test_and_build_filters + test_only_filters))
  write_to_bazelrc('test:v1 --build_tag_filters=%s' %
                   ','.join(test_and_build_filters))
  write_to_bazelrc(
      'test:v2 --test_tag_filters=%s' %
      ','.join(test_and_build_filters + test_only_filters + ['-v1only']))
  write_to_bazelrc('test:v2 --build_tag_filters=%s' %
                   ','.join(test_and_build_filters + ['-v1only']))
def set_system_libs_flag(environ_cp):
  """Normalize TF_SYSTEM_LIBS and forward related defines to bazelrc."""
  syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')

  # s390x builds must always include boringssl in the system libs list.
  if is_s390x() and 'boringssl' not in syslibs:
    syslibs = 'boringssl' if not syslibs else 'boringssl, ' + syslibs

  if syslibs:
    # Accept either comma- or whitespace-separated input and emit a sorted,
    # comma-separated canonical form. split(None) splits on any whitespace,
    # matching the original whitespace-splitting branch.
    separator = ',' if ',' in syslibs else None
    syslibs = ','.join(sorted(syslibs.split(separator)))
    write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)

  for varname in ('PREFIX', 'PROTOBUF_INCLUDE_PATH'):
    if varname in environ_cp:
      write_to_bazelrc('build --define=%s=%s' % (varname, environ_cp[varname]))
def set_windows_build_flags(environ_cp):
  """Set Windows specific build options."""
  # /d2ReducedOptimizeHugeFunctions is first available in VS 16.4 and speeds
  # up Windows compile times by a lot. See
  # https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
  # pylint: disable=line-too-long
  write_to_bazelrc(
      'build --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions'
  )

  override_eigen = get_var(
      environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
      True, ('Would you like to override eigen strong inline for some C++ '
             'compilation to reduce the compilation time?'),
      'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
      'some compilations could take more than 20 mins.')
  if override_eigen:
    # Due to a known MSVC compiler issue
    # https://github.com/tensorflow/tensorflow/issues/10521
    # Overriding eigen strong inline speeds up the compiling of
    # conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
    # but this also hurts the performance. Let users decide what they want.
    write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
  """Helper function to print formatted help text for Bazel config options."""
  # {:<12} left-justifies and pads to 12 columns, matching %-12s.
  print('\t--config={:<12}\t# {}'.format(name, help_text))
def configure_ios(environ_cp):
  """Configures TensorFlow for iOS builds."""
  # iOS builds are only possible on macOS hosts, and only when the user
  # explicitly opts in. Short-circuit keeps get_var from prompting elsewhere.
  if not is_macos() or not get_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS',
                                   False):
    return
  # Swap in the Apple-specific variants of the Bazel files via symlinks.
  for filepath in APPLE_BAZEL_FILES:
    symlink_force(os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple'),
                  os.path.join(_TF_WORKSPACE_ROOT, filepath))
  # Link each iOS support file into the workspace root under its basename.
  for filepath in IOS_FILES:
    symlink_force(filepath,
                  os.path.join(_TF_WORKSPACE_ROOT, os.path.basename(filepath)))
def get_gcc_compiler(environ_cp):
  """Return the gcc/g++ binary to use, or None if gcc is not available.

  Checks CXX, then CC, then a `gcc` found on PATH, and confirms the
  candidate actually reports itself as gcc/g++ via `--version` (it could be
  e.g. clang installed under a cc symlink).

  Args:
    environ_cp: copy of the os.environ.

  Returns:
    Path to the gcc compiler binary, or None when no candidate exists or
    the candidate is not gcc/g++.
  """
  gcc_env = environ_cp.get('CXX') or environ_cp.get('CC') or shutil.which('gcc')
  if gcc_env is not None:
    gcc_version = run_shell([gcc_env, '--version']).split()
    # Guard against empty `--version` output, which previously raised
    # IndexError on gcc_version[0].
    if gcc_version and gcc_version[0] in ('gcc', 'g++'):
      return gcc_env
  return None
def main():
  """Interactively configure the TensorFlow build and write .bazelrc flags."""
  global _TF_WORKSPACE_ROOT
  global _TF_BAZELRC
  global _TF_CURRENT_BAZEL_VERSION

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--workspace',
      type=str,
      default=os.path.abspath(os.path.dirname(__file__)),
      help='The absolute path to your active Bazel workspace.')
  args = parser.parse_args()

  _TF_WORKSPACE_ROOT = args.workspace
  _TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)

  # Make a copy of os.environ to be clear when functions and getting and setting
  # environment variables.
  environ_cp = dict(os.environ)

  try:
    current_bazel_version = retrieve_bazel_version()
  except subprocess.CalledProcessError as e:
    print('Error retrieving bazel version: ', e.output.decode('UTF-8').strip())
    raise e

  _TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)

  # Start from a clean generated bazelrc and Makefile leftovers.
  reset_tf_configure_bazelrc()

  cleanup_makefile()
  setup_python(environ_cp)

  if is_windows():
    environ_cp['TF_NEED_OPENCL'] = '0'
    environ_cp['TF_CUDA_CLANG'] = '0'
    # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
    # Windows.
    environ_cp['TF_DOWNLOAD_CLANG'] = '0'
    environ_cp['TF_NEED_MPI'] = '0'

  if is_ppc64le():
    # Enable MMA Dynamic Dispatch support if 'gcc' and if linker >= 2.35
    gcc_env = get_gcc_compiler(environ_cp)
    if gcc_env is not None:

      # Use gold linker if 'gcc' and if 'ppc64le'
      write_to_bazelrc('build --linkopt="-fuse-ld=gold"')

      # Get the linker version
      ld_version = run_shell([gcc_env, '-Wl,-version']).split()

      ld_version_int = 0
      # Scan the version words until one parses into a comparable int.
      for i in range(len(ld_version)):
        ld_version_int = convert_version_to_int(ld_version[i])
        if ld_version_int is not None:
          break
      if ld_version_int is None:
        ld_version_int = 0

      # Enable if 'ld' version >= 2.35
      if ld_version_int >= 2035000:
        write_to_bazelrc(
            'build --copt="-DEIGEN_ALTIVEC_ENABLE_MMA_DYNAMIC_DISPATCH=1"')

  with_xla_support = environ_cp.get('TF_ENABLE_XLA', None)
  if with_xla_support is not None:
    write_to_bazelrc('build --define=with_xla_support=%s' %
                     ('true' if int(with_xla_support) else 'false'))

  # ROCm configuration: forward library/platform paths when ROCm is enabled.
  set_action_env_var(
      environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
  if (environ_cp.get('TF_NEED_ROCM') == '1' and
      'LD_LIBRARY_PATH' in environ_cp and
      environ_cp.get('LD_LIBRARY_PATH') != '1'):
    write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                environ_cp.get('LD_LIBRARY_PATH'))

  if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('ROCM_PATH')):
    write_action_env_to_bazelrc('ROCM_PATH', environ_cp.get('ROCM_PATH'))

  if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('HIP_PLATFORM')):
    write_action_env_to_bazelrc('HIP_PLATFORM', environ_cp.get('HIP_PLATFORM'))

  # CUDA configuration. Windows CUDA builds are unsupported since TF 2.11.
  if is_windows():
    print('\nWARNING: Cannot build with CUDA support on Windows.\n'
          'Starting in TF 2.11, CUDA build is not supported for Windows. '
          'For using TensorFlow GPU on Windows, you will need to build/install '
          'TensorFlow in WSL2.\n')
    environ_cp['TF_NEED_CUDA'] = '0'
  else:
    environ_cp['TF_NEED_CUDA'] = str(
        int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
  if environ_cp.get('TF_NEED_CUDA') == '1':
    set_hermetic_cuda_version(environ_cp)
    set_hermetic_cudnn_version(environ_cp)
    set_hermetic_cuda_compute_capabilities(environ_cp)
    set_cuda_local_path(environ_cp, 'CUDA', 'LOCAL_CUDA_PATH')
    set_cuda_local_path(environ_cp, 'CUDNN', 'LOCAL_CUDNN_PATH')
    set_cuda_local_path(environ_cp, 'NCCL', 'LOCAL_NCCL_PATH')

    if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
        'LD_LIBRARY_PATH') != '1':
      write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                  environ_cp.get('LD_LIBRARY_PATH'))

    set_tf_cuda_clang(environ_cp)
    if environ_cp.get('TF_CUDA_CLANG') == '1':
      # Set up which clang we should use as the cuda / host compiler.
      clang_cuda_compiler_path = set_clang_cuda_compiler_path(environ_cp)
      clang_version = retrieve_clang_version(clang_cuda_compiler_path)
      disable_clang_offsetof_extension(clang_version)
    else:
      # Set up which gcc nvcc should use as the host compiler
      # No need to set this on Windows
      if not is_windows():
        set_gcc_host_compiler_path(environ_cp)
    set_other_cuda_vars(environ_cp)
  else:
    # CUDA not required. Ask whether we should use clang for the CPU build.
    if is_linux():
      environ_cp['TF_NEED_CLANG'] = str(choose_compiler(environ_cp))
      if environ_cp.get('TF_NEED_CLANG') == '1':
        clang_compiler_path = set_clang_compiler_path(environ_cp)
        clang_version = retrieve_clang_version(clang_compiler_path)
        disable_clang_offsetof_extension(clang_version)
    if is_windows():
      environ_cp['TF_NEED_CLANG'] = str(choose_compiler_Win(environ_cp))
      if environ_cp.get('TF_NEED_CLANG') == '1':
        clang_compiler_path = set_clang_compiler_path_win(environ_cp)
        clang_version = retrieve_clang_version(clang_compiler_path)
        disable_clang_offsetof_extension(clang_version)

  # ROCm / CUDA are mutually exclusive.
  # At most 1 GPU platform can be configured.
  gpu_platform_count = 0
  if environ_cp.get('TF_NEED_ROCM') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_CUDA') == '1':
    gpu_platform_count += 1
  if gpu_platform_count >= 2:
    # Fixed typo in the user-facing message ("mututally" -> "mutually").
    raise UserInputError('CUDA / ROCm are mutually exclusive. '
                         'At most 1 GPU platform can be configured.')

  set_cc_opt_flags(environ_cp)
  set_system_libs_flag(environ_cp)
  if is_windows():
    set_windows_build_flags(environ_cp)

  if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
             ('Would you like to interactively configure ./WORKSPACE for '
              'Android builds?'), 'Searching for NDK and SDK installations.',
             'Not configuring the WORKSPACE for Android builds.'):
    create_android_ndk_rule(environ_cp)
    create_android_sdk_rule(environ_cp)

  system_specific_test_config(environ_cp)

  configure_ios(environ_cp)

  # Summarize the optional --config switches the generated .bazelrc supports.
  print('Preconfigured Bazel build configs. You can use any of the below by '
        'adding "--config=<>" to your build command. See .bazelrc for more '
        'details.')
  config_info_line('mkl', 'Build with MKL support.')
  config_info_line(
      'mkl_aarch64',
      'Build with oneDNN and Compute Library for the Arm Architecture (ACL).')
  config_info_line('monolithic', 'Config for mostly static monolithic build.')
  config_info_line('numa', 'Build with NUMA support.')
  config_info_line(
      'dynamic_kernels',
      '(Experimental) Build kernels into separate shared objects.')
  config_info_line('v1', 'Build with TensorFlow 1 API instead of TF 2 API.')

  print('Preconfigured Bazel build configs to DISABLE default on features:')
  config_info_line('nogcp', 'Disable GCP support.')
  config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
# Standard script entry point: run the interactive configuration.
if __name__ == '__main__':
  main()
| UserInputError |
python | conda__conda | conda/exceptions.py | {
"start": 32131,
"end": 32275
} | class ____(CondaError, IndexError):
def __init__(self, message: str):
msg = f"{message}"
super().__init__(msg)
| CondaIndexError |
python | Textualize__textual | docs/examples/guide/widgets/hello05.py | {
"start": 363,
"end": 680
} | class ____(Static):
"""Display a greeting."""
def on_mount(self) -> None:
self.action_next_word()
def action_next_word(self) -> None:
"""Get a new hello and update the content area."""
hello = next(hellos)
self.update(f"[@click='next_word']{hello}[/], [b]World[/b]!")
| Hello |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 7622,
"end": 7815
} | class ____(PersistentConnectionError, Web3ValueError):
"""
Raised when the read buffer limit is reached while reading data from a persistent
connection.
"""
| ReadBufferLimitReached |
python | walkccc__LeetCode | solutions/3208. Alternating Groups II/3208.py | {
"start": 0,
"end": 339
} | class ____:
def numberOfAlternatingGroups(self, colors: list[int], k: int) -> int:
n = len(colors)
ans = 0
alternating = 1
for i in range(n + k - 2):
alternating = (1 if colors[i % n] == colors[(i - 1) % n]
else alternating + 1)
if alternating >= k:
ans += 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 28246,
"end": 28648
} | class ____(SemiIncrementalMixin, GithubStream):
"""
API docs: https://docs.github.com/en/rest/issues/events?apiVersion=2022-11-28#list-issue-events-for-a-repository
"""
cursor_field = "created_at"
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"repos/{stream_slice['repository']}/issues/events"
# Below are incremental streams
| IssueEvents |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 98713,
"end": 116770
} | class ____(Response):
"""
Response of dataviews.get_all endpoint.
:param dataviews: List of dataviews
:type dataviews: Sequence[Dataview]
:param scroll_id: Scroll ID that can be used with the next calls to get_all to
retrieve more data
:type scroll_id: str
"""
_service = "dataviews"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dataview": {
"properties": {
"augmentation": {
"$ref": "#/definitions/augmentation",
"description": "Augmentation parameters. Only for training and testing tasks.",
},
"company": {"description": "Company id", "type": "string"},
"created": {
"description": "Dataview creation time (UTC) ",
"format": "date-time",
"type": "string",
},
"description": {
"description": "Dataview description",
"type": "string",
},
"filters": {
"description": "List of FilterRule ('OR' connection)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": "array",
},
"id": {"description": "Dataview ID", "type": "string"},
"iteration": {
"$ref": "#/definitions/iteration",
"description": "Iteration parameters. Not applicable for register (import) tasks.",
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": "object",
},
"mapping": {
"$ref": "#/definitions/mapping",
"description": "Mapping parameters",
},
"name": {"description": "Dataview name", "type": "string"},
"output_rois": {
"$ref": "#/definitions/output_rois_enum",
"default": "all_in_frame",
"description": (
"'all_in_frame' - all rois for a frame are returned\n 'only_filtered' - only"
" rois which led this frame to be selected\n 'frame_per_roi' - single roi"
" per frame. Frame can be returned multiple times with a different roi each time.\n "
" Note: this should be used for Training tasks only\n Note:"
" frame_per_roi implies that only filtered rois will be returned\n "
),
},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"user": {"description": "Associated user id", "type": "string"},
"versions": {
"description": "List of dataview entries. All tasks must have at least one dataview.",
"items": {"$ref": "#/definitions/dataview_entry"},
"type": "array",
},
},
"required": ["id", "name"],
"type": "object",
},
"dataview_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": "string",
},
"merge_with": {
"description": "Version ID to merge with",
"type": "string",
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": "string",
},
},
"required": ["dataset", "version"],
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
},
"properties": {
"dataviews": {
"description": "List of dataviews",
"items": {"$ref": "#/definitions/dataview"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all to retrieve more data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, dataviews=None, scroll_id=None, **kwargs):
super(GetAllResponse, self).__init__(**kwargs)
self.dataviews = dataviews
self.scroll_id = scroll_id
@schema_property("dataviews")
def dataviews(self):
return self._property_dataviews
@dataviews.setter
def dataviews(self, value):
if value is None:
self._property_dataviews = None
return
self.assert_isinstance(value, "dataviews", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Dataview.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "dataviews", Dataview, is_array=True)
self._property_dataviews = value
@schema_property("scroll_id")
def scroll_id(self):
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value):
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetAllResponse |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 179237,
"end": 180719
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
start_date: str,
is_sandbox: bool,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
refresh_token: Optional[str] = None,
):
"""Airbyte Source for Paypal Transaction.
Documentation can be found at https://docs.airbyte.com/integrations/sources/paypal-transaction
Args:
name (str): The name of the destination.
client_id (Optional[str]): The Client ID of your Paypal developer application.
client_secret (Optional[str]): The Client Secret of your Paypal developer application.
refresh_token (Optional[str]): The key to refresh the expired access token.
start_date (str): Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time.
is_sandbox (bool): Determines whether to use the sandbox or production environment.
"""
self.client_id = check.opt_str_param(client_id, "client_id")
self.client_secret = check.opt_str_param(client_secret, "client_secret")
self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
self.start_date = check.str_param(start_date, "start_date")
self.is_sandbox = check.bool_param(is_sandbox, "is_sandbox")
super().__init__("Paypal Transaction", name)
| PaypalTransactionSource |
python | bokeh__bokeh | src/bokeh/core/has_props.py | {
"start": 27682,
"end": 27744
} | class ____(TypedDict):
name: str
default: Any
| OverrideDef |
python | huggingface__transformers | src/transformers/models/openai/tokenization_openai.py | {
"start": 1088,
"end": 4997
} | class ____(TokenizersBackend):
"""
Construct a GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
the following peculiarities:
- lower case all inputs
- uses BERT's BasicTokenizer for pre-BPE tokenization
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
Path to a tokenizers JSON file containing the serialization of a tokenizer.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, a blank vocabulary is initialized.
merges (`list`, *optional*):
Custom merges list. If not provided, an empty list is used.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
unk_token="<unk>",
vocab=None,
merges=None,
vocab_file=None,
merges_file=None,
**kwargs,
):
# Initialize vocabulary
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
# Initialize minimal vocabulary with unk token
self._vocab = {str(unk_token): 0}
# Initialize merges
if merges is not None:
self._merges = merges if merges is not None else generate_merges(self._vocab)
else:
self._merges = []
# Create BPE tokenizer
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="</w>",
fuse_unk=False,
unk_token=str(unk_token),
)
)
# Set normalizer and pre-tokenizer to mimic OpenAI GPT behavior
# OpenAI GPT uses BERT BasicTokenizer with lower_case=True
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.NFD(),
normalizers.Lowercase(),
normalizers.StripAccents(),
]
)
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
self._tokenizer.decoder = decoders.BPEDecoder(suffix="</w>")
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
unk_token=unk_token,
**kwargs,
)
self.vocab_file = vocab_file
self.merges_file = merges_file
def _post_init(self):
"""Post-initialization to ensure tokenizer settings are applied correctly."""
# Re-apply settings to ensure they're correct after loading from pretrained
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.NFD(),
normalizers.Lowercase(),
normalizers.StripAccents(),
]
)
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
self._tokenizer.decoder = decoders.BPEDecoder(suffix="</w>")
# Call parent to handle AddedToken properties
super()._post_init()
@property
def do_lower_case(self):
return True
__all__ = ["OpenAIGPTTokenizer"]
| OpenAIGPTTokenizer |
python | yandexdataschool__Practical_RL | week06_policy_based/atari_wrappers.py | {
"start": 7128,
"end": 7292
} | class ____(RewardWrapper):
"""Modifes reward to be in {-1, 0, 1} by taking sign of it."""
def reward(self, reward):
return np.sign(reward)
| ClipReward |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 17036,
"end": 18553
} | class ____(BaseModel):
names: list[SelfReferencing] # noqa: F821
"""
)
SelfReferencing = module.SelfReferencing
if sys.version_info >= (3, 10):
assert (
repr(SelfReferencing.model_fields['names']) == 'FieldInfo(annotation=list[SelfReferencing], required=True)'
)
# test that object creation works
obj = SelfReferencing(names=[SelfReferencing(names=[])])
assert obj.names == [SelfReferencing(names=[])]
def test_pep585_recursive_generics(create_module):
@create_module
def module():
from typing import ForwardRef
from pydantic import BaseModel
HeroRef = ForwardRef('Hero')
class Team(BaseModel):
name: str
heroes: list[HeroRef]
class Hero(BaseModel):
name: str
teams: list[Team]
Team.model_rebuild()
assert repr(module.Team.model_fields['heroes']) == 'FieldInfo(annotation=list[Hero], required=True)'
assert repr(module.Hero.model_fields['teams']) == 'FieldInfo(annotation=list[Team], required=True)'
h = module.Hero(name='Ivan', teams=[module.Team(name='TheBest', heroes=[])])
# insert_assert(h.model_dump())
assert h.model_dump() == {'name': 'Ivan', 'teams': [{'name': 'TheBest', 'heroes': []}]}
def test_class_var_forward_ref(create_module):
# see #3679
create_module(
# language=Python
"""
from __future__ import annotations
from typing import ClassVar
from pydantic import BaseModel
| SelfReferencing |
python | pennersr__django-allauth | allauth/account/forms.py | {
"start": 26382,
"end": 26873
} | class ____(forms.Form):
password = PasswordField(label=_("Password"), autocomplete="current-password")
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data.get("password")
if not get_adapter().reauthenticate(self.user, password):
raise get_adapter().validation_error("incorrect_password")
return password
| ReauthenticateForm |
python | jazzband__django-waffle | waffle/models.py | {
"start": 13579,
"end": 15406
} | class ____(BaseModel):
"""A feature switch.
Switches are active, or inactive, globally.
"""
name = models.CharField(
max_length=100,
unique=True,
help_text=_('The human/computer readable name.'),
verbose_name=_('Name'),
)
active = models.BooleanField(
default=False,
help_text=_('Is this switch active?'),
verbose_name=_('Active'),
)
note = models.TextField(
blank=True,
help_text=_('Note where this Switch is used.'),
verbose_name=_('Note'),
)
created = models.DateTimeField(
default=timezone.now,
db_index=True,
help_text=_('Date when this Switch was created.'),
verbose_name=_('Created'),
)
modified = models.DateTimeField(
default=timezone.now,
help_text=_('Date when this Switch was last modified.'),
verbose_name=_('Modified'),
)
objects = managers.SwitchManager()
SINGLE_CACHE_KEY = 'SWITCH_CACHE_KEY'
ALL_CACHE_KEY = 'ALL_SWITCHES_CACHE_KEY'
class Meta:
abstract = True
verbose_name = _('Switch')
verbose_name_plural = _('Switches')
def is_active(self) -> bool:
if not self.pk:
log_level = get_setting('LOG_MISSING_SWITCHES')
if log_level:
logger.log(log_level, 'Switch %s not found', self.name)
if get_setting('CREATE_MISSING_SWITCHES'):
switch, _created = get_waffle_switch_model().objects.get_or_create(
name=self.name, defaults={"active": get_setting("SWITCH_DEFAULT")}
)
cache = get_cache()
cache.set(self._cache_key(self.name), switch)
return get_setting('SWITCH_DEFAULT')
return self.active
| AbstractBaseSwitch |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 1846,
"end": 2071
} | class ____(abc.ABC):
def __init__(self, param: int) -> None:
self.param = param + 1
def abstract_method(self) -> str:
"""This needs to be implemented."""
raise NotImplementedError()
| AbstractBase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.