language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numpy__numpy | benchmarks/benchmarks/bench_reduce.py | {
"start": 607,
"end": 1063
} | class ____(Benchmark):
def setup(self):
# avoid np.zeros's lazy allocation that would
# cause page faults during benchmark
self.zeros = np.full(100000, 0, bool)
self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
| AnyAll |
python | kamyu104__LeetCode-Solutions | Python/prison-cells-after-n-days.py | {
"start": 424,
"end": 1111
} | class ____(object):
def prisonAfterNDays(self, cells, N):
"""
:type cells: List[int]
:type N: int
:rtype: List[int]
"""
cells = tuple(cells)
lookup = {}
while N:
lookup[cells] = N
N -= 1
cells = tuple([0] + [cells[i - 1] ^ cells[i + 1] ^ 1 for i in xrange(1, 7)] + [0])
if cells in lookup:
assert(lookup[cells] - N in (1, 7, 14))
N %= lookup[cells] - N
break
while N:
N -= 1
cells = tuple([0] + [cells[i - 1] ^ cells[i + 1] ^ 1 for i in xrange(1, 7)] + [0])
return list(cells)
| Solution2 |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 150677,
"end": 150971
} | class ____(str, Enum):
"""
Storage in chunked mmap files, appendable Search performance is defined by disk speed and the fraction of vectors that fit in memory.
"""
def __str__(self) -> str:
return str(self.value)
CHUNKEDMMAP = "ChunkedMmap"
| VectorStorageTypeOneOf2 |
python | getsentry__sentry | tests/sentry/auth/test_helper.py | {
"start": 21341,
"end": 23397
} | class ____(TestCase):
def setUp(self) -> None:
self.provider = "dummy"
self.auth_provider_inst = AuthProvider.objects.create(
organization_id=self.organization.id, provider=self.provider
)
self.auth_key = "test_auth_key"
self.request = _set_up_request()
self.request.session["auth_key"] = self.auth_key
def _test_pipeline(self, flow, referrer=None):
initial_state = {
"org_id": self.organization.id,
"flow": flow,
"provider_model_id": self.auth_provider_inst.id,
"provider_key": None,
"referrer": referrer,
}
local_client = clusters.get("default").get_local_client_for_key(self.auth_key)
local_client.set(self.auth_key, json.dumps(initial_state))
helper = AuthHelper.get_for_request(self.request)
assert helper is not None
helper.initialize()
assert helper.is_valid()
assert helper.referrer == referrer
assert helper.flow == flow
first_step = helper.current_step()
assert first_step.status_code == 200
next_step = helper.next_step()
assert next_step.status_code == 302
return next_step
@mock.patch("sentry.auth.helper.messages")
def test_login(self, mock_messages: mock.MagicMock) -> None:
final_step = self._test_pipeline(FLOW_LOGIN)
assert final_step.url == f"/auth/login/{self.organization.slug}/"
@mock.patch("sentry.auth.helper.messages")
def test_setup_provider(self, mock_messages: mock.MagicMock) -> None:
final_step = self._test_pipeline(FLOW_SETUP_PROVIDER)
assert final_step.url == f"/settings/{self.organization.slug}/auth/"
@mock.patch("sentry.auth.helper.messages")
def test_referrer_state(self, mock_messages: mock.MagicMock) -> None:
final_step = self._test_pipeline(flow=FLOW_SETUP_PROVIDER, referrer="foobar")
assert final_step.url == f"/settings/{self.organization.slug}/auth/"
@control_silo_test
| AuthHelperTest |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/subprocess_env_manager.py | {
"start": 9459,
"end": 22762
} | class ____(EnvManager):
def __init__(
self,
env_factory: Callable[[int, List[SideChannel]], BaseEnv],
run_options: RunOptions,
n_env: int = 1,
):
super().__init__()
self.env_workers: List[UnityEnvWorker] = []
self.step_queue: Queue = Queue()
self.workers_alive = 0
self.env_factory = env_factory
self.run_options = run_options
self.env_parameters: Optional[Dict] = None
# Each worker is correlated with a list of times they restarted within the last time period.
self.recent_restart_timestamps: List[List[datetime.datetime]] = [
[] for _ in range(n_env)
]
self.restart_counts: List[int] = [0] * n_env
for worker_idx in range(n_env):
self.env_workers.append(
self.create_worker(
worker_idx, self.step_queue, env_factory, run_options
)
)
self.workers_alive += 1
@staticmethod
def create_worker(
worker_id: int,
step_queue: Queue,
env_factory: Callable[[int, List[SideChannel]], BaseEnv],
run_options: RunOptions,
) -> UnityEnvWorker:
parent_conn, child_conn = Pipe()
# Need to use cloudpickle for the env factory function since function objects aren't picklable
# on Windows as of Python 3.6.
pickled_env_factory = cloudpickle.dumps(env_factory)
child_process = Process(
target=worker,
args=(
child_conn,
step_queue,
pickled_env_factory,
worker_id,
run_options,
logger.level,
),
)
child_process.start()
return UnityEnvWorker(child_process, worker_id, parent_conn)
def _queue_steps(self) -> None:
for env_worker in self.env_workers:
if not env_worker.waiting:
env_action_info = self._take_step(env_worker.previous_step)
env_worker.previous_all_action_info = env_action_info
env_worker.send(EnvironmentCommand.STEP, env_action_info)
env_worker.waiting = True
def _restart_failed_workers(self, first_failure: EnvironmentResponse) -> None:
if first_failure.cmd != EnvironmentCommand.ENV_EXITED:
return
# Drain the step queue to make sure all workers are paused and we have found all concurrent errors.
# Pausing all training is needed since we need to reset all pending training steps as they could be corrupted.
other_failures: Dict[int, Exception] = self._drain_step_queue()
# TODO: Once we use python 3.9 switch to using the | operator to combine dicts.
failures: Dict[int, Exception] = {
**{first_failure.worker_id: first_failure.payload},
**other_failures,
}
for worker_id, ex in failures.items():
self._assert_worker_can_restart(worker_id, ex)
logger.warning(f"Restarting worker[{worker_id}] after '{ex}'")
self.recent_restart_timestamps[worker_id].append(datetime.datetime.now())
self.restart_counts[worker_id] += 1
self.env_workers[worker_id] = self.create_worker(
worker_id, self.step_queue, self.env_factory, self.run_options
)
# The restarts were successful, clear all the existing training trajectories so we don't use corrupted or
# outdated data.
self.reset(self.env_parameters)
def _drain_step_queue(self) -> Dict[int, Exception]:
"""
Drains all steps out of the step queue and returns all exceptions from crashed workers.
This will effectively pause all workers so that they won't do anything until _queue_steps is called.
"""
all_failures = {}
workers_still_pending = {w.worker_id for w in self.env_workers if w.waiting}
deadline = datetime.datetime.now() + datetime.timedelta(minutes=1)
while workers_still_pending and deadline > datetime.datetime.now():
try:
while True:
step: EnvironmentResponse = self.step_queue.get_nowait()
if step.cmd == EnvironmentCommand.ENV_EXITED:
workers_still_pending.add(step.worker_id)
all_failures[step.worker_id] = step.payload
else:
workers_still_pending.remove(step.worker_id)
self.env_workers[step.worker_id].waiting = False
except EmptyQueueException:
pass
if deadline < datetime.datetime.now():
still_waiting = {w.worker_id for w in self.env_workers if w.waiting}
raise TimeoutError(f"Workers {still_waiting} stuck in waiting state")
return all_failures
def _assert_worker_can_restart(self, worker_id: int, exception: Exception) -> None:
"""
Checks if we can recover from an exception from a worker.
If the restart limit is exceeded it will raise a UnityCommunicationException.
If the exception is not recoverable it re-raises the exception.
"""
if (
isinstance(exception, UnityCommunicationException)
or isinstance(exception, UnityTimeOutException)
or isinstance(exception, UnityEnvironmentException)
or isinstance(exception, UnityCommunicatorStoppedException)
):
if self._worker_has_restart_quota(worker_id):
return
else:
logger.error(
f"Worker {worker_id} exceeded the allowed number of restarts."
)
raise exception
raise exception
def _worker_has_restart_quota(self, worker_id: int) -> bool:
self._drop_old_restart_timestamps(worker_id)
max_lifetime_restarts = self.run_options.env_settings.max_lifetime_restarts
max_limit_check = (
max_lifetime_restarts == -1
or self.restart_counts[worker_id] < max_lifetime_restarts
)
rate_limit_n = self.run_options.env_settings.restarts_rate_limit_n
rate_limit_check = (
rate_limit_n == -1
or len(self.recent_restart_timestamps[worker_id]) < rate_limit_n
)
return rate_limit_check and max_limit_check
def _drop_old_restart_timestamps(self, worker_id: int) -> None:
"""
Drops environment restart timestamps that are outside of the current window.
"""
def _filter(t: datetime.datetime) -> bool:
return t > datetime.datetime.now() - datetime.timedelta(
seconds=self.run_options.env_settings.restarts_rate_limit_period_s
)
self.recent_restart_timestamps[worker_id] = list(
filter(_filter, self.recent_restart_timestamps[worker_id])
)
def _step(self) -> List[EnvironmentStep]:
# Queue steps for any workers which aren't in the "waiting" state.
self._queue_steps()
worker_steps: List[EnvironmentResponse] = []
step_workers: Set[int] = set()
# Poll the step queue for completed steps from environment workers until we retrieve
# 1 or more, which we will then return as StepInfos
while len(worker_steps) < 1:
try:
while True:
step: EnvironmentResponse = self.step_queue.get_nowait()
if step.cmd == EnvironmentCommand.ENV_EXITED:
# If even one env exits try to restart all envs that failed.
self._restart_failed_workers(step)
# Clear state and restart this function.
worker_steps.clear()
step_workers.clear()
self._queue_steps()
elif step.worker_id not in step_workers:
self.env_workers[step.worker_id].waiting = False
worker_steps.append(step)
step_workers.add(step.worker_id)
except EmptyQueueException:
pass
step_infos = self._postprocess_steps(worker_steps)
return step_infos
def _reset_env(self, config: Optional[Dict] = None) -> List[EnvironmentStep]:
while any(ew.waiting for ew in self.env_workers):
if not self.step_queue.empty():
step = self.step_queue.get_nowait()
self.env_workers[step.worker_id].waiting = False
# Send config to environment
self.set_env_parameters(config)
# First enqueue reset commands for all workers so that they reset in parallel
for ew in self.env_workers:
ew.send(EnvironmentCommand.RESET, config)
# Next (synchronously) collect the reset observations from each worker in sequence
for ew in self.env_workers:
ew.previous_step = EnvironmentStep(ew.recv().payload, ew.worker_id, {}, {})
return list(map(lambda ew: ew.previous_step, self.env_workers))
def set_env_parameters(self, config: Dict = None) -> None:
"""
Sends environment parameter settings to C# via the
EnvironmentParametersSidehannel for each worker.
:param config: Dict of environment parameter keys and values
"""
self.env_parameters = config
for ew in self.env_workers:
ew.send(EnvironmentCommand.ENVIRONMENT_PARAMETERS, config)
def on_training_started(
self, behavior_name: str, trainer_settings: TrainerSettings
) -> None:
"""
Handle traing starting for a new behavior type. Generally nothing is necessary here.
:param behavior_name:
:param trainer_settings:
:return:
"""
for ew in self.env_workers:
ew.send(
EnvironmentCommand.TRAINING_STARTED, (behavior_name, trainer_settings)
)
@property
def training_behaviors(self) -> Dict[BehaviorName, BehaviorSpec]:
result: Dict[BehaviorName, BehaviorSpec] = {}
for worker in self.env_workers:
worker.send(EnvironmentCommand.BEHAVIOR_SPECS)
result.update(worker.recv().payload)
return result
def close(self) -> None:
logger.debug("SubprocessEnvManager closing.")
for env_worker in self.env_workers:
env_worker.request_close()
# Pull messages out of the queue until every worker has CLOSED or we time out.
deadline = time.time() + WORKER_SHUTDOWN_TIMEOUT_S
while self.workers_alive > 0 and time.time() < deadline:
try:
step: EnvironmentResponse = self.step_queue.get_nowait()
env_worker = self.env_workers[step.worker_id]
if step.cmd == EnvironmentCommand.CLOSED and not env_worker.closed:
env_worker.closed = True
self.workers_alive -= 1
# Discard all other messages.
except EmptyQueueException:
pass
self.step_queue.close()
# Sanity check to kill zombie workers and report an issue if they occur.
if self.workers_alive > 0:
logger.error("SubprocessEnvManager had workers that didn't signal shutdown")
for env_worker in self.env_workers:
if not env_worker.closed and env_worker.process.is_alive():
env_worker.process.terminate()
logger.error(
"A SubprocessEnvManager worker did not shut down correctly so it was forcefully terminated."
)
self.step_queue.join_thread()
def _postprocess_steps(
self, env_steps: List[EnvironmentResponse]
) -> List[EnvironmentStep]:
step_infos = []
timer_nodes = []
for step in env_steps:
payload: StepResponse = step.payload
env_worker = self.env_workers[step.worker_id]
new_step = EnvironmentStep(
payload.all_step_result,
step.worker_id,
env_worker.previous_all_action_info,
payload.environment_stats,
)
step_infos.append(new_step)
env_worker.previous_step = new_step
if payload.timer_root:
timer_nodes.append(payload.timer_root)
if timer_nodes:
with hierarchical_timer("workers") as main_timer_node:
for worker_timer_node in timer_nodes:
main_timer_node.merge(
worker_timer_node, root_name="worker_root", is_parallel=True
)
return step_infos
@timed
def _take_step(self, last_step: EnvironmentStep) -> Dict[BehaviorName, ActionInfo]:
all_action_info: Dict[str, ActionInfo] = {}
for brain_name, step_tuple in last_step.current_all_step_result.items():
if brain_name in self.policies:
all_action_info[brain_name] = self.policies[brain_name].get_action(
step_tuple[0], last_step.worker_id
)
return all_action_info
| SubprocessEnvManager |
python | scrapy__scrapy | tests/test_utils_datatypes.py | {
"start": 428,
"end": 5504
} | class ____(ABC):
@property
@abstractmethod
def dict_class(self) -> type[MutableMapping]:
raise NotImplementedError
def test_init_dict(self):
seq = {"red": 1, "black": 3}
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_pair_sequence(self):
seq = (("red", 1), ("black", 3))
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_mapping(self):
class MyMapping(Mapping):
def __init__(self, **kwargs):
self._d = kwargs
def __getitem__(self, key):
return self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
seq = MyMapping(red=1, black=3)
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_mutable_mapping(self):
class MyMutableMapping(MutableMapping):
def __init__(self, **kwargs):
self._d = kwargs
def __getitem__(self, key):
return self._d[key]
def __setitem__(self, key, value):
self._d[key] = value
def __delitem__(self, key):
del self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
seq = MyMutableMapping(red=1, black=3)
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_caseless(self):
d = self.dict_class()
d["key_Lower"] = 1
assert d["KEy_loWer"] == 1
assert d.get("KEy_loWer") == 1
d["KEY_LOWER"] = 3
assert d["key_Lower"] == 3
assert d.get("key_Lower") == 3
def test_delete(self):
d = self.dict_class({"key_lower": 1})
del d["key_LOWER"]
with pytest.raises(KeyError):
d["key_LOWER"]
with pytest.raises(KeyError):
d["key_lower"]
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_getdefault(self):
d = CaselessDict()
assert d.get("c", 5) == 5
d["c"] = 10
assert d.get("c", 5) == 10
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_setdefault(self):
d = CaselessDict({"a": 1, "b": 2})
r = d.setdefault("A", 5)
assert r == 1
assert d["A"] == 1
r = d.setdefault("c", 5)
assert r == 5
assert d["C"] == 5
def test_fromkeys(self):
keys = ("a", "b")
d = self.dict_class.fromkeys(keys)
assert d["A"] is None
assert d["B"] is None
d = self.dict_class.fromkeys(keys, 1)
assert d["A"] == 1
assert d["B"] == 1
instance = self.dict_class()
d = instance.fromkeys(keys)
assert d["A"] is None
assert d["B"] is None
d = instance.fromkeys(keys, 1)
assert d["A"] == 1
assert d["B"] == 1
def test_contains(self):
d = self.dict_class()
d["a"] = 1
assert "A" in d
def test_pop(self):
d = self.dict_class()
d["a"] = 1
assert d.pop("A") == 1
with pytest.raises(KeyError):
d.pop("A")
def test_normkey(self):
class MyDict(self.dict_class):
def _normkey(self, key):
return key.title()
normkey = _normkey # deprecated CaselessDict class
d = MyDict()
d["key-one"] = 2
assert list(d.keys()) == ["Key-One"]
def test_normvalue(self):
class MyDict(self.dict_class):
def _normvalue(self, value):
if value is not None:
return value + 1
return None
normvalue = _normvalue # deprecated CaselessDict class
d = MyDict({"key": 1})
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d["key"] = 1
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d.setdefault("key", 1)
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d.update({"key": 1})
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict.fromkeys(("key",), 1)
assert d["key"] == 2
assert d.get("key") == 2
def test_copy(self):
h1 = self.dict_class({"header1": "value"})
h2 = copy.copy(h1)
assert isinstance(h2, self.dict_class)
assert h1 == h2
assert h1.get("header1") == h2.get("header1")
assert h1.get("header1") == h2.get("HEADER1")
h3 = h1.copy()
assert isinstance(h3, self.dict_class)
assert h1 == h3
assert h1.get("header1") == h3.get("header1")
assert h1.get("header1") == h3.get("HEADER1")
| TestCaseInsensitiveDictBase |
python | django__django | tests/validation/test_constraints.py | {
"start": 264,
"end": 3786
} | class ____(TestCase):
@skipUnlessDBFeature("supports_table_check_constraints")
def test_full_clean_with_check_constraints(self):
product = Product(price=10, discounted_price=15)
with self.assertRaises(ValidationError) as cm:
product.full_clean()
self.assertEqual(
cm.exception.message_dict,
{
"__all__": [
"Constraint “price_gt_discounted_price_validation” is violated."
]
},
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_full_clean_with_check_constraints_on_child_model(self):
product = ChildProduct(price=10, discounted_price=15)
with self.assertRaises(ValidationError) as cm:
product.full_clean()
self.assertEqual(
cm.exception.message_dict,
{
"__all__": [
"Constraint “price_gt_discounted_price_validation” is violated."
]
},
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_full_clean_with_check_constraints_disabled(self):
product = Product(price=10, discounted_price=15)
product.full_clean(validate_constraints=False)
def test_full_clean_with_unique_constraints(self):
UniqueConstraintProduct.objects.create(name="product", color="yellow", rank=1)
tests = [
UniqueConstraintProduct(name="product", color="yellow", rank=1),
# Child model.
ChildUniqueConstraintProduct(name="product", color="yellow", rank=1),
]
for product in tests:
with self.subTest(model=product.__class__.__name__):
with self.assertRaises(ValidationError) as cm:
product.full_clean()
self.assertEqual(
cm.exception.message_dict,
{
"__all__": [
"Unique constraint product with this Name and Color "
"already exists."
],
"rank": [
"Unique constraint product with this Rank already exists."
],
},
)
def test_full_clean_with_unique_constraints_disabled(self):
UniqueConstraintProduct.objects.create(name="product", color="yellow", rank=1)
product = UniqueConstraintProduct(name="product", color="yellow", rank=1)
product.full_clean(validate_constraints=False)
@skipUnlessDBFeature("supports_partial_indexes")
def test_full_clean_with_partial_unique_constraints(self):
UniqueConstraintConditionProduct.objects.create(name="product")
product = UniqueConstraintConditionProduct(name="product")
with self.assertRaises(ValidationError) as cm:
product.full_clean()
self.assertEqual(
cm.exception.message_dict,
{
"__all__": [
"Constraint “name_without_color_uniq_validation” is violated."
]
},
)
@skipUnlessDBFeature("supports_partial_indexes")
def test_full_clean_with_partial_unique_constraints_disabled(self):
UniqueConstraintConditionProduct.objects.create(name="product")
product = UniqueConstraintConditionProduct(name="product")
product.full_clean(validate_constraints=False)
| PerformConstraintChecksTest |
python | kamyu104__LeetCode-Solutions | Python/max-chunks-to-make-sorted-ii.py | {
"start": 51,
"end": 543
} | class ____(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
result, increasing_stk = 0, []
for num in arr:
max_num = num if not increasing_stk else max(increasing_stk[-1], num)
while increasing_stk and increasing_stk[-1] > num:
increasing_stk.pop()
increasing_stk.append(max_num)
return len(increasing_stk)
# Time: O(nlogn)
# Space: O(n)
| Solution |
python | django__django | tests/migrations/migrations_test_apps/with_generic_model/models.py | {
"start": 499,
"end": 549
} | class ____(typing.Generic[T1, T2]):
pass
| Parent1 |
python | pandas-dev__pandas | pandas/core/arrays/integer.py | {
"start": 5504,
"end": 5693
} | class ____(IntegerDtype):
type = np.int32
name: ClassVar[str] = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
@set_module("pandas")
| Int32Dtype |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 15668,
"end": 15769
} | class ____(PydanticValueError):
msg_template = 'value is not a valid IPv4 network'
| IPv4NetworkError |
python | kamyu104__LeetCode-Solutions | Python/maximum-consecutive-floors-without-special-floors.py | {
"start": 40,
"end": 446
} | class ____(object):
def maxConsecutive(self, bottom, top, special):
"""
:type bottom: int
:type top: int
:type special: List[int]
:rtype: int
"""
special.sort()
result = max(special[0]-bottom, top-special[-1])
for i in xrange(1, len(special)):
result = max(result, special[i]-special[i-1]-1)
return result
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec3.py | {
"start": 939,
"end": 1576
} | class ____(Generic[P, R]):
def __init__(self, func: Callable[P, R]):
self.func = func
def func4(f: Callable[P, R]) -> ClassA[P, R]:
return ClassA(f)
T1 = TypeVar("T1")
T2 = TypeVar("T2")
def decorator2(f: Callable[P, R]) -> Callable[P, R]:
return f
def func5(f: Callable[[], list[T1]]) -> Callable[[list[T2]], list[T1 | T2]]:
def inner(res: list[T2], /) -> list[T1 | T2]: ...
return decorator2(inner)
def func6(x: Iterable[Callable[P, None]]) -> Callable[P, None]:
def inner(*args: P.args, **kwargs: P.kwargs) -> None:
for fn in x:
fn(*args, **kwargs)
return inner
| ClassA |
python | viewflow__viewflow | viewflow/templatetags/viewflow.py | {
"start": 4280,
"end": 5714
} | class ____(template.Node):
"""
Render a django form using google material-components-web library.
Example:
{% render_form form [layout] %}
"""
default_layout = FormLayout()
def __init__(self, parser, token):
bits = token.split_contents()
layout_expr = None
if len(bits) == 2:
tag, form_expr = bits
elif len(bits) == 3:
tag, form_expr, layout_expr = bits
else:
raise template.TemplateSyntaxError(
"Invalid syntax in material tag, expects only form and optional layout arguments."
)
self.form_expr = parser.compile_filter(form_expr)
self.layout_expr = parser.compile_filter(layout_expr) if layout_expr else None
def render(self, context):
form = self.form_expr.resolve(context)
if not isinstance(form, forms.BaseForm):
raise template.TemplateSyntaxError(
"material tag first argument must be a form"
)
layout = None
if self.layout_expr:
layout = self.layout_expr.resolve(context)
if layout and not isinstance(layout, FormLayout):
raise template.TemplateSyntaxError(
"material tag second argument must be a layout"
)
return layout.render(form) if layout else self.default_layout.render(form)
@register.tag("get_absolute_url")
| FormNode |
python | pdm-project__pdm | src/pdm/exceptions.py | {
"start": 401,
"end": 447
} | class ____(PdmUsageError):
pass
| PublishError |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 1073,
"end": 1434
} | class ____(models.Model):
name = models.CharField(max_length=128, unique=True)
created_at = models.DateTimeField(default=timezone.now)
class Meta:
get_latest_by = "created_at"
def __str__(self):
return self.name
Project.not_a_relation_descriptor = DynamicAccessor()
# Simple model for testing inline admin functionality
| Project |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchaudio_models.py | {
"start": 12789,
"end": 14581
} | class ____(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
\text{where pos is the word position and i is the embed idx)
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
| PositionalEncoding |
python | cython__cython | Cython/Compiler/ModuleNode.py | {
"start": 3690,
"end": 7618
} | class ____:
"""
Class responsible for generating code that imports and exports shared utility functions.
Mark the positions where the functions should be called with `call_import_code()`/`call_export_code()`.
The function calls and import/export functions are generated when `generate_exporting_functions()`
is called. This approach is needed because the list of the shared functions is only known in the later
stages of compilation.
"""
def __init__(self, pos, mod_init_subfunction, scope):
self.in_shared_utility_module = bool(scope.context.shared_c_file_path)
self.using_shared_utility_module = bool(scope.context.shared_utility_qualified_name)
self.pos = pos
self.scope = scope
self.import_code = mod_init_subfunction("Shared function import code")
self.export_code = mod_init_subfunction("Shared function export code")
def has_shared_exports(self, shared_func_definitions: Sequence[Code.SharedFunctionDecl]) -> bool:
return bool(self.in_shared_utility_module and shared_func_definitions)
def has_shared_imports(self, shared_func_definitions: Sequence[Code.SharedFunctionDecl]) -> bool:
return bool(self.using_shared_utility_module and shared_func_definitions)
def call_import_code(self, code):
self.import_code.set_call_code(code)
def call_export_code(self, code):
self.export_code.set_call_code(code)
def _generate_c_shared_function_export_code(self, code, shared_function_definitions: Sequence[Code.SharedFunctionDecl]):
# We use the function cname also as exported name.
exports = [
(f"{shared_func_def.ret}({shared_func_def.params})", shared_func_def.name, shared_func_def.name)
for shared_func_def in shared_function_definitions
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("FunctionExport", "ImportExport.c"))
_generate_export_code(code, self.pos, exports, "__Pyx_ExportFunction", "void (*{name})(void)")
def _generate_c_shared_function_import_code_for_module(self, code, function_definitions: Sequence[Code.SharedFunctionDecl]):
# We use the function cname also as exported name.
imports = [
(f"{shared_func_def.ret}({shared_func_def.params})", shared_func_def.name, shared_func_def.name)
for shared_func_def in function_definitions
]
code.globalstate.use_utility_code(
UtilityCode.load_cached("FunctionImport", "ImportExport.c"))
shared_utility_qualified_name = EncodedString(self.scope.context.shared_utility_qualified_name)
import_func = f"__Pyx_ImportFunction_{Naming.cyversion}"
_generate_import_code(
code, self.pos, imports, shared_utility_qualified_name, import_func, "void (**{name})(void)")
def _generate_exports(self, shared_utility_functions: Sequence[Code.SharedFunctionDecl]):
if self.has_shared_exports(shared_utility_functions):
with self.export_code as inner_code:
self._generate_c_shared_function_export_code(
inner_code,
shared_utility_functions
)
def _generate_imports(self, shared_utility_functions: Sequence[Code.SharedFunctionDecl]):
if self.has_shared_imports(shared_utility_functions):
with self.import_code as inner_code:
self._generate_c_shared_function_import_code_for_module(
inner_code,
shared_utility_functions
)
def generate_exporting_functions(self, code):
shared_utility_functions = code.globalstate.shared_utility_functions
code.enter_cfunc_scope(self.scope)
self._generate_exports(shared_utility_functions)
self._generate_imports(shared_utility_functions)
code.exit_cfunc_scope()
| SharedUtilityExporter |
python | sqlalchemy__sqlalchemy | test/orm/test_lockmode.py | {
"start": 4652,
"end": 12096
} | class ____(_fixtures.FixtureTest, AssertsCompiledSQL):
"""run some compile tests, even though these are redundant."""
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
cls.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_default_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_not_supported_by_dialect_should_just_use_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_postgres_read(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR SHARE",
dialect="postgresql",
)
def test_postgres_read_nowait(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR SHARE NOWAIT",
dialect="postgresql",
)
def test_postgres_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="postgresql",
)
def test_postgres_update_of(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User.id),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity_list(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
self.assert_compile(
sess.query(User.id, Address.id).with_for_update(
of=[User, Address]
),
"SELECT users.id AS users_id, addresses.id AS addresses_id "
"FROM users, addresses FOR UPDATE OF users, addresses",
dialect="postgresql",
)
def test_postgres_for_no_key_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE",
dialect="postgresql",
)
def test_postgres_for_no_key_nowait_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE NOWAIT",
dialect="postgresql",
)
def test_postgres_update_of_list(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(
of=[User.id, User.id, User.id]
),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_skip_locked(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="postgresql",
)
def test_oracle_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="oracle",
)
def test_oracle_update_skip_locked(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="oracle",
)
def test_mysql_read(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users LOCK IN SHARE MODE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
"AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users LIMIT %s FOR UPDATE) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload_no_render_oracle(self):
from sqlalchemy.dialects import oracle
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT anon_2.users_id AS users_id, "
"anon_2.users_name AS users_name FROM "
"(SELECT users.id AS users_id, users.name AS users_name "
"FROM users) anon_2 WHERE ROWNUM <= "
"__[POSTCOMPILE_param_1]) anon_1 "
"LEFT OUTER JOIN addresses addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect=oracle.dialect(enable_offset_fetch=False),
)
| CompileTest |
python | numba__numba | numba/tests/doc_examples/test_numpy_generators.py | {
"start": 127,
"end": 1099
} | class ____(unittest.TestCase):
def test_numpy_gen_usage(self):
# magictoken.npgen_usage.begin
x = np.random.default_rng(1)
y = np.random.default_rng(1)
size = 10
@numba.njit
def do_stuff(gen):
return gen.random(size=int(size / 2))
original = x.random(size=size)
# [0.51182162 0.9504637 0.14415961 0.94864945 0.31183145
# 0.42332645 0.82770259 0.40919914 0.54959369 0.02755911]
numba_func_res = do_stuff(y)
# [0.51182162 0.9504637 0.14415961 0.94864945 0.31183145]
after_numba = y.random(size=int(size / 2))
# [0.42332645 0.82770259 0.40919914 0.54959369 0.02755911]
# magictoken.npgen_usage.end
numba_res = np.concatenate((numba_func_res, after_numba))
for _np_res, _nb_res in zip(original, numba_res):
self.assertEqual(_np_res, _nb_res)
if __name__ == '__main__':
unittest.main()
| NumpyGeneratorUsageTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 954761,
"end": 955505
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for ReviewRequest."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ReviewRequestEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("ReviewRequest"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ReviewRequestConnection |
python | python__mypy | mypy/test/testparse.py | {
"start": 2498,
"end": 3710
} | class ____(DataSuite):
required_out_section = True
base_path = "."
files = ["parse-errors.test"]
def run_case(self, testcase: DataDrivenTestCase) -> None:
test_parse_error(testcase)
def test_parse_error(testcase: DataDrivenTestCase) -> None:
try:
options = parse_options("\n".join(testcase.input), testcase, 0)
if options.python_version != sys.version_info[:2]:
skip()
# Compile temporary file. The test file contains non-ASCII characters.
parse(
bytes("\n".join(testcase.input), "utf-8"),
INPUT_FILE_NAME,
"__main__",
errors=Errors(options),
options=options,
raise_on_error=True,
)
raise AssertionError("No errors reported")
except CompileError as e:
if e.module_with_blocker is not None:
assert e.module_with_blocker == "__main__"
# Verify that there was a compile error and that the error messages
# are equivalent.
assert_string_arrays_equal(
testcase.output,
e.messages,
f"Invalid compiler output ({testcase.file}, line {testcase.line})",
)
| ParseErrorSuite |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 2143,
"end": 2213
} | class ____:
@property
def real(self: _Self) -> _Self: ...
| Class5 |
python | apache__airflow | airflow-core/tests/unit/models/test_dag.py | {
"start": 96975,
"end": 113688
} | class ____:
def setup_method(self) -> None:
clear_db_runs()
clear_db_dags()
clear_db_dag_bundles()
def teardown_method(self) -> None:
clear_db_runs()
clear_db_dags()
clear_db_dag_bundles()
@pytest.mark.parametrize("tasks_count", [3, 12])
def test_count_number_queries(self, tasks_count, testing_dag_bundle):
dag = DAG("test_dagrun_query_count", schedule=None, start_date=DEFAULT_DATE)
for i in range(tasks_count):
EmptyOperator(task_id=f"dummy_task_{i}", owner="test", dag=dag)
scheduler_dag = sync_dag_to_db(dag)
with assert_queries_count(5):
scheduler_dag.create_dagrun(
run_id="test_dagrun_query_count",
run_type=DagRunType.MANUAL,
state=State.RUNNING,
logical_date=TEST_DATE,
data_interval=(TEST_DATE, TEST_DATE),
run_after=TEST_DATE,
triggered_by=DagRunTriggeredByType.TEST,
)
@pytest.mark.need_serialized_dag
@pytest.mark.parametrize(
"run_id",
["test-run-id"],
)
def test_set_task_instance_state(run_id, session, dag_maker):
"""Test that set_task_instance_state updates the TaskInstance state and clear downstream failed"""
start_date = datetime_tz(2020, 1, 1)
with dag_maker(
"test_set_task_instance_state",
start_date=start_date,
session=session,
serialized=True,
) as dag:
task_1 = EmptyOperator(task_id="task_1")
task_2 = EmptyOperator(task_id="task_2")
task_3 = EmptyOperator(task_id="task_3")
task_4 = EmptyOperator(task_id="task_4")
task_5 = EmptyOperator(task_id="task_5")
task_1 >> [task_2, task_3, task_4, task_5]
dagrun = dag_maker.create_dagrun(
run_id=run_id,
state=State.FAILED,
run_type=DagRunType.SCHEDULED,
)
def get_ti_from_db(task):
return (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == task.task_id,
TI.run_id == dagrun.run_id,
)
.one()
)
get_ti_from_db(task_1).state = State.FAILED
get_ti_from_db(task_2).state = State.SUCCESS
get_ti_from_db(task_3).state = State.UPSTREAM_FAILED
get_ti_from_db(task_4).state = State.FAILED
get_ti_from_db(task_5).state = State.SKIPPED
session.flush()
altered = dag.set_task_instance_state(
task_id=task_1.task_id,
run_id=run_id,
state=State.SUCCESS,
session=session,
)
# After _mark_task_instance_state, task_1 is marked as SUCCESS
ti1 = get_ti_from_db(task_1)
assert ti1.state == State.SUCCESS
# TIs should have DagRun pre-loaded
assert isinstance(inspect(ti1).attrs.dag_run.loaded_value, DagRun)
# task_2 remains as SUCCESS
assert get_ti_from_db(task_2).state == State.SUCCESS
# task_3 and task_4 are cleared because they were in FAILED/UPSTREAM_FAILED state
assert get_ti_from_db(task_3).state == State.NONE
assert get_ti_from_db(task_4).state == State.NONE
# task_5 remains as SKIPPED
assert get_ti_from_db(task_5).state == State.SKIPPED
dagrun.refresh_from_db(session=session)
# dagrun should be set to QUEUED
assert dagrun.get_state() == State.QUEUED
assert {tuple(t.key) for t in altered} == {
("test_set_task_instance_state", "task_1", dagrun.run_id, 0, -1)
}
@pytest.mark.need_serialized_dag
def test_set_task_instance_state_mapped(dag_maker, session):
"""Test that when setting an individual mapped TI that the other TIs are not affected"""
task_id = "t1"
# The catchup behavior isn't central to what's being tested. Setting catchup explicitly to True.
with dag_maker(session=session, catchup=True) as dag:
@task_decorator
def make_arg_lists():
return [[1], [2], [{"a": "b"}]]
def consumer(value):
print(value)
mapped = PythonOperator.partial(task_id=task_id, python_callable=consumer).expand(
op_args=make_arg_lists()
)
mapped >> BaseOperator(task_id="downstream")
dr1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=DagRunState.FAILED,
)
mapped = dag.get_task(task_id)
expand_mapped_task(mapped, dr1.run_id, "make_arg_lists", length=2, session=session)
# set_state(future=True) only applies to scheduled runs
dr2 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=DagRunState.FAILED,
logical_date=DEFAULT_DATE + datetime.timedelta(days=1),
)
expand_mapped_task(mapped, dr2.run_id, "make_arg_lists", length=2, session=session)
session.query(TI).filter_by(dag_id=dag.dag_id).update({"state": TaskInstanceState.FAILED})
ti_query = (
session.query(TI.task_id, TI.map_index, TI.run_id, TI.state)
.filter(TI.dag_id == dag.dag_id, TI.task_id.in_([task_id, "downstream"]))
.order_by(TI.run_id, TI.task_id, TI.map_index)
)
# Check pre-conditions
assert ti_query.all() == [
("downstream", -1, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 0, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr1.run_id, TaskInstanceState.FAILED),
("downstream", -1, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 0, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr2.run_id, TaskInstanceState.FAILED),
]
dag.set_task_instance_state(
task_id=task_id,
map_indexes=[1],
future=True,
run_id=dr1.run_id,
state=TaskInstanceState.SUCCESS,
session=session,
)
assert dr1 in session, "Check session is passed down all the way"
assert ti_query.all() == [
("downstream", -1, dr1.run_id, None),
(task_id, 0, dr1.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr1.run_id, TaskInstanceState.SUCCESS),
("downstream", -1, dr2.run_id, None),
(task_id, 0, dr2.run_id, TaskInstanceState.FAILED),
(task_id, 1, dr2.run_id, TaskInstanceState.SUCCESS),
]
def test_dag_teardowns_property_lists_all_teardown_tasks():
@setup
def setup_task():
return 1
@teardown
def teardown_task():
return 1
@teardown
def teardown_task2():
return 1
@teardown
def teardown_task3():
return 1
@task_decorator
def mytask():
return 1
with DAG("dag") as dag:
t1 = setup_task()
t2 = teardown_task()
t3 = teardown_task2()
t4 = teardown_task3()
with t1 >> t2:
with t3:
with t4:
mytask()
assert {t.task_id for t in dag.teardowns} == {"teardown_task", "teardown_task2", "teardown_task3"}
assert {t.task_id for t in dag.tasks_upstream_of_teardowns} == {"setup_task", "mytask"}
@pytest.mark.parametrize(
("start_date", "expected_infos"),
[
(
DEFAULT_DATE,
[DagRunInfo.interval(DEFAULT_DATE, DEFAULT_DATE + datetime.timedelta(hours=1))],
),
(
DEFAULT_DATE - datetime.timedelta(hours=3),
[
DagRunInfo.interval(
DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
),
DagRunInfo.interval(
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
),
DagRunInfo.interval(
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE,
),
DagRunInfo.interval(
DEFAULT_DATE,
DEFAULT_DATE + datetime.timedelta(hours=1),
),
],
),
],
ids=["in-dag-restriction", "out-of-dag-restriction"],
)
def test_iter_dagrun_infos_between(start_date, expected_infos):
dag = DAG(dag_id="test_get_dates", start_date=DEFAULT_DATE, schedule="@hourly")
EmptyOperator(task_id="dummy", dag=dag)
iterator = create_scheduler_dag(dag).iter_dagrun_infos_between(
earliest=pendulum.instance(start_date),
latest=pendulum.instance(DEFAULT_DATE),
align=True,
)
assert expected_infos == list(iterator)
def test_iter_dagrun_infos_between_error(caplog):
start = pendulum.instance(DEFAULT_DATE - datetime.timedelta(hours=1))
end = pendulum.instance(DEFAULT_DATE)
class FailingAfterOneTimetable(Timetable):
def next_dagrun_info(self, last_automated_data_interval, restriction):
if last_automated_data_interval is None:
return DagRunInfo.interval(start, end)
raise RuntimeError("this fails")
def _get_registered_timetable(s):
if s == "unit.models.test_dag.FailingAfterOneTimetable":
return FailingAfterOneTimetable
raise ValueError(f"unexpected class {s!r}")
dag = DAG(
dag_id="test_iter_dagrun_infos_between_error",
start_date=DEFAULT_DATE,
schedule=FailingAfterOneTimetable(),
)
with mock.patch(
"airflow.serialization.serialized_objects._get_registered_timetable",
_get_registered_timetable,
):
scheduler_dag = create_scheduler_dag(dag)
iterator = scheduler_dag.iter_dagrun_infos_between(earliest=start, latest=end, align=True)
with caplog.at_level(logging.ERROR):
infos = list(iterator)
# The second timetable.next_dagrun_info() call raises an exception, so only the first result is returned.
assert infos == [DagRunInfo.interval(start, end)]
assert caplog.record_tuples == [
(
"airflow.serialization.serialized_objects",
logging.ERROR,
f"Failed to fetch run info after data interval {DataInterval(start, end)} for DAG {dag.dag_id!r}",
),
]
assert caplog.entries[0].get("exc_info") is not None, "should contain exception context"
@pytest.mark.parametrize(
("logical_date", "data_interval_start", "data_interval_end", "expected_data_interval"),
[
pytest.param(None, None, None, None, id="no-next-run"),
pytest.param(
DEFAULT_DATE,
DEFAULT_DATE,
DEFAULT_DATE + timedelta(days=2),
DataInterval(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=2)),
id="modern",
),
pytest.param(
DEFAULT_DATE,
None,
None,
DataInterval(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=1)),
id="legacy",
),
],
)
def test_get_next_data_interval(
logical_date,
data_interval_start,
data_interval_end,
expected_data_interval,
):
dag = DAG(dag_id="test_get_next_data_interval", schedule="@daily", start_date=DEFAULT_DATE)
dag_model = DagModel(
dag_id="test_get_next_data_interval",
bundle_name="dags-folder",
next_dagrun=logical_date,
next_dagrun_data_interval_start=data_interval_start,
next_dagrun_data_interval_end=data_interval_end,
)
assert get_next_data_interval(dag.timetable, dag_model) == expected_data_interval
@pytest.mark.need_serialized_dag
@pytest.mark.parametrize(
("dag_date", "tasks_date", "catchup", "restrict"),
[
# catchup=True cases - respects task start dates
[
(DEFAULT_DATE, None),
[
(DEFAULT_DATE + timedelta(days=1), DEFAULT_DATE + timedelta(days=2)),
(DEFAULT_DATE + timedelta(days=3), DEFAULT_DATE + timedelta(days=4)),
],
True,
TimeRestriction(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=4), True),
],
[
(DEFAULT_DATE, None),
[(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=1)), (DEFAULT_DATE, None)],
True,
TimeRestriction(DEFAULT_DATE, None, True),
],
# catchup=False cases - same time boundaries but different catchup flag
[
(DEFAULT_DATE, None),
[
(DEFAULT_DATE + timedelta(days=1), DEFAULT_DATE + timedelta(days=2)),
(DEFAULT_DATE + timedelta(days=3), DEFAULT_DATE + timedelta(days=4)),
],
False,
TimeRestriction(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=4), False),
],
[
(DEFAULT_DATE, None),
[(DEFAULT_DATE, DEFAULT_DATE + timedelta(days=1)), (DEFAULT_DATE, None)],
False,
TimeRestriction(DEFAULT_DATE, None, False),
],
],
)
def test__time_restriction(dag_maker, dag_date, tasks_date, catchup, restrict):
"""
Test that _time_restriction correctly reflects the DAG's time constraints with different catchup settings.
With catchup=True, future task start dates are respected.
With catchup=False, the scheduler may schedule tasks regardless of their future start dates.
"""
with dag_maker(
"test__time_restriction",
schedule=None,
catchup=catchup, # Use the parametrized catchup value
start_date=dag_date[0],
end_date=dag_date[1],
) as dag:
EmptyOperator(task_id="do1", start_date=tasks_date[0][0], end_date=tasks_date[0][1])
EmptyOperator(task_id="do2", start_date=tasks_date[1][0], end_date=tasks_date[1][1])
assert dag._time_restriction == restrict
@pytest.mark.need_serialized_dag
def test_get_asset_triggered_next_run_info(dag_maker, clear_assets):
asset1 = Asset(uri="test://asset1", name="test_asset1", group="test-group")
asset2 = Asset(uri="test://asset2", group="test-group")
asset3 = Asset(uri="test://asset3", group="test-group")
with dag_maker(dag_id="assets-1", schedule=[asset2]):
pass
dag1 = dag_maker.dag
with dag_maker(dag_id="assets-2", schedule=[asset1, asset2]):
pass
dag2 = dag_maker.dag
with dag_maker(dag_id="assets-3", schedule=[asset1, asset2, asset3]):
pass
dag3 = dag_maker.dag
session = dag_maker.session
asset1_id = session.query(AssetModel.id).filter_by(uri=asset1.uri).scalar()
session.bulk_save_objects(
[
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=dag2.dag_id),
AssetDagRunQueue(asset_id=asset1_id, target_dag_id=dag3.dag_id),
]
)
session.flush()
assets = session.query(AssetModel.uri).order_by(AssetModel.id).all()
info = get_asset_triggered_next_run_info([dag1.dag_id], session=session)
assert info[dag1.dag_id] == {
"ready": 0,
"total": 1,
"uri": assets[0].uri,
}
# This time, check both dag2 and dag3 at the same time (tests filtering)
info = get_asset_triggered_next_run_info([dag2.dag_id, dag3.dag_id], session=session)
assert info[dag2.dag_id] == {
"ready": 1,
"total": 2,
"uri": "",
}
assert info[dag3.dag_id] == {
"ready": 1,
"total": 3,
"uri": "",
}
@pytest.mark.need_serialized_dag
def test_get_asset_triggered_next_run_info_with_unresolved_asset_alias(dag_maker, clear_assets):
asset_alias1 = AssetAlias(name="alias")
with dag_maker(dag_id="dag-1", schedule=[asset_alias1]):
pass
dag1 = dag_maker.dag
session = dag_maker.session
session.flush()
info = get_asset_triggered_next_run_info([dag1.dag_id], session=session)
assert info == {}
dag1_model = DagModel.get_dagmodel(dag1.dag_id)
assert dag1_model.get_asset_triggered_next_run_info(session=session) is None
@pytest.mark.parametrize(
"run_id_type",
[DagRunType.BACKFILL_JOB, DagRunType.SCHEDULED, DagRunType.ASSET_TRIGGERED],
)
def test_create_dagrun_disallow_manual_to_use_automated_run_id(run_id_type: DagRunType) -> None:
dag = DAG(dag_id="test", start_date=DEFAULT_DATE, schedule="@daily")
run_id = DagRun.generate_run_id(run_type=run_id_type, run_after=DEFAULT_DATE, logical_date=DEFAULT_DATE)
with pytest.raises(
ValueError,
match=re.escape(
f"A manual DAG run cannot use ID {run_id!r} since it is reserved for {run_id_type.value} runs"
),
):
SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag)).create_dagrun(
run_type=DagRunType.MANUAL,
run_id=run_id,
logical_date=DEFAULT_DATE,
data_interval=(DEFAULT_DATE, DEFAULT_DATE),
run_after=DEFAULT_DATE,
state=DagRunState.QUEUED,
triggered_by=DagRunTriggeredByType.TEST,
)
| TestQueries |
python | sanic-org__sanic | sanic/http/tls/creators.py | {
"start": 5280,
"end": 7907
} | class ____(CertCreator):
def check_supported(self) -> None:
try:
subprocess.run( # nosec B603 B607
["mkcert", "-help"],
check=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
except Exception as e:
raise SanicException(
"Sanic is attempting to use mkcert to generate local TLS "
"certificates since you did not supply a certificate, but "
"one is required. Sanic cannot proceed since mkcert does not "
"appear to be installed. Alternatively, you can use trustme. "
"Please install mkcert, trustme, or supply TLS certificates "
"to proceed. Installation instructions can be found here: "
"https://github.com/FiloSottile/mkcert.\n"
"Find out more information about your options here: "
"https://sanic.dev/en/guide/deployment/development.html#"
"automatic-tls-certificate"
) from e
def generate_cert(self, localhost: str) -> ssl.SSLContext:
try:
if not self.cert_path.exists():
message = "Generating TLS certificate"
# TODO: Validate input for security
with loading(message):
cmd = [
"mkcert",
"-key-file",
str(self.key_path),
"-cert-file",
str(self.cert_path),
localhost,
]
resp = subprocess.run( # nosec B603
cmd,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
sys.stdout.write("\r" + " " * (len(message) + 4))
sys.stdout.flush()
sys.stdout.write(resp.stdout)
finally:
@self.app.main_process_stop
async def cleanup(*_): # no cov
if self.tmpdir:
with suppress(FileNotFoundError):
self.key_path.unlink()
self.cert_path.unlink()
self.tmpdir.rmdir()
context = CertSimple(self.cert_path, self.key_path)
context.sanic["creator"] = "mkcert"
context.sanic["localhost"] = localhost
SanicSSLContext.create_from_ssl_context(context)
return context
| MkcertCreator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 8892,
"end": 9308
} | class ____(sgqlc.types.Enum):
"""The possible base permissions for repositories.
Enumeration Choices:
* `ADMIN`: Can read, write, and administrate repos by default
* `NONE`: No access
* `READ`: Can read repos by default
* `WRITE`: Can read and write repos by default
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "NONE", "READ", "WRITE")
| DefaultRepositoryPermissionField |
python | ansible__ansible | lib/ansible/plugins/lookup/password.py | {
"start": 12856,
"end": 17554
} | class ____(LookupBase):
def _parse_parameters(self, term):
"""Hacky parsing of params
See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
and the first_found lookup For how we want to fix this later
"""
first_split = term.split(' ', 1)
if len(first_split) <= 1:
# Only a single argument given, therefore it's a path
relpath = term
params = dict()
else:
relpath = first_split[0]
params = parse_kv(first_split[1])
if '_raw_params' in params:
# Spaces in the path?
relpath = u' '.join((relpath, params['_raw_params']))
del params['_raw_params']
# Check that we parsed the params correctly
if not term.startswith(relpath):
# Likely, the user had a non parameter following a parameter.
# Reject this as a user typo
raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
# No _raw_params means we already found the complete path when
# we split it initially
# Check for invalid parameters. Probably a user typo
invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
if invalid_params:
raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
# update options with what we got
if params:
self.set_options(direct=params)
# chars still might need more
chars = params.get('chars', self.get_option('chars'))
if chars and isinstance(chars, str):
tmp_chars = []
if u',,' in chars:
tmp_chars.append(u',')
tmp_chars.extend(c for c in chars.replace(u',,', u',').split(u',') if c)
self.set_option('chars', tmp_chars)
# return processed params
for field in VALID_PARAMS:
params[field] = self.get_option(field)
return relpath, params
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
self.set_options(var_options=variables, direct=kwargs)
changed = None
relpath, params = self._parse_parameters(term)
path = self._loader.path_dwim(relpath)
b_path = to_bytes(path, errors='surrogate_or_strict')
chars = _gen_candidate_chars(params['chars'])
ident = None
first_process = None
lockfile = None
try:
# make sure only one process finishes all the job first
first_process, lockfile = _get_lock(b_path)
content = _read_password_file(b_path)
if content is None or b_path == to_bytes('/dev/null'):
plaintext_password = random_password(params['length'], chars, params['seed'])
salt = None
changed = True
else:
plaintext_password, salt, ident = _parse_content(content)
encrypt = params['encrypt']
if encrypt and not salt:
changed = True
try:
salt = random_salt(BaseHash.algorithms[encrypt].salt_size)
except KeyError:
salt = random_salt()
if not ident:
ident = params['ident']
elif params['ident'] and ident != params['ident']:
raise AnsibleError('The ident parameter provided (%s) does not match the stored one (%s).' % (ident, params['ident']))
if encrypt and not ident:
try:
ident = BaseHash.algorithms[encrypt].implicit_ident
except KeyError:
ident = None
if ident:
changed = True
if changed and b_path != to_bytes('/dev/null'):
content = _format_content(plaintext_password, salt, encrypt=encrypt, ident=ident)
_write_password_file(b_path, content)
finally:
if first_process:
# let other processes continue
_release_lock(lockfile)
if encrypt:
password = do_encrypt(plaintext_password, encrypt, salt=salt, ident=ident)
ret.append(password)
else:
ret.append(plaintext_password)
return ret
| LookupModule |
python | modin-project__modin | modin/experimental/torch/datasets.py | {
"start": 1016,
"end": 2779
} | class ____:
"A self explainatory class to convert a DataFrame into a DataLoader that batches rows."
def __init__(
self,
df: DataFrame | ModinDataFrame,
batch_size: int,
features: Sequence[Hashable] = (),
sampler: Type[Sampler] | Sampler = SequentialSampler,
) -> None:
"""
Converts a Pandas/Modin DataFrame into a torch DataLoader.
NOTE: This function should eventually go into modin/utils.py.
Parameters
----------
df : DataFrame
batch_size : int, default: 1
features : Sequence[Hashable], default: ()
If specified, only these features will be used.
sampler: Type[Sampler] | Sampler, default: SequentialSampler
The sampler to use. By default, iterates over the DataFrame in order.
Returns
-------
DataLoader
DataLoader object backed by desired data.
"""
if features:
df = df[features]
if isinstance(sampler, type):
sampler = sampler(df)
self._df = df
self._batch_size = batch_size
self._sampler = sampler
def __len__(self):
# Sampler length is always valid.
return math.ceil(len(self._sampler) / self._batch_size)
def __iter__(self):
idx_buffer = []
for cnt, idx in enumerate(self._sampler):
idx_buffer.append(idx)
if self._end_of_batch(cnt):
yield self._df.iloc[idx_buffer].to_numpy()
idx_buffer = []
def _end_of_batch(self, counter: int):
return (
counter % self._batch_size == self._batch_size - 1
or counter == len(self._sampler) - 1
)
| ModinDataLoader |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/base.py | {
"start": 535,
"end": 10514
} | class ____(WebSocketHandler):
"""
WebSocket handler for opening files and streaming data.
The protocol on message receive (JSON messages):
{
"method": "read", # "write", "seek", etc. (required)
"kwargs": {...}, (optional)
"data": "<base64-encoded chunk>", # all data is base64-encoded (optional)
}
The protocol for sending data back to the client:
{
"status": 200, # HTTP status code (required)
"data": "<base64-encoded chunk>", # response data if any (optional)
"error": {"message": "error message", (required)
"traceback": ["line1", "line2", ...] (optional)} # if an error occurred (optional)
}
"""
LOCK_TIMEOUT = 100 # seconds
max_message_size = 5 * 1024 * 1024 * 1024 # 5 GB
__thread_lock = threading.Lock()
# ----------------------------------------------------------------
# Tornado WebSocket / Handler Hooks
# ----------------------------------------------------------------
async def open(self):
"""Open file."""
path = self.get_path_argument("path")
self.mode = self.get_argument("mode", default="r")
self.atomic = self.get_argument("atomic", default="false") == "true"
lock = self.get_argument("lock", default="false") == "true"
self.encoding = self.get_argument("encoding", default="utf-8")
self.file: FileIO = None
try:
self.path = self._load_path(path)
if lock and not await self._acquire_lock(path):
self.close(
1002,
self._parse_json(
HTTPStatus.LOCKED, message="File is locked"
),
)
return
self.file = await self._open_file()
except OSError as e:
self.log.warning("Error opening file", exc_info=e)
self.close(1002, self._parse_os_error(e))
except Exception as e:
self.log.exception("Error opening file")
self.close(1002, self._parse_error(e))
else:
await self._send_json(HTTPStatus.OK)
def on_close(self):
"""Close file."""
if self.file is not None:
self._close_file()
if self.__locked:
self._release_lock()
async def on_message(self, raw_message):
"""Handle incoming messages."""
self.log.debug("Received message: %s", raw_message)
try:
await self.handle_message(raw_message)
except Exception as e:
self.log.exception("Error handling message")
await self.write_message(self._parse_error(e), binary=True)
# ----------------------------------------------------------------
# Internal Helpers
# ----------------------------------------------------------------
async def handle_message(self, raw_message):
msg = self._decode_json(raw_message)
method, kwargs = await self._parse_message(msg)
await self._run_method(method, kwargs)
async def _open_file(self):
"""Open the file in the requested mode."""
if self.atomic and ("+" in self.mode or
"a" in self.mode or
"w" in self.mode):
if self.path.exists() and "w" not in self.mode:
copy2(self.path, self.atomic_path)
return self.atomic_path.open(self.mode)
return self.path.open(self.mode)
def _close_file(self):
self.file.close()
if self.atomic:
self.atomic_path.replace(self.path)
async def _run_method(self, method, kwargs):
"""Run a method with kwargs."""
try:
result = await getattr(self, f"_handle_{method}")(**kwargs)
except OSError as e:
self.log.warning("Error handling method: %s", method)
await self.write_message(self._parse_os_error(e), binary=True)
else:
await self._send_result(result)
async def _parse_message(self, msg):
"""Parse a message into method and kwargs."""
method = msg.pop("method", None)
if "data" in msg and isinstance(msg["data"], list):
msg["data"] = [self._decode_data(d) for d in msg["data"]]
elif "data" in msg:
msg["data"] = self._decode_data(msg["data"])
return method, msg
async def _acquire_lock(self, __start_time=None):
"""Acquire a lock on the file."""
if __start_time is None:
__start_time = time.time()
while self.__locked:
await asyncio.sleep(1)
if time.time() - __start_time > self.LOCK_TIMEOUT:
return False
with self.__thread_lock:
if self.__locked:
return await self._acquire_lock(__start_time=__start_time)
self.lock_path.touch(exist_ok=False)
return True
def _release_lock(self):
"""Release the lock on the file."""
with self.__thread_lock:
self.lock_path.unlink(missing_ok=True)
@property
def atomic_path(self):
"""Get the path to the atomic file."""
return self.path.parent / f".{self.path.name}.spyder.tmp"
@property
def lock_path(self):
"""Get the path to the atomic file."""
return self.path.parent / f".{self.path.name}.spyder.lck"
@property
def __locked(self):
return Path(self.lock_path).exists()
def _decode_json(self, raw_message):
"""Decode a JSON message (non-streamed)."""
return orjson.loads(raw_message)
async def _send_json(self, status: HTTPStatus, **data: dict):
"""Send a single JSON message."""
await self.write_message(self._parse_json(status, **data), binary=True)
def _parse_json(self, status: HTTPStatus, **data: dict) -> bytes:
"""Parse a single JSON message."""
return orjson.dumps({"status": status.value, **data})
def _parse_error(self, error: BaseException) -> bytes:
"""Parse an error response to the client."""
return self._parse_json(
HTTPStatus.INTERNAL_SERVER_ERROR,
message=str(error),
tracebacks=traceback.format_exception(
type(error), error, error.__traceback__
),
type=str(type(error)),
)
def _parse_os_error(self, e: OSError) -> bytes:
"""Parse an OSError response to the client."""
return self._parse_json(
HTTPStatus.EXPECTATION_FAILED,
strerror=e.strerror,
filename=e.filename,
errno=e.errno,
)
async def _send_msg_error(self, message):
await self._send_json(
HTTPStatus.BAD_REQUEST, message=message,
)
async def _send_result(self, result):
if result is None:
await self._send_json(HTTPStatus.NO_CONTENT)
elif isinstance(result, list):
await self._send_json(
HTTPStatus.OK, data=[self._encode_data(r) for r in result],
)
else:
await self._send_json(
HTTPStatus.OK, data=self._encode_data(result),
)
def _decode_data(self, data: str | object) -> str | bytes | object:
"""Decode data from a message."""
if not isinstance(data, str):
return data
if "b" in self.mode:
return base64.b64decode(data)
return base64.b64decode(data).decode(self.encoding)
def _encode_data(self, data: bytes | str | object) -> str:
"""Encode data for a message."""
if isinstance(data, bytes):
return base64.b64encode(data).decode("ascii")
if isinstance(data, str):
return base64.b64encode(data.encode(self.encoding)).decode("ascii")
return data
def _load_path(self, path_str: str) -> Path:
"""Convert path string to a Path object."""
return Path(path_str).expanduser()
# ----------------------------------------------------------------
# File Operation
# ----------------------------------------------------------------
async def _handle_write(self, data: bytes | str) -> int:
"""Write data to the file."""
return self.file.write(data)
async def _handle_flush(self):
"""Flush the file."""
return self.file.flush()
async def _handle_read(self, n: int = -1) -> bytes | str:
"""Read data from the file."""
return self.file.read(n)
async def _handle_seek(self, offset: int, whence: int = 0) -> int:
"""Seek to a new position in the file."""
return self.file.seek(offset, whence)
async def _handle_tell(self) -> int:
"""Get the current file position."""
return self.file.tell()
async def _handle_truncate(self, size: int | None = None) -> int:
"""Truncate the file to a new size."""
return self.file.truncate(size)
async def _handle_fileno(self):
"""Flush the file to disk."""
return self.file.fileno()
async def _handle_readline(self, size: int = -1) -> bytes | str:
"""Read a line from the file."""
return self.file.readline(size)
async def _handle_readlines(self, hint: int = -1) -> list[bytes | str]:
"""Read lines from the file."""
return self.file.readlines(hint)
async def _handle_writelines(self, lines: list[bytes | str]):
"""Write lines to the file."""
return self.file.writelines(lines)
async def _handle_isatty(self) -> bool:
"""Check if the file is a TTY."""
return self.file.isatty()
async def _handle_readable(self) -> bool:
"""Check if the file is readable."""
return self.file.readable()
async def _handle_writable(self) -> bool:
"""Check if the file is writable."""
return self.file.writable()
| FileWebSocketHandler |
python | Netflix__metaflow | metaflow/plugins/azure/includefile_support.py | {
"start": 150,
"end": 4298
} | class ____(object):
TYPE = "azure"
@classmethod
def get_root_from_config(cls, echo, create_on_absent=True):
from metaflow.metaflow_config import DATATOOLS_AZUREROOT
return DATATOOLS_AZUREROOT
def __init__(self):
# This local directory is used to house any downloaded blobs, for lifetime of
# this object as a context manager.
self._tmpdir = None
def _get_storage_backend(self, key):
"""
Return an AzureDatastore, rooted at the container level, no prefix.
Key MUST be a fully qualified path. e.g. <container_name>/b/l/o/b/n/a/m/e
"""
from metaflow.plugins.azure.azure_utils import parse_azure_full_path
# we parse out the container name only, and use that to root our storage implementation
container_name, _ = parse_azure_full_path(key)
# Import DATASTORES dynamically... otherwise, circular import
from metaflow.plugins import DATASTORES
storage_impl = [d for d in DATASTORES if d.TYPE == "azure"][0]
return storage_impl(container_name)
def __enter__(self):
return self
def __exit__(self, *args):
if self._tmpdir and os.path.exists(self._tmpdir):
shutil.rmtree(self._tmpdir)
def get(self, key=None, return_missing=False):
"""Key MUST be a fully qualified path with uri scheme. azure://<container_name>/b/l/o/b/n/a/m/e"""
# Azure.get() is meant for use within includefile.py ONLY.
# All existing call sites set return_missing=True.
#
# Support for return_missing=False may be added if/when the situation changes.
if not return_missing:
raise MetaflowException("Azure object supports only return_missing=True")
# We fabricate a uri scheme to fit into existing includefile code (just like local://)
if not key.startswith("azure://"):
raise MetaflowInternalError(
msg="Expected Azure object key to start with 'azure://'"
)
uri_style_key = key
short_key = key[8:]
storage = self._get_storage_backend(short_key)
azure_object = None
with storage.load_bytes([short_key]) as load_result:
for _, tmpfile, _ in load_result:
if tmpfile is None:
azure_object = AzureObject(uri_style_key, None, False, None)
else:
if not self._tmpdir:
self._tmpdir = mkdtemp(prefix="metaflow.includefile.azure.")
output_file_path = os.path.join(self._tmpdir, str(uuid.uuid4()))
shutil.move(tmpfile, output_file_path)
# Beats making another Azure API call!
sz = os.stat(output_file_path).st_size
azure_object = AzureObject(
uri_style_key, output_file_path, True, sz
)
break
return azure_object
def put(self, key, obj, overwrite=True):
"""Key MUST be a fully qualified path. <container_name>/b/l/o/b/n/a/m/e"""
storage = self._get_storage_backend(key)
storage.save_bytes([(key, io.BytesIO(obj))], overwrite=overwrite)
# We fabricate a uri scheme to fit into existing includefile code (just like local://)
return "azure://%s" % key
def info(self, key=None, return_missing=False):
# We fabricate a uri scheme to fit into existing includefile code (just like local://)
if not key.startswith("azure://"):
raise MetaflowInternalError(
msg="Expected Azure object key to start with 'azure://'"
)
# aliasing this purely for clarity
uri_style_key = key
short_key = key[8:]
storage = self._get_storage_backend(short_key)
blob_size = storage.size_file(short_key)
blob_exists = blob_size is not None
if not blob_exists and not return_missing:
raise MetaflowException("Azure blob '%s' not found" % uri_style_key)
return AzureObject(uri_style_key, None, blob_exists, blob_size)
| Azure |
python | spack__spack | lib/spack/spack/bootstrap/clingo.py | {
"start": 1616,
"end": 9142
} | class ____:
def __init__(self, configuration):
_add_compilers_if_missing()
self.host_platform = spack.platforms.host()
self.host_os = self.host_platform.default_operating_system()
self.host_target = spack.vendor.archspec.cpu.host().family
self.host_architecture = spack.spec.ArchSpec.default_arch()
self.host_architecture.target = str(self.host_target)
self.host_compiler = self._valid_compiler_or_raise()
self.host_python = self.python_external_spec()
if str(self.host_platform) == "linux":
self.host_libc = self.libc_external_spec()
self.external_cmake, self.external_bison = self._externals_from_yaml(configuration)
def _valid_compiler_or_raise(self):
if str(self.host_platform) == "linux":
compiler_name = "gcc"
elif str(self.host_platform) == "darwin":
compiler_name = "apple-clang"
elif str(self.host_platform) == "windows":
compiler_name = "msvc"
elif str(self.host_platform) == "freebsd":
compiler_name = "llvm"
else:
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
candidates = [
x
for x in spack.compilers.config.CompilerFactory.from_packages_yaml(spack.config.CONFIG)
if x.name == compiler_name
]
if not candidates:
raise RuntimeError(
f"Cannot find any version of {compiler_name} to bootstrap clingo from sources"
)
candidates.sort(key=lambda x: x.version, reverse=True)
best = candidates[0]
# Get compilers for bootstrapping from the 'builtin' repository
best.namespace = "builtin"
# If the compiler does not support C++ 14, fail with a legible error message
try:
_ = best.package.standard_flag(language="cxx", standard="14")
except RuntimeError as e:
raise RuntimeError(
"cannot find a compiler supporting C++ 14 [needed to bootstrap clingo]"
) from e
return candidates[0]
def _externals_from_yaml(
self, configuration: "spack.config.Configuration"
) -> Tuple[Optional["spack.spec.Spec"], Optional["spack.spec.Spec"]]:
packages_yaml = configuration.get("packages")
requirements = {"cmake": "@3.20:", "bison": "@2.5:"}
selected: Dict[str, Optional["spack.spec.Spec"]] = {"cmake": None, "bison": None}
for pkg_name in ["cmake", "bison"]:
if pkg_name not in packages_yaml:
continue
candidates = packages_yaml[pkg_name].get("externals", [])
for candidate in candidates:
s = spack.spec.Spec(candidate["spec"], external_path=candidate["prefix"])
if not s.satisfies(requirements[pkg_name]):
continue
if not s.intersects(f"arch={self.host_architecture}"):
continue
selected[pkg_name] = self._external_spec(s)
break
return selected["cmake"], selected["bison"]
def prototype_path(self) -> pathlib.Path:
"""Path to a prototype concrete specfile for clingo"""
parent_dir = pathlib.Path(__file__).parent
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-{self.host_target}.json"
if str(self.host_platform) == "linux":
# Using aarch64 as a fallback, since it has gnuconfig (x86_64 doesn't have it)
if not result.exists():
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-aarch64.json"
elif str(self.host_platform) == "freebsd":
result = parent_dir / "prototypes" / f"clingo-{self.host_platform}-amd64.json"
elif not result.exists():
raise RuntimeError(f"Cannot bootstrap clingo from sources on {self.host_platform}")
return result
def concretize(self) -> "spack.spec.Spec":
# Read the prototype and mark it NOT concrete
s = spack.spec.Spec.from_specfile(str(self.prototype_path()))
s._mark_concrete(False)
# These are nodes in the cmake stack, whose versions are frequently deprecated for
# security reasons. In case there is no external cmake on this machine, we'll update
# their versions to the most preferred, within the valid range, according to the
# repository we know.
to_be_updated = {
pkg_name: (spack.repo.PATH.get_pkg_class(pkg_name), valid_versions)
for pkg_name, valid_versions in {
"ca-certificates-mozilla": ":",
"openssl": "3:3",
"curl": "8:8",
"cmake": "3.16:3",
"libiconv": "1:1",
"ncurses": "6:6",
"m4": "1.4",
}.items()
}
# Tweak it to conform to the host architecture + update the version of a few dependencies
for node in s.traverse():
# Clear patches, we'll compute them correctly later
node.patches.clear()
if "patches" in node.variants:
del node.variants["patches"]
node.architecture.os = str(self.host_os)
node.architecture = self.host_architecture
if node.name == "gcc-runtime":
node.versions = self.host_compiler.versions
if node.name in to_be_updated:
pkg_cls, valid_versions = to_be_updated[node.name]
_select_best_version(pkg_cls=pkg_cls, node=node, valid_versions=valid_versions)
# Can't use re2c@3.1 with Python 3.6
if self.host_python.satisfies("@3.6"):
s["re2c"].versions.versions = [spack.version.from_string("=2.2")]
for edge in spack.traverse.traverse_edges([s], cover="edges"):
if edge.spec.name == "python":
edge.spec = self.host_python
if edge.spec.name == "bison" and self.external_bison:
edge.spec = self.external_bison
if edge.spec.name == "cmake" and self.external_cmake:
edge.spec = self.external_cmake
if edge.spec.name == self.host_compiler.name:
edge.spec = self.host_compiler
if "libc" in edge.virtuals:
edge.spec = self.host_libc
spack.spec._inject_patches_variant(s)
s._finalize_concretization()
# Work around the fact that the installer calls Spec.dependents() and
# we modified edges inconsistently
return s.copy()
def python_external_spec(self) -> "spack.spec.Spec":
"""Python external spec corresponding to the current running interpreter"""
result = spack.spec.Spec(spec_for_current_python(), external_path=sys.exec_prefix)
return self._external_spec(result)
def libc_external_spec(self) -> "spack.spec.Spec":
detector = spack.compilers.libraries.CompilerPropertyDetector(self.host_compiler)
result = detector.default_libc()
return self._external_spec(result)
def _external_spec(self, initial_spec) -> "spack.spec.Spec":
initial_spec.namespace = "builtin"
initial_spec.architecture = self.host_architecture
for flag_type in spack.spec.FlagMap.valid_compiler_flags():
initial_spec.compiler_flags[flag_type] = []
return spack.spec.parse_with_version_concrete(initial_spec)
| ClingoBootstrapConcretizer |
python | dagster-io__dagster | python_modules/libraries/dagster-spark/dagster_spark/resources.py | {
"start": 341,
"end": 2055
} | class ____:
def __init__(self, logger):
self.logger = check.inst_param(logger, "logger", DagsterLogManager)
def run_spark_job(self, config, main_class):
check.dict_param(config, "config")
check.str_param(main_class, "main_class")
# Extract parameters from config
(
master_url,
deploy_mode,
application_jar,
spark_conf,
application_arguments,
spark_home,
) = [
config.get(k)
for k in (
"master_url",
"deploy_mode",
"application_jar",
"spark_conf",
"application_arguments",
"spark_home",
)
]
if not os.path.exists(application_jar):
raise SparkOpError(
f"Application jar {application_jar} does not exist. A valid jar must be "
"built before running this op."
)
spark_shell_cmd = construct_spark_shell_command(
application_jar=application_jar,
main_class=main_class,
master_url=master_url,
spark_conf=spark_conf,
deploy_mode=deploy_mode,
application_arguments=application_arguments,
spark_home=spark_home,
)
self.logger.info("Running spark-submit: " + " ".join(spark_shell_cmd))
retcode = subprocess.call(" ".join(spark_shell_cmd), shell=True)
if retcode != 0:
raise SparkOpError("Spark job failed. Please consult your logs.")
@dagster_maintained_resource
@resource
def spark_resource(context):
return SparkResource(context.log)
| SparkResource |
python | getsentry__sentry | tests/acceptance/test_project_ownership.py | {
"start": 117,
"end": 900
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.path = f"/settings/{self.organization.slug}/projects/{self.project.slug}/ownership/"
def test_simple(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.wait_until_test_id("ownership-rules-table")
def test_open_modal(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.wait_until_test_id("ownership-rules-table")
self.browser.click('[aria-label="Edit Rules"]')
self.browser.wait_until("[role='dialog']")
self.browser.wait_until_not("div[class$='loadingIndicator']")
| ProjectOwnershipTest |
python | openai__openai-python | src/openai/types/beta/realtime/input_audio_buffer_clear_event.py | {
"start": 232,
"end": 489
} | class ____(BaseModel):
type: Literal["input_audio_buffer.clear"]
"""The event type, must be `input_audio_buffer.clear`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| InputAudioBufferClearEvent |
python | getsentry__sentry | src/sentry/issues/endpoints/event_grouping_info.py | {
"start": 698,
"end": 2261
} | class ____(ProjectEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: HttpRequest, project, event_id) -> HttpResponse:
"""
Returns the grouping information for an event
`````````````````````````````````````````````
This endpoint returns a JSON dump of the metadata that went into the
grouping algorithm.
"""
event = eventstore.backend.get_event_by_id(project.id, event_id)
if event is None:
raise ResourceDoesNotExist
grouping_config = load_grouping_config(event.get_grouping_config())
# We want the stacktraces in the grouping info to match the issue details page's main
# stacktrace, which by default is upside down compared to the event JSON. Therefore, unless
# user has set a preference to prevent it, we want to flip the grouping info stacktraces,
# too.
should_reverse_stacktraces = (
get_option_from_list(
user_option_service.get_many(filter={"user_ids": [request.user.id]}),
key="stacktrace_order",
)
!= StacktraceOrder.MOST_RECENT_LAST
)
grouping_config.initial_context["reverse_stacktraces"] = should_reverse_stacktraces
grouping_info = get_grouping_info(grouping_config, project, event)
return HttpResponse(
orjson.dumps(grouping_info, option=orjson.OPT_UTC_Z), content_type="application/json"
)
| EventGroupingInfoEndpoint |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 52488,
"end": 52906
} | class ____:
xlDay = 1 # from enum XlDataSeriesDate
xlMonth = 3 # from enum XlDataSeriesDate
xlWeekday = 2 # from enum XlDataSeriesDate
xlYear = 4 # from enum XlDataSeriesDate
xlAutoFill = 4 # from enum XlDataSeriesType
xlChronological = 3 # from enum XlDataSeriesType
xlDataSeriesLinear = -4132 # from enum XlDataSeriesType
xlGrowth = 2 # from enum XlDataSeriesType
| DataSeriesDate |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1500078,
"end": 1501452
} | class ____(sgqlc.types.Type, Node):
"""An update sent to sponsors of a user or organization on GitHub
Sponsors.
"""
__schema__ = github_schema
__field_names__ = ("author", "body", "created_at", "is_published", "sponsorable", "subject", "updated_at")
author = sgqlc.types.Field("User", graphql_name="author")
"""The author of the newsletter."""
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The contents of the newsletter, the message the sponsorable wanted
to give.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
is_published = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPublished")
"""Indicates if the newsletter has been made available to sponsors."""
sponsorable = sgqlc.types.Field(sgqlc.types.non_null(Sponsorable), graphql_name="sponsorable")
"""The user or organization this newsletter is from."""
subject = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="subject")
"""The subject of the newsletter, what it's about."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| SponsorshipNewsletter |
python | pytorch__pytorch | torch/_export/serde/schema_check.py | {
"start": 14137,
"end": 15057
} | class ____ {{
static_assert(!std::is_reference_v<T>, "ForwardRef cannot be a reference type");
public:
ForwardRef(): ptr_(std::make_unique<T>()) {{}}
ForwardRef(ForwardRef<T>&&);
ForwardRef(const ForwardRef<T>& other): ptr_(std::make_unique<T>(*other.ptr_)) {{}}
ForwardRef<T>& operator=(ForwardRef<T>&&);
ForwardRef<T>& operator=(const ForwardRef<T>& other) {{
ptr_ = std::make_unique<T>(*other.ptr_);
return *this;
}}
~ForwardRef();
const T& operator*() const {{
return *ptr_;
}}
const T* operator->() const {{
return ptr_.get();
}}
void emplace(T&& t) {{
ptr_ = std::make_unique<T>(std::move(t));
}}
private:
std::unique_ptr<T> ptr_;
}};
template <typename T>
void to_json(nlohmann::json& j, const ForwardRef<T>& p) {{
j = *p;
}}
template <typename T>
void from_json(const nlohmann::json& j, ForwardRef<T>& p) {{
p.emplace(j.template get<T>());
}}
| ForwardRef |
python | apache__thrift | lib/py/src/protocol/TBinaryProtocol.py | {
"start": 6451,
"end": 7091
} | class ____(TProtocolFactory):
def __init__(self, strictRead=False, strictWrite=True, **kwargs):
self.strictRead = strictRead
self.strictWrite = strictWrite
self.string_length_limit = kwargs.get('string_length_limit', None)
self.container_length_limit = kwargs.get('container_length_limit', None)
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite,
string_length_limit=self.string_length_limit,
container_length_limit=self.container_length_limit)
return prot
| TBinaryProtocolFactory |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_hd_invalid_input_test.py | {
"start": 1070,
"end": 3378
} | class ____(tpu_embedding_base_test.TPUEmbeddingBaseTest):
def test_build_incorrect_output_shapes(self):
_, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
# Output shapes is set in the mid_level_api, but build with incorrect output
# shapes.
mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]
with self.assertRaisesRegex(ValueError,
'Inconsistent shape founded for input feature'):
mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])
def test_enqueue_incorrect_shape_feature(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_high_dimensional_sparse_dataset(strategy)
sparse_iter = iter(
strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
mid_level_api._output_shapes = [TensorShape((1, 1)) for _ in range(3)]
# The output shape passed to build method is consistent.
mid_level_api.build([TensorShape([1, 1, 1]) for _ in range(3)])
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(sparse_iter), training=False)
return strategy.run(step)
# Enqueued tensor has shape inconsistent with the output shape setting.
with self.assertRaisesRegex(ValueError,
'Inconsistent shape founded for input feature'):
test_fn()
def test_not_fully_defined_output_shapes_in_feature_config(self):
_, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
# Feature config sets undefined output shapes
mid_level_api._output_shapes = [TensorShape(None) for _ in range(3)]
with self.assertRaisesRegex(ValueError, 'Input Feature'):
mid_level_api.build()
def test_not_fully_defined_output_shapes_for_build(self):
_, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
# Build with undefined output shape
with self.assertRaisesRegex(ValueError, 'Input Feature'):
mid_level_api.build([TensorShape([1, None, None]) for _ in range(3)])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingTest |
python | pytorch__pytorch | torch/testing/_internal/opinfo/core.py | {
"start": 3688,
"end": 11141
} | class ____:
"""Represents sample inputs to a function."""
__slots__ = [
"input",
"args",
"kwargs",
"output_process_fn_grad",
"broadcasts_input",
"name",
]
def __init__(
self,
input,
*var_args,
args=None,
kwargs=None,
output_process_fn_grad=None,
broadcasts_input=None,
name=None,
**var_kwargs,
):
# input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
self.input = input
# Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as
# SampleInput(input, *args, **kwargs) but not to mix the two forms
if args is not None or kwargs is not None:
assert not var_args and not var_kwargs, """
A SampleInput can be constructed "naturally" with *args and **kwargs or by
explicitly setting the "args" and "kwargs" parameters, but the two
methods of construction cannot be mixed!"""
elif var_args or var_kwargs:
assert (
output_process_fn_grad is None
and broadcasts_input is None
and name is None
), """
A SampleInput constructed "naturally" with *args and **kwargs
cannot specify additional metadata in keyword arguments"""
self.args = args if args is not None else var_args
assert isinstance(self.args, tuple)
self.kwargs = kwargs if kwargs is not None else var_kwargs
assert isinstance(self.kwargs, dict)
self.output_process_fn_grad = (
output_process_fn_grad
if output_process_fn_grad is not None
else lambda x: x
)
self.name = name if name is not None else ""
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
# it is verified that we get a `RuntimeError` with this sample,
# and inplace variant. Also inplace grad{grad} tests are skipped,
# for such inputs (as they will error out otherwise).
self.broadcasts_input = (
broadcasts_input if broadcasts_input is not None else False
)
def with_metadata(
self, *, output_process_fn_grad=None, broadcasts_input=None, name=None
):
if output_process_fn_grad is not None:
self.output_process_fn_grad = output_process_fn_grad
if broadcasts_input is not None:
self.broadcasts_input = broadcasts_input
if name is not None:
self.name = name
return self
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows,
# formatting the fields like `input`, `args`, etc with `formatter`
# callable to customize the representation.
# Look at `summary` method for example.
arguments = [
f"input={formatter(self.input)}",
f"args={formatter(self.args)}",
f"kwargs={formatter(self.kwargs)}",
f"broadcasts_input={self.broadcasts_input}",
f"name={repr(self.name)}",
]
return f"SampleInput({', '.join(a for a in arguments if a is not None)})"
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape))
dtype = str(arg.dtype)
device = str(arg.device)
contiguity_suffix = ""
# NB: sparse CSR tensors annoyingly return is_sparse=False
is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr
if not is_sparse and not arg.is_contiguous():
contiguity_suffix = ", contiguous=False"
return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]'
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
with torch.no_grad():
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = (
tt(self.input),
tt(self.args),
tt(self.kwargs),
)
# Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid!
return SampleInput(
sample_tt_input,
args=tt_args,
kwargs=tt_kwargs,
output_process_fn_grad=self.output_process_fn_grad,
broadcasts_input=self.broadcasts_input,
name=self.name + "_transformed",
)
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
if t.dtype is torch.bfloat16:
return t.detach().cpu().to(torch.float32).numpy()
if t.dtype is torch.chalf:
return t.detach().cpu().to(torch.cfloat).numpy()
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return t
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
elif isinstance(t, torch.dtype):
return t
return t
return self.transform(to_noncontiguous)
NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"])
| SampleInput |
python | sympy__sympy | sympy/polys/domains/powerseriesring.py | {
"start": 2029,
"end": 6415
} | class ____(Ring[PowerSeriesElement[Er]], CompositeDomain):
"""A Domain class for representing univariate power series rings.
Notes
=====
This class is at experimental stage. Proper domain methods should be added to
integrate with SymPy's existing domain framework.
"""
is_PowerSeriesRing = is_Series = True
has_assoc_Ring = True
has_assoc_Field = False
ring: SeriesRingProto[Er]
dtype: type[PowerSeriesElement[Er]]
gen: PowerSeriesElement[Er]
symbol: Expr
domain: Domain[Er]
def __init__(self, domain: Domain[Er], symbol: Expr | str = "x", prec: int = 6):
ring, gen = power_series_ring(str(symbol), domain, prec)
self.ring = ring
self.gen = gen
self.dtype = ring.dtype
self.domain = ring.domain
self.symbol = ring.symbol
def __eq__(self, other: object) -> bool:
if isinstance(other, PowerSeriesRing):
return self.ring == other.ring
else:
return NotImplemented
def __hash__(self) -> int:
return hash((self.__class__.__name__, self.ring, self.domain, self.symbol))
def __repr__(self) -> str:
return f"{self.domain}[[{self.symbol}], {self.prec}]"
def new( # type: ignore
self, element: Expr | Er | int
) -> PowerSeriesElement[Er]:
return self.ring.ring_new(element)
def of_type(self, element) -> TypeIs[PowerSeriesElement[Er]]:
"""Check if ``a`` is of type ``dtype``."""
return self.ring.is_element(element)
@cached_property
def zero(self) -> PowerSeriesElement[Er]: # type: ignore
return self.ring.zero
@cached_property
def one(self) -> PowerSeriesElement[Er]: # type: ignore
return self.ring.one
@cached_property
def prec(self) -> int:
return self.ring.prec
def is_unit(self, a: PowerSeriesElement[Er]) -> bool:
"""Returns ``True`` if ``constant coefficient`` of series is a unit of ``self``"""
if not a.is_ground:
return False
K = self.domain
return K.is_unit(a.constant_coefficient())
def canonical_unit(self, a: PowerSeriesElement[Er]) -> PowerSeriesElement[Er]:
u = self.domain.canonical_unit(a.constant_coefficient())
return self.ring.from_ground(u)
def to_sympy(self, a: PowerSeriesElement[Er]) -> Expr:
"""Convert `a` to a SymPy object."""
return self.ring.to_expr(a)
def from_sympy(self, a: Expr) -> PowerSeriesElement[Er]:
"""Convert SymPy's expression to `dtype`."""
return self.ring.from_expr(a)
def from_ZZ(K1, a, K0):
"""Convert a Python `int` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def from_ZZ_python(K1, a, K0):
"""Convert a Python `int` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def from_QQ(K1, a, K0):
"""Convert a Python `Fraction` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def from_QQ_python(K1, a, K0):
"""Convert a Python `Fraction` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY `mpz` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY `mpq` object to `dtype`."""
return K1.ring.from_ground(K1.domain.convert(a, K0))
def is_positive(self, a: PowerSeriesElement[Er]) -> bool:
"""Returns True if `constant coefficient(a)` is positive."""
c = a.constant_coefficient()
return self.domain.is_positive(c)
def is_negative(self, a: PowerSeriesElement[Er]) -> bool:
"""Returns True if `constant coefficient(a)` is negative."""
c = a.constant_coefficient()
return self.domain.is_negative(c)
def is_nonpositive(self, a: PowerSeriesElement[Er]) -> bool:
"""Returns True if `constant coefficient(a)` is non-positive."""
c = a.constant_coefficient()
return self.domain.is_nonpositive(c)
def is_nonnegative(self, a: PowerSeriesElement[Er]) -> bool:
"""Returns True if `constant coefficient(a)` is non-negative."""
c = a.constant_coefficient()
return self.domain.is_nonnegative(c)
| PowerSeriesRing |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/transform_observation.py | {
"start": 13344,
"end": 15814
} | class ____(
TransformObservation[WrapperObsType, ActType, ObsType],
gym.utils.RecordConstructorArgs,
):
"""Resizes image observations using OpenCV to a specified shape.
A vector version of the wrapper exists :class:`gymnasium.wrappers.vector.ResizeObservation`.
Example:
>>> import gymnasium as gym
>>> from gymnasium.wrappers import ResizeObservation
>>> env = gym.make("CarRacing-v3")
>>> env.observation_space.shape
(96, 96, 3)
>>> resized_env = ResizeObservation(env, (32, 32))
>>> resized_env.observation_space.shape
(32, 32, 3)
Change logs:
* v0.12.6 - Initially added
* v1.0.0 - Requires ``shape`` with a tuple of two integers
"""
def __init__(self, env: gym.Env[ObsType, ActType], shape: tuple[int, int]):
"""Constructor that requires an image environment observation space with a shape.
Args:
env: The environment to wrap
shape: The resized observation shape
"""
assert isinstance(env.observation_space, spaces.Box)
assert len(env.observation_space.shape) in {2, 3}
assert np.all(env.observation_space.low == 0) and np.all(
env.observation_space.high == 255
)
assert env.observation_space.dtype == np.uint8
assert isinstance(shape, tuple)
assert len(shape) == 2
assert all(np.issubdtype(type(elem), np.integer) for elem in shape)
assert all(x > 0 for x in shape)
try:
import cv2
except ImportError as e:
raise DependencyNotInstalled(
'opencv (cv2) is not installed, run `pip install "gymnasium[other]"`'
) from e
self.shape: Final[tuple[int, int]] = tuple(shape)
# for some reason, cv2.resize will return the shape in reverse
self.cv2_shape: Final[tuple[int, int]] = (shape[1], shape[0])
new_observation_space = spaces.Box(
low=0,
high=255,
shape=self.shape + env.observation_space.shape[2:],
dtype=np.uint8,
)
gym.utils.RecordConstructorArgs.__init__(self, shape=shape)
TransformObservation.__init__(
self,
env=env,
func=lambda obs: cv2.resize(
obs, self.cv2_shape, interpolation=cv2.INTER_AREA
),
observation_space=new_observation_space,
)
| ResizeObservation |
python | coleifer__peewee | examples/anomaly_detection.py | {
"start": 329,
"end": 1697
} | class ____(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
| StdDev |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/bzip2/package.py | {
"start": 218,
"end": 586
} | class ____(Package):
"""This packagae has the variants shared
defaulted to True"""
homepage = "https://example.com"
url = "https://example.com/bzip2-1.0.8tar.gz"
version("1.0.8", sha256="ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269")
variant("shared", default=True, description="Enables the build of shared libraries.")
| Bzip2 |
python | huggingface__transformers | src/transformers/models/swin2sr/modeling_swin2sr.py | {
"start": 37850,
"end": 39370
} | class ____(nn.Module):
def __init__(self, config, num_features):
super().__init__()
self.upscale = config.upscale
self.conv_bicubic = nn.Conv2d(config.num_channels, num_features, 3, 1, 1)
self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1)
self.activation = nn.LeakyReLU(inplace=True)
self.conv_aux = nn.Conv2d(num_features, config.num_channels, 3, 1, 1)
self.conv_after_aux = nn.Sequential(nn.Conv2d(3, num_features, 3, 1, 1), nn.LeakyReLU(inplace=True))
self.upsample = Upsample(config.upscale, num_features)
self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1)
def forward(self, sequence_output, bicubic, height, width):
bicubic = self.conv_bicubic(bicubic)
sequence_output = self.conv_before_upsample(sequence_output)
sequence_output = self.activation(sequence_output)
aux = self.conv_aux(sequence_output)
sequence_output = self.conv_after_aux(aux)
sequence_output = (
self.upsample(sequence_output)[:, :, : height * self.upscale, : width * self.upscale]
+ bicubic[:, :, : height * self.upscale, : width * self.upscale]
)
reconstruction = self.final_convolution(sequence_output)
return reconstruction, aux
@auto_docstring(
custom_intro="""
Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.
"""
)
| PixelShuffleAuxUpsampler |
python | scipy__scipy | scipy/_lib/tests/test__util.py | {
"start": 16209,
"end": 23719
} | class ____:
def kmeans(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs)
def kmeans2(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs)
def barycentric(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
x1, x2, y1 = rng.random((3, 10))
f = interpolate.BarycentricInterpolator(x1, y1, **kwargs)
return f(x2)
def clarkson_woodruff_transform(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
return linalg.clarkson_woodruff_transform(rng.random((10, 10)), 3, **kwargs)
def basinhopping(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
return optimize.basinhopping(optimize.rosen, rng.random(3), **kwargs).x
def opt(self, fun, **kwargs):
rng = np.random.default_rng(3458934594269824562)
bounds = optimize.Bounds(-rng.random(3) * 10, rng.random(3) * 10)
return fun(optimize.rosen, bounds, **kwargs).x
def differential_evolution(self, **kwargs):
return self.opt(optimize.differential_evolution, **kwargs)
def dual_annealing(self, **kwargs):
return self.opt(optimize.dual_annealing, **kwargs)
def check_grad(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
x = rng.random(3)
return optimize.check_grad(optimize.rosen, optimize.rosen_der, x,
direction='random', **kwargs)
def random_array(self, **kwargs):
return sparse.random_array((10, 10), density=1.0, **kwargs).toarray()
def random(self, **kwargs):
return sparse.random(10, 10, density=1.0, **kwargs).toarray()
def rand(self, **kwargs):
return sparse.rand(10, 10, density=1.0, **kwargs).toarray()
def svds(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
A = rng.random((10, 10))
return sparse.linalg.svds(A, **kwargs)
def random_rotation(self, **kwargs):
return spatial.transform.Rotation.random(3, **kwargs).as_matrix()
def goodness_of_fit(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
data = rng.random(100)
return stats.goodness_of_fit(stats.laplace, data, **kwargs).pvalue
def permutation_test(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
data = tuple(rng.random((2, 100)))
def statistic(x, y, axis): return np.mean(x, axis=axis) - np.mean(y, axis=axis)
return stats.permutation_test(data, statistic, **kwargs).pvalue
def bootstrap(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
data = (rng.random(100),)
return stats.bootstrap(data, np.mean, **kwargs).confidence_interval
def dunnett(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
x, y, control = rng.random((3, 100))
return stats.dunnett(x, y, control=control, **kwargs).pvalue
def sobol_indices(self, **kwargs):
def f_ishigami(x): return (np.sin(x[0]) + 7 * np.sin(x[1]) ** 2
+ 0.1 * (x[2] ** 4) * np.sin(x[0]))
dists = [stats.uniform(loc=-np.pi, scale=2 * np.pi),
stats.uniform(loc=-np.pi, scale=2 * np.pi),
stats.uniform(loc=-np.pi, scale=2 * np.pi)]
res = stats.sobol_indices(func=f_ishigami, n=1024, dists=dists, **kwargs)
return res.first_order
def qmc_engine(self, engine, **kwargs):
qrng = engine(d=1, **kwargs)
return qrng.random(4)
def halton(self, **kwargs):
return self.qmc_engine(stats.qmc.Halton, **kwargs)
def sobol(self, **kwargs):
return self.qmc_engine(stats.qmc.Sobol, **kwargs)
def latin_hypercube(self, **kwargs):
return self.qmc_engine(stats.qmc.LatinHypercube, **kwargs)
def poisson_disk(self, **kwargs):
return self.qmc_engine(stats.qmc.PoissonDisk, **kwargs)
def multivariate_normal_qmc(self, **kwargs):
X = stats.qmc.MultivariateNormalQMC([0], **kwargs)
return X.random(4)
def multinomial_qmc(self, **kwargs):
X = stats.qmc.MultinomialQMC([0.5, 0.5], 4, **kwargs)
return X.random(4)
def permutation_method(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
data = tuple(rng.random((2, 100)))
method = stats.PermutationMethod(**kwargs)
return stats.pearsonr(*data, method=method).pvalue
def bootstrap_method(self, **kwargs):
rng = np.random.default_rng(3458934594269824562)
data = tuple(rng.random((2, 100)))
res = stats.pearsonr(*data)
method = stats.BootstrapMethod(**kwargs)
return res.confidence_interval(method=method)
@pytest.mark.fail_slow(10)
@pytest.mark.slow
@pytest.mark.parametrize("method, arg_name", [
(kmeans, "seed"),
(kmeans2, "seed"),
(barycentric, "random_state"),
(clarkson_woodruff_transform, "seed"),
(basinhopping, "seed"),
(differential_evolution, "seed"),
(dual_annealing, "seed"),
(check_grad, "seed"),
(random_array, 'random_state'),
(random, 'random_state'),
(rand, 'random_state'),
(random_rotation, "random_state"),
(goodness_of_fit, "random_state"),
(permutation_test, "random_state"),
(bootstrap, "random_state"),
(permutation_method, "random_state"),
(bootstrap_method, "random_state"),
(dunnett, "random_state"),
(sobol_indices, "random_state"),
(halton, "seed"),
(sobol, "seed"),
(latin_hypercube, "seed"),
(poisson_disk, "seed"),
(multivariate_normal_qmc, "seed"),
(multinomial_qmc, "seed"),
])
def test_rng_deterministic(self, method, arg_name):
np.random.seed(None)
seed = 2949672964
rng = np.random.default_rng(seed)
message = "got multiple values for argument now known as `rng`"
with pytest.raises(TypeError, match=message):
method(self, **{'rng': rng, arg_name: seed})
rng = np.random.default_rng(seed)
res1 = method(self, rng=rng)
res2 = method(self, rng=seed)
assert_equal(res2, res1)
if method.__name__ in {"dunnett", "sobol_indices"}:
# the two kwargs have essentially the same behavior for these functions
res3 = method(self, **{arg_name: seed})
assert_equal(res3, res1)
return
rng = np.random.RandomState(seed)
res1 = method(self, **{arg_name: rng})
res2 = method(self, **{arg_name: seed})
if method.__name__ in {"halton", "sobol", "latin_hypercube", "poisson_disk",
"multivariate_normal_qmc", "multinomial_qmc"}:
# For these, passing `random_state=RandomState(seed)` is not the same as
# passing integer `seed`.
res1b = method(self, **{arg_name: np.random.RandomState(seed)})
assert_equal(res1b, res1)
res2b = method(self, **{arg_name: seed})
assert_equal(res2b, res2)
return
np.random.seed(seed)
res3 = method(self, **{arg_name: None})
assert_equal(res2, res1)
assert_equal(res3, res1)
| TestTransitionToRNG |
python | numba__numba | numba/tests/test_mixed_tuple_unroller.py | {
"start": 40186,
"end": 48347
} | class ____(MemoryLeakMixin, TestCase):
def test_01(self):
@njit
def foo():
a = [12, 12.7, 3j, 4]
acc = 0
for i in range(len(literal_unroll(a))):
acc += a[i]
if acc.real < 26:
acc -= 1
else:
break
return acc
self.assertEqual(foo(), foo.py_func())
def test_02(self):
# same as test_1 but without the explicit loop canonicalisation
@njit
def foo():
x = [12, 12.7, 3j, 4]
acc = 0
for a in literal_unroll(x):
acc += a
if acc.real < 26:
acc -= 1
else:
break
return acc
self.assertEqual(foo(), foo.py_func())
def test_03(self):
# two unrolls
@njit
def foo():
x = [12, 12.7, 3j, 4]
y = ['foo', 8]
acc = 0
for a in literal_unroll(x):
acc += a
if acc.real < 26:
acc -= 1
else:
for t in literal_unroll(y):
acc += t is False
break
return acc
self.assertEqual(foo(), foo.py_func())
def test_04(self):
# two unrolls, one is a const list, one is a tuple
@njit
def foo():
x = [12, 12.7, 3j, 4]
y = ('foo', 8)
acc = 0
for a in literal_unroll(x):
acc += a
if acc.real < 26:
acc -= 1
else:
for t in literal_unroll(y):
acc += t is False
break
return acc
self.assertEqual(foo(), foo.py_func())
def test_05(self):
# illegal, list has to be const
@njit
def foo(tup1, tup2):
acc = 0
for a in literal_unroll(tup1):
if a[0] > 1:
acc += tup2[0].sum()
return acc
n = 10
tup1 = [np.zeros(10), np.zeros(10)]
tup2 = (np.ones((n,)), np.ones((n, n)), np.ones((n, n, n)),
np.ones((n, n, n, n)), np.ones((n, n, n, n, n)))
with self.assertRaises(errors.UnsupportedError) as raises:
foo(tup1, tup2)
msg = "Invalid use of literal_unroll with a function argument"
self.assertIn(msg, str(raises.exception))
def test_06(self):
# illegal: list containing non const
@njit
def foo():
n = 10
tup = [np.ones((n,)), np.ones((n, n)), "ABCDEFGHJI", (1, 2, 3),
(1, 'foo', 2, 'bar'), {3, 4, 5, 6, 7}]
acc = 0
for a in literal_unroll(tup):
acc += len(a)
return acc
with self.assertRaises(errors.UnsupportedError) as raises:
foo()
self.assertIn("Found non-constant value at position 0",
str(raises.exception))
def test_7(self):
# dispatch on literals triggering @overload resolution
def dt(value):
if value == "apple":
return 1
elif value == "orange":
return 2
elif value == "banana":
return 3
elif value == 0xca11ab1e:
return 0x5ca1ab1e + value
@overload(dt, inline='always')
def ol_dt(li):
if isinstance(li, types.StringLiteral):
value = li.literal_value
if value == "apple":
def impl(li):
return 1
elif value == "orange":
def impl(li):
return 2
elif value == "banana":
def impl(li):
return 3
return impl
elif isinstance(li, types.IntegerLiteral):
value = li.literal_value
if value == 0xca11ab1e:
def impl(li):
# close over the dispatcher :)
return 0x5ca1ab1e + value
return impl
@njit
def foo():
acc = 0
for t in literal_unroll(['apple', 'orange', 'banana', 3390155550]):
acc += dt(t)
return acc
self.assertEqual(foo(), foo.py_func())
def test_8(self):
@njit
def foo():
x = []
z = ['apple', 'orange', 'banana']
for i in range(len(literal_unroll(z))):
t = z[i]
if t == "apple":
x.append("0")
elif t == "orange":
x.append(t)
elif t == "banana":
x.append("2.0")
return x
self.assertEqual(foo(), foo.py_func())
def test_9(self):
# unroll the same target twice
@njit
def foo(idx, z):
a = [12, 12.7, 3j, 4]
acc = 0
for i in literal_unroll(a):
acc += i
if acc.real < 26:
acc -= 1
else:
for x in literal_unroll(a):
acc += x
break
if a[0] < 23:
acc += 2
return acc
f = 9
k = f
self.assertEqual(foo(2, k), foo.py_func(2, k))
def test_10(self):
# nesting unrolls is illegal
@njit
def foo(idx, z):
a = (12, 12.7, 3j, 4, z, 2 * z)
b = [12, 12.7, 3j, 4]
acc = 0
for i in literal_unroll(a):
acc += i
if acc.real < 26:
acc -= 1
else:
for x in literal_unroll(a):
for j in literal_unroll(b):
acc += j
acc += x
for x in literal_unroll(a):
acc += x
for x in literal_unroll(a):
acc += x
if a[0] < 23:
acc += 2
return acc
f = 9
k = f
with self.assertRaises(errors.UnsupportedError) as raises:
foo(2, k)
self.assertIn("Nesting of literal_unroll is unsupported",
str(raises.exception))
def test_11(self):
# homogeneous const list unroll can return derivative of the induction
# var
@njit
def foo():
x = [1, 2, 3, 4]
acc = 0
for a in literal_unroll(x):
acc += a
return a
self.assertEqual(foo(), foo.py_func())
def test_12(self):
# mixed unroll cannot return derivative of the induction var
@njit
def foo():
acc = 0
x = [1, 2, 'a']
for a in literal_unroll(x):
acc += bool(a)
return a
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn("Cannot unify", str(raises.exception))
def test_13(self):
# list slice is illegal
@njit
def foo():
x = [1000, 2000, 3000, 4000]
acc = 0
for a in literal_unroll(x[:2]):
acc += a
return acc
with self.assertRaises(errors.UnsupportedError) as raises:
foo()
self.assertIn("Invalid use of literal_unroll", str(raises.exception))
def test_14(self):
# list mutate is illegal
@njit
def foo():
x = [1000, 2000, 3000, 4000]
acc = 0
for a in literal_unroll(x):
acc += a
x.append(10)
return acc
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn("Unknown attribute 'append' of type Tuple",
str(raises.exception))
| TestConstListUnroll |
python | Netflix__metaflow | metaflow/cli_args.py | {
"start": 1279,
"end": 3584
} | class ____(object):
def __init__(self):
self._top_kwargs = {}
self._step_kwargs = {}
def _set_step_kwargs(self, kwargs):
self._step_kwargs = kwargs
def _set_top_kwargs(self, kwargs):
self._top_kwargs = kwargs
@property
def top_kwargs(self):
return self._top_kwargs
@property
def step_kwargs(self):
return self._step_kwargs
def step_command(
self, executable, script, step_name, top_kwargs=None, step_kwargs=None
):
cmd = [executable, "-u", script]
if top_kwargs is None:
top_kwargs = self._top_kwargs
if step_kwargs is None:
step_kwargs = self._step_kwargs
top_args_list = list(self._options(top_kwargs))
cmd.extend(top_args_list)
cmd.extend(["step", step_name])
step_args_list = list(self._options(step_kwargs))
cmd.extend(step_args_list)
return cmd
@staticmethod
def _options(mapping):
for k, v in mapping.items():
# None or False arguments are ignored
# v needs to be explicitly False, not falsy, e.g. 0 is an acceptable value
if v is None or v is False:
continue
# we need special handling for 'with' since it is a reserved
# keyword in Python, so we call it 'decospecs' in click args
if k == "decospecs":
k = "with"
if k in ("config", "config_value"):
# Special handling here since we gather them all in one option but actually
# need to send them one at a time using --config-value <name> kv.<name>.
# Note it can be either config or config_value depending
# on click processing order.
for config_name in v.keys():
yield "--config-value"
yield to_unicode(config_name)
yield to_unicode(ConfigInput.make_key_name(config_name))
continue
k = k.replace("_", "-")
v = v if isinstance(v, (list, tuple, set)) else [v]
for value in v:
yield "--%s" % k
if not isinstance(value, bool):
yield to_unicode(value)
cli_args = CLIArgs()
| CLIArgs |
python | facebookresearch__faiss | benchs/bench_gpu_1bn.py | {
"start": 4625,
"end": 22386
} | class ____:
"""a pre-processor is either a faiss.VectorTransform or an IndentPreproc"""
def __init__(self, d):
self.d_in = self.d_out = d
def apply_py(self, x):
return x
def sanitize(x):
""" convert array to a c-contiguous float array """
return np.ascontiguousarray(x.astype('float32'))
def dataset_iterator(x, preproc, bs):
""" iterate over the lines of x in blocks of size bs"""
nb = x.shape[0]
block_ranges = [(i0, min(nb, i0 + bs))
for i0 in range(0, nb, bs)]
def prepare_block(i01):
i0, i1 = i01
xb = sanitize(x[i0:i1])
return i0, preproc.apply_py(xb)
return rate_limited_imap(prepare_block, block_ranges)
def eval_intersection_measure(gt_I, I):
""" measure intersection measure (used for knngraph)"""
inter = 0
rank = I.shape[1]
assert gt_I.shape[1] >= rank
for q in range(nq_gt):
inter += faiss.ranklist_intersection_size(
rank, faiss.swig_ptr(gt_I[q, :]),
rank, faiss.swig_ptr(I[q, :].astype('int64')))
return inter / float(rank * nq_gt)
#################################################################
# Prepare dataset
#################################################################
print("Preparing dataset", dbname)
if dbname.startswith('SIFT'):
# SIFT1M to SIFT1000M
dbsize = int(dbname[4:-1])
xb = mmap_bvecs('bigann/bigann_base.bvecs')
xq = mmap_bvecs('bigann/bigann_query.bvecs')
xt = mmap_bvecs('bigann/bigann_learn.bvecs')
# trim xb to correct size
xb = xb[:dbsize * 1000 * 1000]
gt_I = ivecs_read('bigann/gnd/idx_%dM.ivecs' % dbsize)
elif dbname == 'Deep1B':
xb = mmap_fvecs('deep1b/base.fvecs')
xq = mmap_fvecs('deep1b/deep1B_queries.fvecs')
xt = mmap_fvecs('deep1b/learn.fvecs')
# deep1B's train is is outrageously big
xt = xt[:10 * 1000 * 1000]
gt_I = ivecs_read('deep1b/deep1B_groundtruth.ivecs')
else:
print('unknown dataset', dbname, file=sys.stderr)
sys.exit(1)
if knngraph:
# convert to knn-graph dataset
xq = xb
xt = xb
# we compute the ground-truth on this number of queries for validation
nq_gt = 10000
gt_sl = 100
# ground truth will be computed below
gt_I = None
print("sizes: B %s Q %s T %s gt %s" % (
xb.shape, xq.shape, xt.shape,
gt_I.shape if gt_I is not None else None))
#################################################################
# Parse index_key and set cache files
#
# The index_key is a valid factory key that would work, but we
# decompose the training to do it faster
#################################################################
pat = re.compile('(OPQ[0-9]+(_[0-9]+)?,|PCAR[0-9]+,)?' +
'(IVF[0-9]+),' +
'(PQ[0-9]+|Flat)')
matchobject = pat.match(index_key)
assert matchobject, 'could not parse ' + index_key
mog = matchobject.groups()
preproc_str = mog[0]
ivf_str = mog[2]
pqflat_str = mog[3]
ncent = int(ivf_str[3:])
prefix = ''
if knngraph:
gt_cachefile = '%s/BK_gt_%s.npy' % (cacheroot, dbname)
prefix = 'BK_'
# files must be kept distinct because the training set is not the
# same for the knngraph
if preproc_str:
preproc_cachefile = '%s/%spreproc_%s_%s.vectrans' % (
cacheroot, prefix, dbname, preproc_str[:-1])
else:
preproc_cachefile = None
preproc_str = ''
cent_cachefile = '%s/%scent_%s_%s%s.npy' % (
cacheroot, prefix, dbname, preproc_str, ivf_str)
index_cachefile = '%s/%s%s_%s%s,%s.index' % (
cacheroot, prefix, dbname, preproc_str, ivf_str, pqflat_str)
if not use_cache:
preproc_cachefile = None
cent_cachefile = None
index_cachefile = None
print("cachefiles:")
print(preproc_cachefile)
print(cent_cachefile)
print(index_cachefile)
#################################################################
# Wake up GPUs
#################################################################
print("preparing resources for %d GPUs" % ngpu)
gpu_resources = []
for _ in range(ngpu):
res = faiss.StandardGpuResources()
if tempmem >= 0:
res.setTempMemory(tempmem)
gpu_resources.append(res)
def make_vres_vdev(i0=0, i1=-1):
" return vectors of device ids and resources useful for gpu_multiple"
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
if i1 == -1:
i1 = ngpu
for i in range(i0, i1):
vdev.push_back(i)
vres.push_back(gpu_resources[i])
return vres, vdev
#################################################################
# Prepare ground truth (for the knngraph)
#################################################################
def compute_GT():
print("compute GT")
t0 = time.time()
gt_I = np.zeros((nq_gt, gt_sl), dtype='int64')
gt_D = np.zeros((nq_gt, gt_sl), dtype='float32')
heaps = faiss.float_maxheap_array_t()
heaps.k = gt_sl
heaps.nh = nq_gt
heaps.val = faiss.swig_ptr(gt_D)
heaps.ids = faiss.swig_ptr(gt_I)
heaps.heapify()
bs = 10 ** 5
n, d = xb.shape
xqs = sanitize(xq[:nq_gt])
db_gt = faiss.IndexFlatL2(d)
vres, vdev = make_vres_vdev()
db_gt_gpu = faiss.index_cpu_to_gpu_multiple(
vres, vdev, db_gt)
# compute ground-truth by blocks of bs, and add to heaps
for i0, xsl in dataset_iterator(xb, IdentPreproc(d), bs):
db_gt_gpu.add(xsl)
D, I = db_gt_gpu.search(xqs, gt_sl)
I += i0
heaps.addn_with_ids(
gt_sl, faiss.swig_ptr(D), faiss.swig_ptr(I), gt_sl)
db_gt_gpu.reset()
print("\r %d/%d, %.3f s" % (i0, n, time.time() - t0), end=' ')
print()
heaps.reorder()
print("GT time: %.3f s" % (time.time() - t0))
return gt_I
if knngraph:
if gt_cachefile and os.path.exists(gt_cachefile):
print("load GT", gt_cachefile)
gt_I = np.load(gt_cachefile)
else:
gt_I = compute_GT()
if gt_cachefile:
print("store GT", gt_cachefile)
np.save(gt_cachefile, gt_I)
#################################################################
# Prepare the vector transformation object (pure CPU)
#################################################################
def train_preprocessor():
print("train preproc", preproc_str)
d = xt.shape[1]
t0 = time.time()
if preproc_str.startswith('OPQ'):
fi = preproc_str[3:-1].split('_')
m = int(fi[0])
dout = int(fi[1]) if len(fi) == 2 else d
preproc = faiss.OPQMatrix(d, m, dout)
elif preproc_str.startswith('PCAR'):
dout = int(preproc_str[4:-1])
preproc = faiss.PCAMatrix(d, dout, 0, True)
else:
assert False
preproc.train(sanitize(xt[:1000000]))
print("preproc train done in %.3f s" % (time.time() - t0))
return preproc
def get_preprocessor():
if preproc_str:
if not preproc_cachefile or not os.path.exists(preproc_cachefile):
preproc = train_preprocessor()
if preproc_cachefile:
print("store", preproc_cachefile)
faiss.write_VectorTransform(preproc, preproc_cachefile)
else:
print("load", preproc_cachefile)
preproc = faiss.read_VectorTransform(preproc_cachefile)
else:
d = xb.shape[1]
preproc = IdentPreproc(d)
return preproc
#################################################################
# Prepare the coarse quantizer
#################################################################
def train_coarse_quantizer(x, k, preproc):
d = preproc.d_out
clus = faiss.Clustering(d, k)
clus.verbose = True
# clus.niter = 2
clus.max_points_per_centroid = 10000000
print("apply preproc on shape", x.shape, 'k=', k)
t0 = time.time()
x = preproc.apply_py(sanitize(x))
print(" preproc %.3f s output shape %s" % (
time.time() - t0, x.shape))
vres, vdev = make_vres_vdev()
index = faiss.index_cpu_to_gpu_multiple(
vres, vdev, faiss.IndexFlatL2(d))
clus.train(x, index)
centroids = faiss.vector_float_to_array(clus.centroids)
return centroids.reshape(k, d)
def prepare_coarse_quantizer(preproc):
if cent_cachefile and os.path.exists(cent_cachefile):
print("load centroids", cent_cachefile)
centroids = np.load(cent_cachefile)
else:
nt = max(1000000, 256 * ncent)
print("train coarse quantizer...")
t0 = time.time()
centroids = train_coarse_quantizer(xt[:nt], ncent, preproc)
print("Coarse train time: %.3f s" % (time.time() - t0))
if cent_cachefile:
print("store centroids", cent_cachefile)
np.save(cent_cachefile, centroids)
coarse_quantizer = faiss.IndexFlatL2(preproc.d_out)
coarse_quantizer.add(centroids)
return coarse_quantizer
#################################################################
# Make index and add elements to it
#################################################################
def prepare_trained_index(preproc):
coarse_quantizer = prepare_coarse_quantizer(preproc)
d = preproc.d_out
if pqflat_str == 'Flat':
print("making an IVFFlat index")
idx_model = faiss.IndexIVFFlat(coarse_quantizer, d, ncent,
faiss.METRIC_L2)
else:
m = int(pqflat_str[2:])
assert m < 56 or use_float16, "PQ%d will work only with -float16" % m
print("making an IVFPQ index, m = ", m)
idx_model = faiss.IndexIVFPQ(coarse_quantizer, d, ncent, m, 8)
coarse_quantizer.this.disown()
idx_model.own_fields = True
# finish training on CPU
t0 = time.time()
print("Training vector codes")
x = preproc.apply_py(sanitize(xt[:1000000]))
idx_model.train(x)
print(" done %.3f s" % (time.time() - t0))
return idx_model
def compute_populated_index(preproc):
"""Add elements to a sharded index. Return the index and if available
a sharded gpu_index that contains the same data. """
indexall = prepare_trained_index(preproc)
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = use_float16
co.useFloat16CoarseQuantizer = False
co.usePrecomputed = use_precomputed_tables
co.indicesOptions = faiss.INDICES_CPU
co.verbose = True
co.reserveVecs = max_add if max_add > 0 else xb.shape[0]
co.shard = True
assert co.shard_type in (0, 1, 2)
vres, vdev = make_vres_vdev()
gpu_index = faiss.index_cpu_to_gpu_multiple(
vres, vdev, indexall, co)
print("add...")
t0 = time.time()
nb = xb.shape[0]
for i0, xs in dataset_iterator(xb, preproc, add_batch_size):
i1 = i0 + xs.shape[0]
gpu_index.add_with_ids(xs, np.arange(i0, i1))
if max_add > 0 and gpu_index.ntotal > max_add:
print("Flush indexes to CPU")
for i in range(ngpu):
index_src_gpu = faiss.downcast_index(gpu_index.at(i))
index_src = faiss.index_gpu_to_cpu(index_src_gpu)
print(" index %d size %d" % (i, index_src.ntotal))
index_src.copy_subset_to(indexall, 0, 0, nb)
index_src_gpu.reset()
index_src_gpu.reserveMemory(max_add)
gpu_index.sync_with_shard_indexes()
print('\r%d/%d (%.3f s) ' % (
i0, nb, time.time() - t0), end=' ')
sys.stdout.flush()
print("Add time: %.3f s" % (time.time() - t0))
print("Aggregate indexes to CPU")
t0 = time.time()
if hasattr(gpu_index, 'at'):
# it is a sharded index
for i in range(ngpu):
index_src = faiss.index_gpu_to_cpu(gpu_index.at(i))
print(" index %d size %d" % (i, index_src.ntotal))
index_src.copy_subset_to(indexall, 0, 0, nb)
else:
# simple index
index_src = faiss.index_gpu_to_cpu(gpu_index)
index_src.copy_subset_to(indexall, 0, 0, nb)
print(" done in %.3f s" % (time.time() - t0))
if max_add > 0:
# it does not contain all the vectors
gpu_index = None
return gpu_index, indexall
def compute_populated_index_2(preproc):
indexall = prepare_trained_index(preproc)
# set up a 3-stage pipeline that does:
# - stage 1: load + preproc
# - stage 2: assign on GPU
# - stage 3: add to index
stage1 = dataset_iterator(xb, preproc, add_batch_size)
vres, vdev = make_vres_vdev()
coarse_quantizer_gpu = faiss.index_cpu_to_gpu_multiple(
vres, vdev, indexall.quantizer)
def quantize(args):
(i0, xs) = args
_, assign = coarse_quantizer_gpu.search(xs, 1)
return i0, xs, assign.ravel()
stage2 = rate_limited_imap(quantize, stage1)
print("add...")
t0 = time.time()
nb = xb.shape[0]
for i0, xs, assign in stage2:
i1 = i0 + xs.shape[0]
if indexall.__class__ == faiss.IndexIVFPQ:
indexall.add_core_o(i1 - i0, faiss.swig_ptr(xs),
None, None, faiss.swig_ptr(assign))
elif indexall.__class__ == faiss.IndexIVFFlat:
indexall.add_core(i1 - i0, faiss.swig_ptr(xs), None,
faiss.swig_ptr(assign))
else:
assert False
print('\r%d/%d (%.3f s) ' % (
i0, nb, time.time() - t0), end=' ')
sys.stdout.flush()
print("Add time: %.3f s" % (time.time() - t0))
return None, indexall
def get_populated_index(preproc):
if not index_cachefile or not os.path.exists(index_cachefile):
if not altadd:
gpu_index, indexall = compute_populated_index(preproc)
else:
gpu_index, indexall = compute_populated_index_2(preproc)
if index_cachefile:
print("store", index_cachefile)
faiss.write_index(indexall, index_cachefile)
else:
print("load", index_cachefile)
indexall = faiss.read_index(index_cachefile)
gpu_index = None
co = faiss.GpuMultipleClonerOptions()
co.useFloat16 = use_float16
co.useFloat16CoarseQuantizer = False
co.usePrecomputed = use_precomputed_tables
co.indicesOptions = 0
co.verbose = True
co.shard = True # the replicas will be made "manually"
t0 = time.time()
print("CPU index contains %d vectors, move to GPU" % indexall.ntotal)
if replicas == 1:
if not gpu_index:
print("copying loaded index to GPUs")
vres, vdev = make_vres_vdev()
index = faiss.index_cpu_to_gpu_multiple(
vres, vdev, indexall, co)
else:
index = gpu_index
else:
del gpu_index # We override the GPU index
print("Copy CPU index to %d sharded GPU indexes" % replicas)
index = faiss.IndexReplicas()
for i in range(replicas):
gpu0 = ngpu * i / replicas
gpu1 = ngpu * (i + 1) / replicas
vres, vdev = make_vres_vdev(gpu0, gpu1)
print(" dispatch to GPUs %d:%d" % (gpu0, gpu1))
index1 = faiss.index_cpu_to_gpu_multiple(
vres, vdev, indexall, co)
index1.this.disown()
index.addIndex(index1)
index.own_fields = True
del indexall
print("move to GPU done in %.3f s" % (time.time() - t0))
return index
#################################################################
# Perform search
#################################################################
def eval_dataset(index, preproc):
    """Search the query set and report timings and accuracy.

    For each probe count in the module-level ``nprobes`` list, searches the
    (preprocessed) queries ``xq`` in batches of ``query_batch_size`` and
    prints either k-NN-graph intersection measures (when ``knngraph`` is
    set) or 1-recall@{1,10,100} against the ground truth ``gt_I``.
    Optionally stores the result ids / distances to ``I_fname`` /
    ``D_fname``.

    NOTE(review): relies on module-level state (``gt_I``, ``xq``,
    ``nprobes``, ``nnn``, ``knngraph``, ``query_batch_size``, ``I_fname``,
    ``D_fname``) and helpers (``sanitize``, ``dataset_iterator``,
    ``eval_intersection_measure``) defined earlier in the script.
    """
    ps = faiss.GpuParameterSpace()
    ps.initialize(index)

    nq_gt = gt_I.shape[0]
    print("search...")

    sl = query_batch_size
    nq = xq.shape[0]

    for nprobe in nprobes:
        ps.set_index_parameter(index, 'nprobe', nprobe)
        t0 = time.time()

        if sl == 0:
            # Single-shot search over the whole query set.
            D, I = index.search(preproc.apply_py(sanitize(xq)), nnn)
        else:
            # Batched search, accumulating results into preallocated arrays.
            I = np.empty((nq, nnn), dtype='int32')
            D = np.empty((nq, nnn), dtype='float32')

            inter_res = ''

            for i0, xs in dataset_iterator(xq, preproc, sl):
                print('\r%d/%d (%.3f s%s) ' % (
                    i0, nq, time.time() - t0, inter_res), end=' ')
                sys.stdout.flush()

                i1 = i0 + xs.shape[0]
                Di, Ii = index.search(xs, nnn)

                I[i0:i1] = Ii
                D[i0:i1] = Di

                # Report an intermediate intersection measure as soon as
                # enough results are available to compare with gt_I.
                if knngraph and not inter_res and i1 >= nq_gt:
                    ires = eval_intersection_measure(
                        gt_I[:, :nnn], I[:nq_gt])
                    inter_res = ', %.4f' % ires

        t1 = time.time()
        if knngraph:
            ires = eval_intersection_measure(gt_I[:, :nnn], I[:nq_gt])
            print(" probe=%-3d: %.3f s rank-%d intersection results: %.4f" % (
                nprobe, t1 - t0, nnn, ires))
        else:
            print(" probe=%-3d: %.3f s" % (nprobe, t1 - t0), end=' ')
            gtc = gt_I[:, :1]
            nq = xq.shape[0]
            for rank in (1, 10, 100):
                if rank > nnn:
                    continue
                nok = (I[:, :rank] == gtc).sum()
                print("1-R@%d: %.4f" % (rank, nok / float(nq)), end=' ')
            print()
        if I_fname:
            # BUGFIX: the template was formatted with the result array I
            # (TypeError for any %d-style template) and np.save's arguments
            # were reversed (signature is np.save(file, arr)).  The
            # filename is presumably meant to vary per nprobe setting —
            # TODO confirm against callers.
            I_fname_i = I_fname % nprobe
            print("storing", I_fname_i)
            np.save(I_fname_i, I)
        if D_fname:
            # BUGFIX: copy-paste error used I_fname instead of D_fname,
            # plus the same reversed-argument np.save call as above.
            D_fname_i = D_fname % nprobe
            print("storing", D_fname_i)
            np.save(D_fname_i, D)
#################################################################
# Driver
#################################################################
# Build the preprocessing transform, construct (or load) the GPU index,
# then run the search benchmark over the query set.
preproc = get_preprocessor()
index = get_populated_index(preproc)
eval_dataset(index, preproc)
# make sure index is deleted before the resources
del index
| IdentPreproc |
python | pandas-dev__pandas | pandas/io/formats/printing.py | {
"start": 16352,
"end": 16511
} | class ____(dict[_KT, _VT]):
"""Dict extension to support abbreviated __repr__"""
def __repr__(self) -> str:
return pprint_thing(self)
| PrettyDict |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/callables.py | {
"start": 637,
"end": 951
} | class ____(AbstractEventProcessor[EPInputType, EPOutputType]):
async def async_run(self) -> None:
_test_sink(self.benign)
async def async_call_tainted(self) -> None:
_test_sink(self.tainted)
PIInputType = TypeVar("PIInputType")
PIOutputType = TypeVar("PIOutputType")
| ConcreteEventProcessor |
python | coleifer__peewee | tests/psycopg3_ext.py | {
"start": 1214,
"end": 1268
} | class ____(TestModel):
data = BinaryJSONField()
| BJson |
python | kamyu104__LeetCode-Solutions | Python/find-the-largest-almost-missing-integer.py | {
"start": 63,
"end": 687
} | class ____(object):
def largestInteger(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if k == len(nums):
return max(nums)
cnt = collections.defaultdict(int)
for x in nums:
cnt[x] += 1
if k == 1:
return max(x for x, v in cnt.iteritems() if v == 1) if any(v == 1 for v in cnt.itervalues()) else -1
result = -1
if cnt[nums[0]] == 1:
result = max(result, nums[0])
if cnt[nums[-1]] == 1:
result = max(result, nums[-1])
return result
| Solution |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/hooks/test_spark_sql.py | {
"start": 1356,
"end": 8816
} | class ____:
_config = {
"conn_id": "spark_default",
"executor_cores": 4,
"executor_memory": "22g",
"keytab": "privileged_user.keytab",
"name": "spark-job",
"num_executors": 10,
"verbose": True,
"sql": " /path/to/sql/file.sql ",
"conf": {"key": "value", "PROP": "VALUE"},
}
_config_str = {
"conn_id": "spark_default",
"executor_cores": 4,
"executor_memory": "22g",
"keytab": "privileged_user.keytab",
"name": "spark-job",
"num_executors": 10,
"verbose": True,
"sql": " /path/to/sql/file.sql ",
"conf": "key=value,PROP=VALUE",
}
@classmethod
def setup_class(cls) -> None:
clear_test_connections(add_default_connections_back=False)
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(conn_id="spark_default", conn_type="spark", host="yarn://yarn-master")
)
@classmethod
def teardown_class(cls) -> None:
clear_test_connections(add_default_connections_back=True)
@pytest.mark.db_test
def test_build_command(self):
hook = SparkSqlHook(**self._config)
# The subprocess requires an array but we build the cmd by joining on a space
cmd = " ".join(hook._prepare_command(""))
# Check all the parameters
assert f"--executor-cores {self._config['executor_cores']}" in cmd
assert f"--executor-memory {self._config['executor_memory']}" in cmd
assert f"--keytab {self._config['keytab']}" in cmd
assert f"--name {self._config['name']}" in cmd
assert f"--num-executors {self._config['num_executors']}" in cmd
sql_path = get_after("-f", hook._prepare_command(""))
assert self._config["sql"].strip() == sql_path
# Check if all config settings are there
for k, v in self._config["conf"].items():
assert f"--conf {k}={v}" in cmd
if self._config["verbose"]:
assert "--verbose" in cmd
@pytest.mark.db_test
def test_build_command_with_str_conf(self):
hook = SparkSqlHook(**self._config_str)
# The subprocess requires an array but we build the cmd by joining on a space
cmd = " ".join(hook._prepare_command(""))
# Check all the parameters
assert f"--executor-cores {self._config_str['executor_cores']}" in cmd
assert f"--executor-memory {self._config_str['executor_memory']}" in cmd
assert f"--keytab {self._config_str['keytab']}" in cmd
assert f"--name {self._config_str['name']}" in cmd
assert f"--num-executors {self._config_str['num_executors']}" in cmd
sql_path = get_after("-f", hook._prepare_command(""))
assert self._config_str["sql"].strip() == sql_path
# Check if all config settings are there
for key_value in self._config_str["conf"].split(","):
k, v = key_value.split("=")
assert f"--conf {k}={v}" in cmd
if self._config["verbose"]:
assert "--verbose" in cmd
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_sql.subprocess.Popen")
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = StringIO("Spark-sql communicates using stdout")
mock_popen.return_value.stderr = StringIO("stderr")
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(conn_id="spark_default", sql="SELECT 1")
with patch.object(hook.log, "debug") as mock_debug:
with patch.object(hook.log, "info") as mock_info:
hook.run_query()
mock_debug.assert_called_once_with(
"Spark-Sql cmd: %s",
[
"spark-sql",
"-e",
"SELECT 1",
"--master",
"yarn://yarn-master",
"--name",
"default-name",
"--verbose",
"--queue",
"default",
],
)
mock_info.assert_called_once_with("Spark-sql communicates using stdout")
# Then
assert mock_popen.mock_calls[0] == call(
[
"spark-sql",
"-e",
"SELECT 1",
"--master",
"yarn://yarn-master",
"--name",
"default-name",
"--verbose",
"--queue",
"default",
],
stderr=-2,
stdout=-1,
universal_newlines=True,
)
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_sql.subprocess.Popen")
def test_spark_process_runcmd_with_str(self, mock_popen):
# Given
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(conn_id="spark_default", sql="SELECT 1")
hook.run_query("--deploy-mode cluster")
# Then
assert mock_popen.mock_calls[0] == call(
[
"spark-sql",
"-e",
"SELECT 1",
"--master",
"yarn://yarn-master",
"--name",
"default-name",
"--verbose",
"--queue",
"default",
"--deploy-mode",
"cluster",
],
stderr=-2,
stdout=-1,
universal_newlines=True,
)
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_sql.subprocess.Popen")
def test_spark_process_runcmd_with_list(self, mock_popen):
# Given
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(conn_id="spark_default", sql="SELECT 1")
hook.run_query(["--deploy-mode", "cluster"])
# Then
assert mock_popen.mock_calls[0] == call(
[
"spark-sql",
"-e",
"SELECT 1",
"--master",
"yarn://yarn-master",
"--name",
"default-name",
"--verbose",
"--queue",
"default",
"--deploy-mode",
"cluster",
],
stderr=-2,
stdout=-1,
universal_newlines=True,
)
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_sql.subprocess.Popen")
def test_spark_process_runcmd_and_fail(self, mock_popen):
# Given
sql = "SELECT 1"
master = "local"
params = "--deploy-mode cluster"
status = 1
mock_popen.return_value.wait.return_value = status
# When
hook = SparkSqlHook(
conn_id="spark_default",
sql=sql,
master=master,
)
with pytest.raises(AirflowException) as ctx:
hook.run_query(params)
# Then
assert str(ctx.value) == (
f"Cannot execute '{sql}' on {master} (additional parameters: '{params}'). "
f"Process exit code: {status}."
)
| TestSparkSqlHook |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 3044,
"end": 19963
} | class ____(Mapping):
r"""
Container for sequences of colors that are known to Matplotlib by name.
The universal registry instance is `matplotlib.color_sequences`. There
should be no need for users to instantiate `.ColorSequenceRegistry`
themselves.
Read access uses a dict-like interface mapping names to lists of colors::
import matplotlib as mpl
colors = mpl.color_sequences['tab10']
For a list of built in color sequences, see :doc:`/gallery/color/color_sequences`.
The returned lists are copies, so that their modification does not change
the global definition of the color sequence.
Additional color sequences can be added via
`.ColorSequenceRegistry.register`::
mpl.color_sequences.register('rgb', ['r', 'g', 'b'])
"""
_BUILTIN_COLOR_SEQUENCES = {
'tab10': _cm._tab10_data,
'tab20': _cm._tab20_data,
'tab20b': _cm._tab20b_data,
'tab20c': _cm._tab20c_data,
'Pastel1': _cm._Pastel1_data,
'Pastel2': _cm._Pastel2_data,
'Paired': _cm._Paired_data,
'Accent': _cm._Accent_data,
'Dark2': _cm._Dark2_data,
'Set1': _cm._Set1_data,
'Set2': _cm._Set2_data,
'Set3': _cm._Set3_data,
'petroff6': _cm._petroff6_data,
'petroff8': _cm._petroff8_data,
'petroff10': _cm._petroff10_data,
}
def __init__(self):
self._color_sequences = {**self._BUILTIN_COLOR_SEQUENCES}
def __getitem__(self, item):
try:
return list(self._color_sequences[item])
except KeyError:
raise KeyError(f"{item!r} is not a known color sequence name")
def __iter__(self):
return iter(self._color_sequences)
def __len__(self):
return len(self._color_sequences)
def __str__(self):
return ('ColorSequenceRegistry; available colormaps:\n' +
', '.join(f"'{name}'" for name in self))
def register(self, name, color_list):
"""
Register a new color sequence.
The color sequence registry stores a copy of the given *color_list*, so
that future changes to the original list do not affect the registered
color sequence. Think of this as the registry taking a snapshot
of *color_list* at registration.
Parameters
----------
name : str
The name for the color sequence.
color_list : list of :mpltype:`color`
An iterable returning valid Matplotlib colors when iterating over.
Note however that the returned color sequence will always be a
list regardless of the input type.
"""
if name in self._BUILTIN_COLOR_SEQUENCES:
raise ValueError(f"{name!r} is a reserved name for a builtin "
"color sequence")
color_list = list(color_list) # force copy and coerce type to list
for color in color_list:
try:
to_rgba(color)
except ValueError:
raise ValueError(
f"{color!r} is not a valid color specification")
self._color_sequences[name] = color_list
def unregister(self, name):
"""
Remove a sequence from the registry.
You cannot remove built-in color sequences.
If the name is not registered, returns with no error.
"""
if name in self._BUILTIN_COLOR_SEQUENCES:
raise ValueError(
f"Cannot unregister builtin color sequence {name!r}")
self._color_sequences.pop(name, None)
_color_sequences = ColorSequenceRegistry()
def _sanitize_extrema(ex):
if ex is None:
return ex
try:
ret = ex.item()
except AttributeError:
ret = float(ex)
return ret
_nth_color_re = re.compile(r"\AC[0-9]+\Z")
def _is_nth_color(c):
"""Return whether *c* can be interpreted as an item in the color cycle."""
return isinstance(c, str) and _nth_color_re.match(c)
def is_color_like(c):
"""Return whether *c* as a valid Matplotlib :mpltype:`color` specifier."""
# Special-case nth color syntax because it cannot be parsed during setup.
if _is_nth_color(c):
return True
try:
to_rgba(c)
except (TypeError, ValueError):
return False
else:
return True
def _has_alpha_channel(c):
"""
Return whether *c* is a color with an alpha channel.
If *c* is not a valid color specifier, then the result is undefined.
"""
# The following logic uses the assumption that c is a valid color spec.
# For speed and simplicity, we intentionally don't care about other inputs.
# Anything can happen with them.
# if c is a hex, it has an alpha channel when it has 4 (or 8) digits after '#'
if isinstance(c, str):
if c[0] == '#' and (len(c) == 5 or len(c) == 9):
# example: '#fff8' or '#0f0f0f80'
return True
else:
# if c isn't a string, it can be an RGB(A) or a color-alpha tuple
# if it has length 4, it has an alpha channel
if len(c) == 4:
# example: [0.5, 0.5, 0.5, 0.5]
return True
# if it has length 2, it's a color/alpha tuple
# if the second element isn't None or the first element has length = 4
if len(c) == 2 and (c[1] is not None or _has_alpha_channel(c[0])):
# example: ([0.5, 0.5, 0.5, 0.5], None) or ('r', 0.5)
return True
# otherwise it doesn't have an alpha channel
return False
def _check_color_like(**kwargs):
"""
For each *key, value* pair in *kwargs*, check that *value* is color-like.
"""
for k, v in kwargs.items():
if not is_color_like(v):
raise ValueError(
f"{v!r} is not a valid value for {k}: supported inputs are "
f"(r, g, b) and (r, g, b, a) 0-1 float tuples; "
f"'#rrggbb', '#rrggbbaa', '#rgb', '#rgba' strings; "
f"named color strings; "
f"string reprs of 0-1 floats for grayscale values; "
f"'C0', 'C1', ... strings for colors of the color cycle; "
f"and pairs combining one of the above with an alpha value")
def same_color(c1, c2):
"""
Return whether the colors *c1* and *c2* are the same.
*c1*, *c2* can be single colors or lists/arrays of colors.
"""
c1 = to_rgba_array(c1)
c2 = to_rgba_array(c2)
n1 = max(c1.shape[0], 1) # 'none' results in shape (0, 4), but is 1-elem
n2 = max(c2.shape[0], 1) # 'none' results in shape (0, 4), but is 1-elem
if n1 != n2:
raise ValueError('Different number of elements passed.')
# The following shape test is needed to correctly handle comparisons with
# 'none', which results in a shape (0, 4) array and thus cannot be tested
# via value comparison.
return c1.shape == c2.shape and (c1 == c2).all()
def to_rgba(c, alpha=None):
"""
Convert *c* to an RGBA color.
Parameters
----------
c : :mpltype:`color` or ``np.ma.masked``
alpha : float, optional
If *alpha* is given, force the alpha value of the returned RGBA tuple
to *alpha*.
If None, the alpha value from *c* is used. If *c* does not have an
alpha channel, then alpha defaults to 1.
*alpha* is ignored for the color value ``"none"`` (case-insensitive),
which always maps to ``(0, 0, 0, 0)``.
Returns
-------
tuple
Tuple of floats ``(r, g, b, a)``, where each channel (red, green, blue,
alpha) can assume values between 0 and 1.
"""
if isinstance(c, tuple) and len(c) == 2:
if alpha is None:
c, alpha = c
else:
c = c[0]
# Special-case nth color syntax because it should not be cached.
if _is_nth_color(c):
prop_cycler = mpl.rcParams['axes.prop_cycle']
colors = prop_cycler.by_key().get('color', ['k'])
c = colors[int(c[1:]) % len(colors)]
try:
rgba = _colors_full_map.cache[c, alpha]
except (KeyError, TypeError): # Not in cache, or unhashable.
rgba = None
if rgba is None: # Suppress exception chaining of cache lookup failure.
rgba = _to_rgba_no_colorcycle(c, alpha)
try:
_colors_full_map.cache[c, alpha] = rgba
except TypeError:
pass
return rgba
def _to_rgba_no_colorcycle(c, alpha=None):
"""
Convert *c* to an RGBA color, with no support for color-cycle syntax.
If *alpha* is given, force the alpha value of the returned RGBA tuple
to *alpha*. Otherwise, the alpha value from *c* is used, if it has alpha
information, or defaults to 1.
*alpha* is ignored for the color value ``"none"`` (case-insensitive),
which always maps to ``(0, 0, 0, 0)``.
"""
if alpha is not None and not 0 <= alpha <= 1:
raise ValueError("'alpha' must be between 0 and 1, inclusive")
orig_c = c
if c is np.ma.masked:
return (0., 0., 0., 0.)
if isinstance(c, str):
if c.lower() == "none":
return (0., 0., 0., 0.)
# Named color.
try:
# This may turn c into a non-string, so we check again below.
c = _colors_full_map[c]
except KeyError:
if len(c) != 1:
try:
c = _colors_full_map[c.lower()]
except KeyError:
pass
if isinstance(c, str):
if re.fullmatch("#[a-fA-F0-9]+", c):
if len(c) == 7: # #rrggbb hex format.
return (*[n / 0xff for n in bytes.fromhex(c[1:])],
alpha if alpha is not None else 1.)
elif len(c) == 4: # #rgb hex format, shorthand for #rrggbb.
return (*[int(n, 16) / 0xf for n in c[1:]],
alpha if alpha is not None else 1.)
elif len(c) == 9: # #rrggbbaa hex format.
color = [n / 0xff for n in bytes.fromhex(c[1:])]
if alpha is not None:
color[-1] = alpha
return tuple(color)
elif len(c) == 5: # #rgba hex format, shorthand for #rrggbbaa.
color = [int(n, 16) / 0xf for n in c[1:]]
if alpha is not None:
color[-1] = alpha
return tuple(color)
else:
raise ValueError(f"Invalid hex color specifier: {orig_c!r}")
# string gray.
try:
c = float(c)
except ValueError:
pass
else:
if not (0 <= c <= 1):
raise ValueError(
f"Invalid string grayscale value {orig_c!r}. "
f"Value must be within 0-1 range")
return c, c, c, alpha if alpha is not None else 1.
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# turn 2-D array into 1-D array
if isinstance(c, np.ndarray):
if c.ndim == 2 and c.shape[0] == 1:
c = c.reshape(-1)
# tuple color.
if not np.iterable(c):
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
if len(c) not in [3, 4]:
raise ValueError("RGBA sequence should have length 3 or 4")
if not all(isinstance(x, Real) for x in c):
# Checks that don't work: `map(float, ...)`, `np.array(..., float)` and
# `np.array(...).astype(float)` would all convert "0.5" to 0.5.
raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
# Return a tuple to prevent the cached value from being modified.
c = tuple(map(float, c))
if len(c) == 3 and alpha is None:
alpha = 1
if alpha is not None:
c = c[:3] + (alpha,)
if any(elem < 0 or elem > 1 for elem in c):
raise ValueError("RGBA values should be within 0-1 range")
return c
def to_rgba_array(c, alpha=None):
"""
Convert *c* to a (n, 4) array of RGBA colors.
Parameters
----------
c : :mpltype:`color` or list of :mpltype:`color` or RGB(A) array
If *c* is a masked array, an `~numpy.ndarray` is returned with a
(0, 0, 0, 0) row for each masked value or row in *c*.
alpha : float or sequence of floats, optional
If *alpha* is given, force the alpha value of the returned RGBA tuple
to *alpha*.
If None, the alpha value from *c* is used. If *c* does not have an
alpha channel, then alpha defaults to 1.
*alpha* is ignored for the color value ``"none"`` (case-insensitive),
which always maps to ``(0, 0, 0, 0)``.
If *alpha* is a sequence and *c* is a single color, *c* will be
repeated to match the length of *alpha*.
Returns
-------
array
(n, 4) array of RGBA colors, where each channel (red, green, blue,
alpha) can assume values between 0 and 1.
"""
if isinstance(c, tuple) and len(c) == 2 and isinstance(c[1], Real):
if alpha is None:
c, alpha = c
else:
c = c[0]
# Special-case inputs that are already arrays, for performance. (If the
# array has the wrong kind or shape, raise the error during one-at-a-time
# conversion.)
if np.iterable(alpha):
alpha = np.asarray(alpha).ravel()
if (isinstance(c, np.ndarray) and c.dtype.kind in "if"
and c.ndim == 2 and c.shape[1] in [3, 4]):
mask = c.mask.any(axis=1) if np.ma.is_masked(c) else None
c = np.ma.getdata(c)
if np.iterable(alpha):
if c.shape[0] == 1 and alpha.shape[0] > 1:
c = np.tile(c, (alpha.shape[0], 1))
elif c.shape[0] != alpha.shape[0]:
raise ValueError("The number of colors must match the number"
" of alpha values if there are more than one"
" of each.")
if c.shape[1] == 3:
result = np.column_stack([c, np.zeros(len(c))])
result[:, -1] = alpha if alpha is not None else 1.
elif c.shape[1] == 4:
result = c.copy()
if alpha is not None:
result[:, -1] = alpha
if mask is not None:
result[mask] = 0
if np.any((result < 0) | (result > 1)):
raise ValueError("RGBA values should be within 0-1 range")
return result
# Handle single values.
# Note that this occurs *after* handling inputs that are already arrays, as
# `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
# to format the array in the ValueError message(!).
if cbook._str_lower_equal(c, "none"):
return np.zeros((0, 4), float)
try:
if np.iterable(alpha):
return np.array([to_rgba(c, a) for a in alpha], float)
else:
return np.array([to_rgba(c, alpha)], float)
except TypeError:
pass
except ValueError as e:
if e.args == ("'alpha' must be between 0 and 1, inclusive", ):
# ValueError is from _to_rgba_no_colorcycle().
raise e
if isinstance(c, str):
raise ValueError(f"{c!r} is not a valid color value.")
if len(c) == 0:
return np.zeros((0, 4), float)
# Quick path if the whole sequence can be directly converted to a numpy
# array in one shot.
if isinstance(c, Sequence):
lens = {len(cc) if isinstance(cc, (list, tuple)) else -1 for cc in c}
if lens == {3}:
rgba = np.column_stack([c, np.ones(len(c))])
elif lens == {4}:
rgba = np.array(c)
else:
rgba = np.array([to_rgba(cc) for cc in c])
else:
rgba = np.array([to_rgba(cc) for cc in c])
if alpha is not None:
rgba[:, 3] = alpha
if isinstance(c, Sequence):
# ensure that an explicit alpha does not overwrite full transparency
# for "none"
none_mask = [cbook._str_equal(cc, "none") for cc in c]
rgba[:, 3][none_mask] = 0
return rgba
def to_rgb(c):
"""
Convert the :mpltype:`color` *c* to an RGB color tuple.
If c has an alpha channel value specified, that is silently dropped.
"""
return to_rgba(c)[:3]
def to_hex(c, keep_alpha=False):
"""
Convert *c* to a hex color.
Parameters
----------
c : :mpltype:`color` or `numpy.ma.masked`
keep_alpha : bool, default: False
If False, use the ``#rrggbb`` format, otherwise use ``#rrggbbaa``.
Returns
-------
str
``#rrggbb`` or ``#rrggbbaa`` hex color string
"""
c = to_rgba(c)
if not keep_alpha:
c = c[:3]
return "#" + "".join(format(round(val * 255), "02x") for val in c)
### Backwards-compatible color-conversion API
cnames = CSS4_COLORS
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
rgb2hex = to_hex
hex2color = to_rgb
| ColorSequenceRegistry |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 15881,
"end": 16106
} | class ____(PublicKeyMsg):
type: KeyAlgo
enc_a: binary_string
comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
@dataclasses.dataclass(order=True, slots=True)
| Ed25519PublicKeyMsg |
python | streamlit__streamlit | lib/tests/streamlit/elements/date_input_test.py | {
"start": 1190,
"end": 19224
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall date_input protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.date_input("the label")
c = self.get_delta_from_queue().new_element.date_input
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert (
datetime.strptime(c.default[0], "%Y/%m/%d").date() <= datetime.now().date()
)
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.date_input("the label", disabled=True)
c = self.get_delta_from_queue().new_element.date_input
assert c.disabled
def test_none_value(self):
"""Test that it can be called with None as value."""
st.date_input("the label", value=None)
c = self.get_delta_from_queue().new_element.date_input
assert c.label == "the label"
# If a proto property is null is not determined by this value,
# but by the check via the HasField method:
assert c.default == []
@parameterized.expand(
[
# Epoch
(date(1970, 1, 1), ["1970/01/01"]),
# All scalar types
(date(1971, 2, 3), ["1971/02/03"]),
(datetime(2019, 7, 6, 21, 15), ["2019/07/06"]),
("1971-02-03", ["1971/02/03"]),
("1971-02-03 12:34:56", ["1971/02/03"]),
# Lists
([], []),
([datetime(2019, 7, 6, 21, 15)], ["2019/07/06"]),
(
[date(2019, 7, 6), date(2020, 8, 7)],
["2019/07/06", "2020/08/07"],
),
(
[datetime(2019, 7, 6, 21, 15), datetime(2020, 8, 7, 21, 15)],
["2019/07/06", "2020/08/07"],
),
(
["2019-07-06", "2020-08-07"],
["2019/07/06", "2020/08/07"],
),
# Mixed list
(
[date(2019, 7, 6), datetime(2020, 8, 7, 21, 15)],
["2019/07/06", "2020/08/07"],
),
]
)
def test_value_types(self, arg_value, proto_value):
"""Test that it supports different types of values."""
st.date_input("the label", arg_value)
c = self.get_delta_from_queue().new_element.date_input
assert c.label == "the label"
assert c.default == proto_value
@parameterized.expand(
[
("2000-01-02", "1999-10-11", "2001-02-03"),
("2000-01-02", "1999-10-11 12:34:56", "2001-02-03 11:22:33"),
("2000-01-02", date(1999, 10, 11), date(2001, 2, 3)),
("2000-01-02", datetime(1999, 10, 11), datetime(2001, 2, 3)),
]
)
def test_min_max_value_types(self, arg_value, min_date_value, max_date_value):
"""Test the datatypes accepted by min_value/max_value."""
st.date_input("the label", arg_value, min_date_value, max_date_value)
c = self.get_delta_from_queue().new_element.date_input
assert c.label == "the label"
assert c.min == "1999/10/11"
assert c.max == "2001/02/03"
@parameterized.expand(
[
(date(1961, 4, 12), "1951/04/12", "1971/04/12"),
(date(2020, 2, 29), "2010/02/28", "2030/02/28"),
# TODO: Find a way to mock date.today()
# Add test for empty value list case
([date(2021, 4, 26)], "2011/04/26", "2031/04/26"),
([date(2007, 2, 4), date(2012, 1, 3)], "1997/02/04", "2022/01/03"),
]
)
def test_min_max_values(self, arg_value, min_date_value, max_date_value):
"""Test that it calculates min, max date value range if not provided"""
st.date_input("the label", arg_value)
c = self.get_delta_from_queue().new_element.date_input
assert c.label == "the label"
assert c.min == min_date_value
assert c.max == max_date_value
@parameterized.expand(
[
(
TODAY,
TODAY + timedelta(days=7),
TODAY + timedelta(days=14),
),
(
TODAY + timedelta(days=8),
TODAY,
TODAY + timedelta(days=7),
),
(
[TODAY, TODAY + timedelta(2)],
TODAY + timedelta(days=7),
TODAY + timedelta(days=14),
),
(
[TODAY, TODAY + timedelta(8)],
TODAY + timedelta(days=7),
TODAY + timedelta(days=14),
),
(
[TODAY, TODAY + timedelta(8)],
TODAY,
TODAY + timedelta(days=7),
),
]
)
def test_value_out_of_range(self, value, min_date, max_date):
with pytest.raises(StreamlitAPIException) as exc_message:
st.date_input(
"the label", value=value, min_value=min_date, max_value=max_date
)
if isinstance(value, (date, datetime)):
value = [value]
value = [v.date() if isinstance(v, datetime) else v for v in value]
assert (
f"The default `value` of {value} must lie between the `min_value` of {min_date.date()} "
f"and the `max_value` of {max_date.date()}, inclusively."
== str(exc_message.value)
)
@parameterized.expand(
[
(TODAY, TODAY, TODAY + timedelta(days=14)),
(
TODAY + timedelta(days=14),
TODAY,
TODAY + timedelta(days=14),
),
(
TODAY + timedelta(days=10),
TODAY,
TODAY + timedelta(days=14),
),
(
[TODAY + timedelta(1), TODAY + timedelta(2)],
TODAY,
TODAY + timedelta(days=14),
),
(
[TODAY, TODAY + timedelta(14)],
TODAY,
TODAY + timedelta(days=14),
),
]
)
def test_value_in_range(self, value, min_date, max_date):
st.date_input("the label", value=value, min_value=min_date, max_value=max_date)
# No need to assert anything. Testing if not throwing an error.
def test_default_min_if_today_is_before_min(self):
min_date = date(9998, 2, 28)
st.date_input("the label", min_value=min_date, max_value=date(9999, 2, 28))
c = self.get_delta_from_queue().new_element.date_input
assert datetime.strptime(c.default[0], "%Y/%m/%d").date() == min_date
def test_default_max_if_today_is_after_min(self):
max_date = date(1001, 2, 28)
st.date_input("the label", min_value=date(1000, 2, 28), max_value=max_date)
c = self.get_delta_from_queue().new_element.date_input
assert datetime.strptime(c.default[0], "%Y/%m/%d").date() == max_date
def test_range_session_state(self):
"""Test a range set by session state."""
date_range_input = [date(2024, 1, 15), date(2024, 1, 15) + timedelta(2)]
state = st.session_state
state["date_range"] = date_range_input[:]
date_range = st.date_input(
"select a date range",
key="date_range",
)
c = self.get_delta_from_queue().new_element.date_input
assert date_range == date_range_input
assert c.value == ["2024/01/15", "2024/01/17"]
assert c.is_range
def test_inside_column(self):
"""Test that it works correctly inside a column."""
col1, _col2 = st.columns(2)
with col1:
st.date_input("foo")
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
date_input_proto = self.get_delta_from_queue().new_element.date_input
assert date_input_proto.label == "foo"
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.date_input("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.date_input
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.date_input("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
@parameterized.expand(
[
("YYYY/MM/DD"),
("DD/MM/YYYY"),
("MM/DD/YYYY"),
("YYYY.MM.DD"),
("DD.MM.YYYY"),
("MM.DD.YYYY"),
("YYYY-MM-DD"),
("DD-MM-YYYY"),
("MM-DD-YYYY"),
]
)
def test_supported_date_format_values(self, format: str):
"""Test that it can be called with supported date formats."""
st.date_input("the label", format=format)
msg = self.get_delta_from_queue().new_element.date_input
assert msg.label == "the label"
assert msg.format == format
@parameterized.expand(
[
("YYYY:MM:DD"), # Unexpected separator
("DD:MM:YYYY"), # Unexpected separator
("MM:DD:YYYY"), # Unexpected separator
("YYYY/DD/MM"), # Incorrect order
("DD/YYYY/MM"), # Incorrect order
("MM/YYYY/DD"), # Incorrect order
("YYYY/MM/DDo"), # Unsupported format
("DDo/MM/YYYY"), # Unsupported format
("Mo/DD/YYYY"), # Unsupported format
("Q/DD/YYYY"), # Unsupported format
("YYYY/QQ/DD"), # Unsupported format
("YYYY/Q/DD"), # Unsupported format
("YYYY/MM/DD HH:mm:ss"), # Unsupported format
(""), # Empty not allowed
]
)
def test_invalid_date_format_values(self, format: str):
"""Test that it raises an exception for invalid date formats."""
with pytest.raises(StreamlitAPIException) as ex:
st.date_input("the label", format=format)
assert str(ex.value).startswith("The provided format")
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.date_input("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_width_config_default(self):
"""Test that default width is 'stretch'."""
st.date_input("the label")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
def test_width_config_pixel(self):
"""Test that pixel width works properly."""
st.date_input("the label", width=200)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 200
def test_width_config_stretch(self):
"""Test that 'stretch' width works properly."""
st.date_input("the label", width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
[
"invalid",
-100,
0,
100.5,
None,
]
)
def test_invalid_width(self, width):
"""Test that invalid width values raise exceptions."""
with pytest.raises(StreamlitInvalidWidthError):
st.date_input("the label", width=width)
def test_stable_id_with_key_single(self):
"""Test that the widget ID is stable for single date when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params (keep whitelisted kwargs stable)
st.date_input(
label="Label 1",
key="date_input_key",
value=date(2020, 1, 1),
help="Help 1",
disabled=False,
width="stretch",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
# Whitelisted kwargs:
min_value=date(2010, 1, 1),
max_value=date(2030, 1, 1),
format="YYYY/MM/DD",
)
c1 = self.get_delta_from_queue().new_element.date_input
id1 = c1.id
# Second render with different non-whitelisted params but same key
st.date_input(
label="Label 2",
key="date_input_key",
value=date(2021, 1, 1),
help="Help 2",
disabled=True,
width=200,
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
# Keep whitelisted the same to ensure ID stability
min_value=date(2010, 1, 1),
max_value=date(2030, 1, 1),
format="YYYY/MM/DD",
)
c2 = self.get_delta_from_queue().new_element.date_input
id2 = c2.id
assert id1 == id2
@parameterized.expand(
[
("min_value", date(2010, 1, 1), date(2011, 1, 1)),
("max_value", date(2030, 1, 1), date(2031, 1, 1)),
]
)
def test_whitelisted_stable_key_kwargs_single(self, kwarg_name, value1, value2):
"""Test that the widget ID changes for single mode when a whitelisted kwarg changes even
when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs = {
"label": "Label",
"key": "date_input_key_1",
"value": date(2020, 1, 1),
"min_value": date(2010, 1, 1),
"max_value": date(2030, 1, 1),
}
base_kwargs[kwarg_name] = value1
st.date_input(**base_kwargs)
c1 = self.get_delta_from_queue().new_element.date_input
id1 = c1.id
base_kwargs[kwarg_name] = value2
st.date_input(**base_kwargs)
c2 = self.get_delta_from_queue().new_element.date_input
id2 = c2.id
assert id1 != id2
@parameterized.expand(
[
("min_value", date(2009, 7, 6), date(2010, 7, 6)),
("max_value", date(2029, 7, 8), date(2030, 7, 8)),
("format", "YYYY/MM/DD", "DD/MM/YYYY"),
]
)
def test_whitelisted_stable_key_kwargs_range(self, kwarg_name, value1, value2):
"""Test that the widget ID changes for range mode when a whitelisted kwarg changes
even when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs = {
"label": "Label",
"key": "date_input_key_2",
"value": (date(2019, 7, 6), date(2019, 7, 8)),
"min_value": date(2009, 7, 6),
"max_value": date(2029, 7, 8),
"format": "YYYY/MM/DD",
}
base_kwargs[kwarg_name] = value1
st.date_input(**base_kwargs)
c1 = self.get_delta_from_queue().new_element.date_input
id1 = c1.id
base_kwargs[kwarg_name] = value2
st.date_input(**base_kwargs)
c2 = self.get_delta_from_queue().new_element.date_input
id2 = c2.id
assert id1 != id2
def test_date_input_interaction():
"""Test interactions with an empty date_input widget."""
def script():
import streamlit as st
st.date_input("the label", value=None)
at = AppTest.from_function(script).run()
date_input = at.date_input[0]
assert date_input.value is None
# Set the value to a specific date
at = date_input.set_value(date(2012, 1, 3)).run()
date_input = at.date_input[0]
assert date_input.value == date(2012, 1, 3)
# # Clear the value
at = date_input.set_value(None).run()
date_input = at.date_input[0]
assert date_input.value is None
def test_None_session_state_value_retained():
def script():
import streamlit as st
if "date_input" not in st.session_state:
st.session_state["date_input"] = None
st.date_input("date_input", key="date_input")
st.button("button")
at = AppTest.from_function(script).run()
at = at.button[0].click().run()
assert at.date_input[0].value is None
| DateInputTest |
python | django-import-export__django-import-export | tests/core/tests/test_base_formats.py | {
"start": 7468,
"end": 8318
} | class ____(TestCase):
def setUp(self):
self.format = base_formats.TSV()
def test_import_mac(self):
filename = os.path.join(
os.path.dirname(__file__), os.path.pardir, "exports", "books-mac.tsv"
)
with open(filename, self.format.get_read_mode()) as in_stream:
actual = in_stream.read()
expected = "id\tname\tauthor_email\n1\tSome book\ttest@example.com\n"
self.assertEqual(actual, expected)
def test_import_unicode(self):
# importing tsv UnicodeEncodeError
filename = os.path.join(
os.path.dirname(__file__), os.path.pardir, "exports", "books-unicode.tsv"
)
with open(filename, self.format.get_read_mode()) as in_stream:
data = force_str(in_stream.read())
base_formats.TSV().create_dataset(data)
| TSVTest |
python | django__django | tests/admin_views/forms.py | {
"start": 193,
"end": 520
} | class ____(AdminAuthenticationForm):
class Media:
css = {"all": ("path/to/media.css",)}
def clean_username(self):
username = self.cleaned_data.get("username")
if username == "customform":
raise ValidationError("custom form error")
return username
| CustomAdminAuthenticationForm |
python | fastai__fastai | fastai/torch_core.py | {
"start": 20265,
"end": 20417
} | class ____(TensorBase): pass
TensorImage.register_func(F.grid_sample, TensorImageBase, TensorFlowField)
# %% ../nbs/00_torch_core.ipynb 119
| TensorFlowField |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 7329,
"end": 7980
} | class ____(TestCase):
def test_goodvalues(self):
z = np.array((-1.0, 0.0, 1.0))
res = np.isnan(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
assert_all(np.isnan(np.array((1.0,)) / 0.0) == 0)
def test_neginf(self):
assert_all(np.isnan(np.array((-1.0,)) / 0.0) == 0)
def test_ind(self):
assert_all(np.isnan(np.array((0.0,)) / 0.0) == 1)
def test_integer(self):
assert_all(np.isnan(1) == 0)
def test_complex(self):
assert_all(np.isnan(1 + 1j) == 0)
def test_complex1(self):
assert_all(np.isnan(np.array(0 + 0j) / 0.0) == 1)
| TestIsnan |
python | Pylons__pyramid | tests/test_config/test_actions.py | {
"start": 20198,
"end": 22018
} | class ____(unittest.TestCase):
def _makeConfigurator(self, *arg, **kw):
from pyramid.config import Configurator
config = Configurator(*arg, **kw)
return config
def test_functional(self):
def add_auto_route(config, name, view):
def register():
config.add_view(route_name=name, view=view)
config.add_route(name, '/' + name)
config.action(('auto route', name), register, order=-30)
config = self._makeConfigurator()
config.add_directive('add_auto_route', add_auto_route)
def my_view(request): # pragma: no cover
return request.response
config.add_auto_route('foo', my_view)
config.commit()
from pyramid.interfaces import IRoutesMapper
mapper = config.registry.getUtility(IRoutesMapper)
routes = mapper.get_routes()
route = routes[0]
self.assertEqual(len(routes), 1)
self.assertEqual(route.name, 'foo')
self.assertEqual(route.path, '/foo')
def test_deferred_discriminator(self):
# see https://github.com/Pylons/pyramid/issues/2697
from pyramid.config import PHASE0_CONFIG
config = self._makeConfigurator()
def deriver(view, info):
return view
deriver.options = ('foo',)
config.add_view_deriver(deriver, 'foo_view')
# add_view uses a deferred discriminator and will fail if executed
# prior to add_view_deriver executing its action
config.add_view(lambda r: r.response, name='', foo=1)
def dummy_action():
# trigger a re-entrant action
config.action(None, lambda: None)
config.action(None, dummy_action, order=PHASE0_CONFIG)
config.commit()
| Test_reentrant_action_functional |
python | getsentry__sentry | tests/sentry/utils/test_query.py | {
"start": 6304,
"end": 6447
} | class ____(RangeQuerySetWrapperTest):
range_wrapper = RangeQuerySetWrapperWithProgressBarApprox
| RangeQuerySetWrapperWithProgressBarApproxTest |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 24241,
"end": 25756
} | class ____(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
super().__init__()
self.block_1 = Emu3VQVAEResnetBlock(
in_channels=in_channels,
out_channels=in_channels,
quant_channels=quant_channels,
)
self.attn_1 = Emu3VQVAEAttentionBlock(config)
if quant_channels is None:
self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
else:
self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.block_2 = Emu3VQVAEResnetBlock(
in_channels=in_channels,
out_channels=in_channels,
quant_channels=quant_channels,
)
def forward(self, hidden_states: torch.FloatTensor, quant_states: Optional[torch.FloatTensor] = None):
hidden_states = self.block_1(hidden_states, quant_states)
residual = hidden_states
hidden_states = self.attn_norm(hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = self.attn_1(hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
hidden_states = self.block_2(hidden_states, quant_states)
return hidden_states
| Emu3VQVAEMiddleBlock |
python | apache__airflow | helm-tests/tests/helm_tests/redis/test_labels_serviceaccount.py | {
"start": 900,
"end": 4328
} | class ____:
"""Tests redis service account labels."""
AIRFLOW_EXECUTOR = "CeleryExecutor"
TEMPLATE_FILE = "templates/redis/redis-serviceaccount.yaml"
def test_should_add_global_labels(self):
"""Test adding only .Values.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"serviceAccount": {"create": True},
},
"labels": {"test_global_label": "test_global_label_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
def test_should_add_component_specific_labels(self):
"""Test adding only .Values.redis.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"serviceAccount": {"create": True},
"labels": {"test_component_label": "test_component_label_value"},
},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
assert (
jmespath.search("metadata.labels", docs[0])["test_component_label"]
== "test_component_label_value"
)
def test_should_merge_global_and_component_specific_labels(self):
"""Test adding both .Values.labels and .Values.redis.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"serviceAccount": {"create": True},
"labels": {"test_component_label": "test_component_label_value"},
},
"labels": {"test_global_label": "test_global_label_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
assert (
jmespath.search("metadata.labels", docs[0])["test_component_label"]
== "test_component_label_value"
)
def test_component_specific_labels_should_override_global_labels(self):
"""Test that component-specific labels take precedence over global labels with the same key."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"serviceAccount": {"create": True},
"labels": {"common_label": "component_value"},
},
"labels": {"common_label": "global_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "common_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["common_label"] == "component_value"
| TestRedisServiceAccount |
python | cython__cython | tests/run/test_named_expressions.py | {
"start": 2989,
"end": 9995
} | class ____(unittest.TestCase):
def test_named_expression_invalid_01(self):
code = """x := 0"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_02(self):
code = """x = y := 0"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_03(self):
code = """y := f(x)"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_04(self):
code = """y0 = y1 := f(x)"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_06(self):
code = """((a, b) := (1, 2))"""
# TODO Cython correctly generates an error but the message could be better
with self.assertRaisesRegex(SyntaxError, ""):
exec(code, {}, {})
def test_named_expression_invalid_07(self):
code = """def spam(a = b := 42): pass"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_08(self):
code = """def spam(a: b := 42 = 5): pass"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_09(self):
code = """spam(a=b := 'c')"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_10(self):
code = """spam(x = y := f(x))"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_11(self):
code = """spam(a=1, b := 2)"""
with self.assertRaisesRegex(SyntaxError,
"follow.* keyword arg"):
exec(code, {}, {})
def test_named_expression_invalid_12(self):
code = """spam(a=1, (b := 2))"""
with self.assertRaisesRegex(SyntaxError,
"follow.* keyword arg"):
exec(code, {}, {})
def test_named_expression_invalid_13(self):
code = """spam(a=1, (b := 2))"""
with self.assertRaisesRegex(SyntaxError,
"follow.* keyword arg"):
exec(code, {}, {})
def test_named_expression_invalid_14(self):
code = """(x := lambda: y := 1)"""
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(code, {}, {})
def test_named_expression_invalid_15(self):
code = """(lambda: x := 1)"""
# TODO at the moment the error message is valid, but not the same as Python
with self.assertRaisesRegex(SyntaxError,
""):
exec(code, {}, {})
def test_named_expression_invalid_16(self):
code = "[i + 1 for i in i := [1,2]]"
# TODO at the moment the error message is valid, but not the same as Python
with self.assertRaisesRegex(SyntaxError, ""):
exec(code, {}, {})
def test_named_expression_invalid_17(self):
code = "[i := 0, j := 1 for i, j in [(1, 2), (3, 4)]]"
# TODO at the moment the error message is valid, but not the same as Python
with self.assertRaisesRegex(SyntaxError, ""):
exec(code, {}, {})
def test_named_expression_invalid_in_class_body(self):
code = """class Foo():
[(42, 1 + ((( j := i )))) for i in range(5)]
"""
with self.assertRaisesRegex(SyntaxError,
"assignment expression within a comprehension cannot be used in a class body"):
exec(code, {}, {})
def test_named_expression_invalid_rebinding_comprehension_iteration_variable(self):
cases = [
("Local reuse", 'i', "[i := 0 for i in range(5)]"),
("Nested reuse", 'j', "[[(j := 0) for i in range(5)] for j in range(5)]"),
("Reuse inner loop target", 'j', "[(j := 0) for i in range(5) for j in range(5)]"),
("Unpacking reuse", 'i', "[i := 0 for i, j in [(0, 1)]]"),
("Reuse in loop condition", 'i', "[i+1 for i in range(5) if (i := 0)]"),
("Unreachable reuse", 'i', "[False or (i:=0) for i in range(5)]"),
("Unreachable nested reuse", 'i',
"[(i, j) for i in range(5) for j in range(5) if True or (i:=10)]"),
]
for case, target, code in cases:
msg = f"assignment expression cannot rebind comprehension iteration variable '{target}'"
with self.subTest(case=case):
with self.assertRaisesRegex(SyntaxError, msg):
exec(code, {}, {})
def test_named_expression_invalid_rebinding_comprehension_inner_loop(self):
cases = [
("Inner reuse", 'j', "[i for i in range(5) if (j := 0) for j in range(5)]"),
("Inner unpacking reuse", 'j', "[i for i in range(5) if (j := 0) for j, k in [(0, 1)]]"),
]
for case, target, code in cases:
msg = f"comprehension inner loop cannot rebind assignment expression target '{target}'"
with self.subTest(case=case):
with self.assertRaisesRegex(SyntaxError, msg):
exec(code, {}) # Module scope
with self.assertRaisesRegex(SyntaxError, msg):
exec(code, {}, {}) # Class scope
with self.assertRaisesRegex(SyntaxError, msg):
exec(f"lambda: {code}", {}) # Function scope
def test_named_expression_invalid_comprehension_iterable_expression(self):
cases = [
("Top level", "[i for i in (i := range(5))]"),
("Inside tuple", "[i for i in (2, 3, i := range(5))]"),
("Inside list", "[i for i in [2, 3, i := range(5)]]"),
("Different name", "[i for i in (j := range(5))]"),
("Lambda expression", "[i for i in (lambda:(j := range(5)))()]"),
("Inner loop", "[i for i in range(5) for j in (i := range(5))]"),
("Nested comprehension", "[i for i in [j for j in (k := range(5))]]"),
("Nested comprehension condition", "[i for i in [j for j in range(5) if (j := True)]]"),
("Nested comprehension body", "[i for i in [(j := True) for j in range(5)]]"),
]
msg = "assignment expression cannot be used in a comprehension iterable expression"
for case, code in cases:
with self.subTest(case=case):
with self.assertRaisesRegex(SyntaxError, msg):
exec(code, {}) # Module scope - FIXME this test puts it in __invoke in cython_inline
with self.assertRaisesRegex(SyntaxError, msg):
exec(code, {}, {}) # Class scope
with self.assertRaisesRegex(SyntaxError, msg):
exec(f"lambda: {code}", {}) # Function scope
| NamedExpressionInvalidTest |
python | kamyu104__LeetCode-Solutions | Python/flip-equivalent-binary-trees.py | {
"start": 227,
"end": 1110
} | class ____(object):
def flipEquiv(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
dq1, dq2 = collections.deque([root1]), collections.deque([root2])
while dq1 and dq2:
node1, node2 = dq1.pop(), dq2.pop()
if not node1 and not node2:
continue
if not node1 or not node2 or node1.val != node2.val:
return False
if (not node1.left and not node2.right) or \
(node1.left and node2.right and node1.left.val == node2.right.val):
dq1.extend([node1.right, node1.left])
else:
dq1.extend([node1.left, node1.right])
dq2.extend([node2.left, node2.right])
return not dq1 and not dq2
# Time: O(n)
# Space: O(h)
# iterative dfs solution
| Solution |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 1836,
"end": 5362
} | class ____:
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.nv = NodeDataView(cls.G)
cls.ndv = cls.G.nodes.data(True)
cls.nwv = cls.G.nodes.data("foo")
def test_viewtype(self):
nv = self.G.nodes
ndvfalse = nv.data(False)
assert nv is ndvfalse
assert nv is not self.ndv
def test_pickle(self):
import pickle
nv = self.nv
pnv = pickle.loads(pickle.dumps(nv, -1))
assert nv == pnv
assert nv.__slots__ == pnv.__slots__
def test_str(self):
msg = str([(n, {}) for n in range(9)])
assert str(self.ndv) == msg
def test_repr(self):
expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
assert repr(self.nv) == expected
expected = (
"NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, "
+ "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})"
)
assert repr(self.ndv) == expected
expected = (
"NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, "
+ "5: None, 6: None, 7: None, 8: None}, data='foo')"
)
assert repr(self.nwv) == expected
def test_contains(self):
G = self.G.copy()
nv = G.nodes.data()
nwv = G.nodes.data("foo")
G.nodes[3]["foo"] = "bar"
assert (7, {}) in nv
assert (3, {"foo": "bar"}) in nv
assert (3, "bar") in nwv
assert (7, None) in nwv
# default
nwv_def = G.nodes(data="foo", default="biz")
assert (7, "biz") in nwv_def
assert (3, "bar") in nwv_def
def test_getitem(self):
G = self.G.copy()
nv = G.nodes
G.nodes[3]["foo"] = "bar"
assert nv[3] == {"foo": "bar"}
# default
nwv_def = G.nodes(data="foo", default="biz")
assert nwv_def[7], "biz"
assert nwv_def[3] == "bar"
# slicing
with pytest.raises(nx.NetworkXError):
G.nodes.data()[0:5]
def test_iter(self):
G = self.G.copy()
nv = G.nodes.data()
ndv = G.nodes.data(True)
nwv = G.nodes.data("foo")
for i, (n, d) in enumerate(nv):
assert i == n
assert d == {}
inv = iter(nv)
assert next(inv) == (0, {})
G.nodes[3]["foo"] = "bar"
# default
for n, d in nv:
if n == 3:
assert d == {"foo": "bar"}
else:
assert d == {}
# data=True
for n, d in ndv:
if n == 3:
assert d == {"foo": "bar"}
else:
assert d == {}
# data='foo'
for n, d in nwv:
if n == 3:
assert d == "bar"
else:
assert d is None
# data='foo', default=1
for n, d in G.nodes.data("foo", default=1):
if n == 3:
assert d == "bar"
else:
assert d == 1
def test_nodedataview_unhashable():
G = nx.path_graph(9)
G.nodes[3]["foo"] = "bar"
nvs = [G.nodes.data()]
nvs.append(G.nodes.data(True))
H = G.copy()
H.nodes[4]["foo"] = {1, 2, 3}
nvs.append(H.nodes.data(True))
# raise unhashable
for nv in nvs:
pytest.raises(TypeError, set, nv)
pytest.raises(TypeError, eval, "nv | nv", locals())
# no raise... hashable
Gn = G.nodes.data(False)
set(Gn)
Gn | Gn
Gn = G.nodes.data("foo")
set(Gn)
Gn | Gn
| TestNodeDataView |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/cutlass_lib_extensions/cutlass_mock_imports/cuda/cuda.py | {
"start": 162,
"end": 204
} | class ____:
CUDA_SUCCESS = True
| CUresult |
python | bokeh__bokeh | src/bokeh/util/callback_manager.py | {
"start": 4349,
"end": 7653
} | class ____:
''' A mixin class to provide an interface for registering and
triggering callbacks.
'''
document: Document | None
_callbacks: dict[str, list[PropertyCallback]]
def __init__(self, *args: Any, **kw: Any) -> None:
super().__init__(*args, **kw)
self._callbacks = {}
def on_change(self, attr: str, *callbacks: PropertyCallback) -> None:
''' Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
callback (callable) : a callback function to register
Returns:
None
'''
if len(callbacks) == 0:
raise ValueError("on_change takes an attribute name and one or more callbacks, got only one parameter")
_callbacks = self._callbacks.setdefault(attr, [])
for callback in callbacks:
if callback in _callbacks:
continue
_check_callback(callback, ('attr', 'old', 'new'))
_callbacks.append(callback)
def remove_on_change(self, attr: str, *callbacks: PropertyCallback) -> None:
''' Remove a callback from this object '''
if len(callbacks) == 0:
raise ValueError("remove_on_change takes an attribute name and one or more callbacks, got only one parameter")
_callbacks = self._callbacks.setdefault(attr, [])
for callback in callbacks:
_callbacks.remove(callback)
def trigger(self, attr: str, old: Any, new: Any,
hint: DocumentPatchedEvent | None = None, setter: Setter | None = None) -> None:
''' Trigger callbacks for ``attr`` on this object.
'''
def invoke() -> None:
callbacks = self._callbacks.get(attr)
if callbacks:
for callback in callbacks:
callback(attr, old, new)
if self.document is not None:
from ..model import Model
self.document.callbacks.notify_change(cast(Model, self), attr, old, new, hint, setter, invoke)
else:
invoke()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _nargs(fn: Callable[..., Any]) -> int:
sig = signature(fn)
all_names, default_values = get_param_info(sig)
return len(all_names) - len(default_values)
def _check_callback(callback: Callable[..., Any], fargs: Sequence[str], what: str ="Callback functions") -> None:
'''Bokeh-internal function to check callback signature'''
sig = signature(callback)
all_names, default_values = get_param_info(sig)
nargs = len(all_names) - len(default_values)
if nargs != len(fargs):
expected = ", ".join(fargs)
received = str(sig)
raise ValueError(f"{what} must have signature func({expected}), got func{received}")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| PropertyCallbackManager |
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_ranking.py | {
"start": 7980,
"end": 8386
} | class ____(UptimeTestCase):
def test(self) -> None:
assert should_autodetect_for_project(self.project)
self.project.update_option("sentry:uptime_autodetection", False)
assert not should_autodetect_for_project(self.project)
self.project.update_option("sentry:uptime_autodetection", True)
assert should_autodetect_for_project(self.project)
| ShouldDetectForProjectTest |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 9890,
"end": 10107
} | class ____:
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
| MixinForBoringModel |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 3139,
"end": 3348
} | class ____(graphene.Interface):
results = non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun")
count = graphene.Int()
class Meta:
name = "PipelineRuns"
| GraphenePipelineRuns |
python | pytorch__pytorch | test/test_mps.py | {
"start": 362029,
"end": 370577
} | class ____(TestCaseMPS):
def _wrap_tensor(self, x, device="cpu", dtype=None, requires_grad=False):
return torch.tensor(x, device=device, dtype=dtype, requires_grad=requires_grad)
def test_logical_not(self):
def helper(x):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
result = torch.logical_not(x)
result_cpu = torch.logical_not(cpu_x)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]))
helper(self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True))
helper(self._wrap_tensor([True, True, False, False]))
helper(self._wrap_tensor(1))
helper(self._wrap_tensor(0))
helper(self._wrap_tensor(True))
helper(self._wrap_tensor(False))
def test_logical_and(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_and(x, other)
result_cpu = torch.logical_and(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
def test_logical_or(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_or(x, other)
result_cpu = torch.logical_or(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
def test_logical_xor(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_xor(x, other)
result_cpu = torch.logical_xor(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor([1, 0, 0, 1]))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
@parametrize("dtype", [torch.float32, torch.float16, torch.int32, torch.int16, torch.uint8, torch.int8, torch.bool])
def test_min_max(self, dtype):
for _ in range(10):
if dtype == torch.float32 or dtype == torch.float16:
x = torch.randn((30, 15), device='mps', dtype=dtype)
else:
x = torch.randint(0, 100, (30, 15), device="mps", dtype=dtype)
x_cpu = x.to("cpu")
y = x.max()
y_cpu = x_cpu.max()
self.assertEqual(y, y_cpu)
z = x.min()
z_cpu = x_cpu.min()
self.assertEqual(z, z_cpu)
@parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
def test_min_max_nan_propagation(self, dtype):
cpu_x = torch.tensor([1.0, float("nan"), 3.0], device="cpu", dtype=dtype)
mps_x = cpu_x.detach().clone().to('mps')
cpu_max = torch.max(cpu_x)
mps_max = torch.max(mps_x).to('cpu')
cpu_amax = torch.amax(cpu_x)
mps_amax = torch.amax(mps_x).to('cpu')
cpu_min = torch.min(cpu_x)
mps_min = torch.min(mps_x).to('cpu')
cpu_amin = torch.amin(cpu_x)
mps_amin = torch.amin(mps_x).to('cpu')
self.assertEqual(cpu_max, mps_max)
self.assertEqual(cpu_amax, mps_amax)
self.assertEqual(cpu_min, mps_min)
self.assertEqual(cpu_amin, mps_amin)
def test_isin(self):
def helper(dtype):
shapes = [([2, 5], [3, 5, 2]), ([10, 3, 5], [20, 1, 3]),
([5], [10]), ([0], [5]), ([5], [0])]
for shape_tuple in shapes:
for inverted in [True, False]:
if dtype.is_floating_point:
# Half is not supported for CPU isin. Compute reference in FP32
A = torch.randn(size=shape_tuple[0], device='cpu', dtype=torch.float32)
B = torch.randn(size=shape_tuple[1], device='cpu', dtype=torch.float32)
else:
A = torch.randint(0, 100, size=shape_tuple[0], device='cpu', dtype=dtype)
B = torch.randint(0, 100, size=shape_tuple[1], device='cpu', dtype=dtype)
A_mps = A.detach().clone().to('mps')
B_mps = B.detach().clone().to('mps')
cpu_ref = torch.isin(A, B, invert=inverted)
if dtype in [torch.float16, torch.bfloat16]:
cpu_ref.type(dtype)
mps_out = torch.isin(A_mps, B_mps, invert=inverted)
self.assertEqual(mps_out, cpu_ref)
dtypes = [torch.float32, torch.float16, torch.bfloat16, torch.int32, torch.int16, torch.uint8, torch.int8]
[helper(dtype) for dtype in dtypes]
# Mixed dtypes (see https://github.com/pytorch/pytorch/issues/151443 )
x = torch.arange(4.0, device="mps")
y = torch.tensor([1, 3], device="mps", dtype=torch.float16)
self.assertEqual(torch.isin(x, y), torch.tensor([False, True, False, True], device="mps"))
# Tensor.Scalar variant (aliases to eq), not covered by OpInfo
self.assertEqual(torch.isin(x, 2.0), torch.tensor([False, False, True, False], device="mps"))
self.assertEqual(torch.isin(x, 1.0, invert=True), torch.tensor([True, False, True, True], device="mps"))
self.assertEqual(torch.isin(x, 8.0), torch.tensor([False, False, False, False], device="mps"))
# Scalar.Tensor variant(alaises to Scalar.Scalar), not covered by OpInfo
self.assertEqual(torch.isin(2.0, x), torch.tensor(True, device="mps"))
def test_isin_asserts(self):
C = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
D = torch.randn(size=[1, 4], device='cpu', dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, 'Expected elements.is_mps()*'):
out = torch.isin(C, D)
@parametrize("dtype", [torch.int32, torch.int64, torch.int16, torch.int8, torch.uint8, torch.bool])
def test_shifts(self, dtype):
x = make_tensor(256, device="mps", dtype=dtype)
if dtype is not torch.bool:
x[3] = torch.iinfo(dtype).max
x[5] = torch.iinfo(dtype).min
x_cpu = x.cpu()
self.assertEqual((x >> 3).cpu(), x_cpu >> 3)
self.assertEqual((x << 1).cpu(), x_cpu << 1)
# Regression test for https://github.com/pytorch/pytorch/issues/147889
x = x.clamp(0, 8)
x_cpu = x.cpu()
self.assertEqual((4095 >> x).cpu(), 4095 >> x_cpu)
self.assertEqual((257 << x).cpu(), 257 << x_cpu)
| TestLogical |
python | numpy__numpy | numpy/_core/tests/test_defchararray.py | {
"start": 4623,
"end": 6190
} | class ____:
def A(self):
return np.array([['abc', 'abcc', '123'],
['789', 'abc', 'xyz']]).view(np.char.chararray)
def B(self):
return np.array([['efg', 'efg', '123 '],
['051', 'efgg', 'tuv']]).view(np.char.chararray)
def test_not_equal(self):
A, B = self.A(), self.B()
assert_array_equal((A != B),
[[True, True, False], [True, True, True]])
def test_equal(self):
A, B = self.A(), self.B()
assert_array_equal((A == B),
[[False, False, True], [False, False, False]])
def test_greater_equal(self):
A, B = self.A(), self.B()
assert_array_equal((A >= B),
[[False, False, True], [True, False, True]])
def test_less_equal(self):
A, B = self.A(), self.B()
assert_array_equal((A <= B),
[[True, True, True], [False, True, False]])
def test_greater(self):
A, B = self.A(), self.B()
assert_array_equal((A > B),
[[False, False, False], [True, False, True]])
def test_less(self):
A, B = self.A(), self.B()
assert_array_equal((A < B),
[[True, True, False], [False, True, False]])
def test_type(self):
A, B = self.A(), self.B()
out1 = np.char.equal(A, B)
out2 = np.char.equal('a', 'a')
assert_(isinstance(out1, np.ndarray))
assert_(isinstance(out2, np.ndarray))
| TestComparisons |
python | spack__spack | var/spack/test_repos/spack_repo/edges_test/packages/conditional_edge/package.py | {
"start": 216,
"end": 825
} | class ____(Package):
"""This package has a variant that triggers a condition only if a required dependency is
providing a virtual.
"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("2.0", md5="abcdef0123456789abcdef0123456789")
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("foo", default=False, description="Just a regular foo")
# zlib is a real package, providing zlib-api
depends_on("zlib")
depends_on("zlib-api", when="+foo")
depends_on("zlib@1.0", when="^[virtuals=zlib-api] zlib")
| ConditionalEdge |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 8857,
"end": 8996
} | class ____(MetricObjectDefinition):
raw_metric_mri: str
filters: Callable[..., Function] | None = None
| AliasedDerivedMetricDefinition |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_action_export.py | {
"start": 17499,
"end": 18910
} | class ____(AdminTestMixin, TestCase):
"""
Test config values when export is initiated from the 'Export' action in the action
menu.
"""
def setUp(self):
super().setUp()
self.cat1 = Category.objects.create(name="Cat 1")
self.queryset = Category.objects.all()
self.model_admin = CategoryAdmin(Category, AdminSite())
factory = RequestFactory()
data = {
"action": ["export_admin_action"],
"_selected_action": [str(self.cat1.id)],
}
self.request = factory.post(self.category_change_url, data=data)
self.request.user = User.objects.create_user("admin1")
def test_skip_export_form_from_action_enabled(self):
self.model_admin.skip_export_form_from_action = True
response = self.model_admin.export_admin_action(self.request, self.queryset)
target_file_contents = "id,name\r\n" f"{self.cat1.id},Cat 1\r\n"
self.assertEqual(target_file_contents.encode(), response.content)
@override_settings(IMPORT_EXPORT_SKIP_ADMIN_ACTION_EXPORT_UI=True)
def test_skip_export_form_from_action_setting_enabled(self):
response = self.model_admin.export_admin_action(self.request, self.queryset)
target_file_contents = "id,name\r\n" f"{self.cat1.id},Cat 1\r\n"
self.assertEqual(target_file_contents.encode(), response.content)
| TestSkipExportFormFromAction |
python | doocs__leetcode | lcof2/剑指 Offer II 107. 矩阵中的距离/Solution.py | {
"start": 0,
"end": 693
} | class ____:
def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:
m, n = len(mat), len(mat[0])
ans = [[-1] * n for _ in range(m)]
q = deque()
for i, row in enumerate(mat):
for j, v in enumerate(row):
if v == 0:
ans[i][j] = 0
q.append((i, j))
dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
while q:
i, j = q.popleft()
for a, b in dirs:
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and ans[x][y] == -1:
ans[x][y] = ans[i][j] + 1
q.append((x, y))
return ans
| Solution |
python | django__django | tests/utils_tests/test_http.py | {
"start": 14717,
"end": 15045
} | class ____(unittest.TestCase):
def test(self):
tests = (
("//example.com", "/%2Fexample.com"),
("//", "/%2F"),
)
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(escape_leading_slashes(url), expected)
| EscapeLeadingSlashesTests |
python | coleifer__peewee | tests/models.py | {
"start": 113330,
"end": 121335
} | class ____(BaseTestCase):
def test_field_inheritance(self):
class BaseModel(Model):
class Meta:
database = get_in_memory_db()
class BasePost(BaseModel):
content = TextField()
timestamp = TimestampField()
class Photo(BasePost):
image = TextField()
class Note(BasePost):
category = TextField()
self.assertEqual(BasePost._meta.sorted_field_names,
['id', 'content', 'timestamp'])
self.assertEqual(BasePost._meta.sorted_fields, [
BasePost.id,
BasePost.content,
BasePost.timestamp])
self.assertEqual(Photo._meta.sorted_field_names,
['id', 'content', 'timestamp', 'image'])
self.assertEqual(Photo._meta.sorted_fields, [
Photo.id,
Photo.content,
Photo.timestamp,
Photo.image])
self.assertEqual(Note._meta.sorted_field_names,
['id', 'content', 'timestamp', 'category'])
self.assertEqual(Note._meta.sorted_fields, [
Note.id,
Note.content,
Note.timestamp,
Note.category])
self.assertTrue(id(Photo.id) != id(Note.id))
def test_foreign_key_field_inheritance(self):
class BaseModel(Model):
class Meta:
database = get_in_memory_db()
class Category(BaseModel):
name = TextField()
class BasePost(BaseModel):
category = ForeignKeyField(Category)
timestamp = TimestampField()
class Photo(BasePost):
image = TextField()
class Note(BasePost):
content = TextField()
self.assertEqual(BasePost._meta.sorted_field_names,
['id', 'category', 'timestamp'])
self.assertEqual(BasePost._meta.sorted_fields, [
BasePost.id,
BasePost.category,
BasePost.timestamp])
self.assertEqual(Photo._meta.sorted_field_names,
['id', 'category', 'timestamp', 'image'])
self.assertEqual(Photo._meta.sorted_fields, [
Photo.id,
Photo.category,
Photo.timestamp,
Photo.image])
self.assertEqual(Note._meta.sorted_field_names,
['id', 'category', 'timestamp', 'content'])
self.assertEqual(Note._meta.sorted_fields, [
Note.id,
Note.category,
Note.timestamp,
Note.content])
self.assertEqual(Category._meta.backrefs, {
BasePost.category: BasePost,
Photo.category: Photo,
Note.category: Note})
self.assertEqual(BasePost._meta.refs, {BasePost.category: Category})
self.assertEqual(Photo._meta.refs, {Photo.category: Category})
self.assertEqual(Note._meta.refs, {Note.category: Category})
self.assertEqual(BasePost.category.backref, 'basepost_set')
self.assertEqual(Photo.category.backref, 'photo_set')
self.assertEqual(Note.category.backref, 'note_set')
def test_foreign_key_pk_inheritance(self):
class BaseModel(Model):
class Meta:
database = get_in_memory_db()
class Account(BaseModel): pass
class BaseUser(BaseModel):
account = ForeignKeyField(Account, primary_key=True)
class User(BaseUser):
username = TextField()
class Admin(BaseUser):
role = TextField()
self.assertEqual(Account._meta.backrefs, {
Admin.account: Admin,
User.account: User,
BaseUser.account: BaseUser})
self.assertEqual(BaseUser.account.backref, 'baseuser_set')
self.assertEqual(User.account.backref, 'user_set')
self.assertEqual(Admin.account.backref, 'admin_set')
self.assertTrue(Account.user_set.model is Account)
self.assertTrue(Account.admin_set.model is Account)
self.assertTrue(Account.user_set.rel_model is User)
self.assertTrue(Account.admin_set.rel_model is Admin)
self.assertSQL(Account._schema._create_table(), (
'CREATE TABLE IF NOT EXISTS "account" ('
'"id" INTEGER NOT NULL PRIMARY KEY)'), [])
self.assertSQL(User._schema._create_table(), (
'CREATE TABLE IF NOT EXISTS "user" ('
'"account_id" INTEGER NOT NULL PRIMARY KEY, '
'"username" TEXT NOT NULL, '
'FOREIGN KEY ("account_id") REFERENCES "account" ("id"))'), [])
self.assertSQL(Admin._schema._create_table(), (
'CREATE TABLE IF NOT EXISTS "admin" ('
'"account_id" INTEGER NOT NULL PRIMARY KEY, '
'"role" TEXT NOT NULL, '
'FOREIGN KEY ("account_id") REFERENCES "account" ("id"))'), [])
def test_backref_inheritance(self):
class Category(TestModel): pass
def backref(fk_field):
return '%ss' % fk_field.model._meta.name
class BasePost(TestModel):
category = ForeignKeyField(Category, backref=backref)
class Note(BasePost): pass
class Photo(BasePost): pass
self.assertEqual(Category._meta.backrefs, {
BasePost.category: BasePost,
Note.category: Note,
Photo.category: Photo})
self.assertEqual(BasePost.category.backref, 'baseposts')
self.assertEqual(Note.category.backref, 'notes')
self.assertEqual(Photo.category.backref, 'photos')
self.assertTrue(Category.baseposts.rel_model is BasePost)
self.assertTrue(Category.baseposts.model is Category)
self.assertTrue(Category.notes.rel_model is Note)
self.assertTrue(Category.notes.model is Category)
self.assertTrue(Category.photos.rel_model is Photo)
self.assertTrue(Category.photos.model is Category)
class BaseItem(TestModel):
category = ForeignKeyField(Category, backref='items')
class ItemA(BaseItem): pass
class ItemB(BaseItem): pass
self.assertEqual(BaseItem.category.backref, 'items')
self.assertEqual(ItemA.category.backref, 'itema_set')
self.assertEqual(ItemB.category.backref, 'itemb_set')
self.assertTrue(Category.items.rel_model is BaseItem)
self.assertTrue(Category.itema_set.rel_model is ItemA)
self.assertTrue(Category.itema_set.model is Category)
self.assertTrue(Category.itemb_set.rel_model is ItemB)
self.assertTrue(Category.itemb_set.model is Category)
@skip_if(IS_SQLITE, 'sqlite is not supported')
@skip_if(IS_MYSQL, 'mysql is not raising this error(?)')
@skip_if(IS_CRDB, 'crdb is not raising the error in this test(?)')
def test_deferred_fk_creation(self):
class B(TestModel):
a = DeferredForeignKey('A', null=True)
b = TextField()
class A(TestModel):
a = TextField()
db.create_tables([A, B])
try:
# Test that we can create B with null "a_id" column:
a = A.create(a='a')
b = B.create(b='b')
# Test that we can create B that has no corresponding A:
fake_a = A(id=31337)
b2 = B.create(a=fake_a, b='b2')
b2_db = B.get(B.a == fake_a)
self.assertEqual(b2_db.b, 'b2')
# Ensure error occurs trying to create_foreign_key.
with db.atomic():
self.assertRaises(
IntegrityError,
B._schema.create_foreign_key,
B.a)
b2_db.delete_instance()
# We can now create the foreign key.
B._schema.create_foreign_key(B.a)
# The foreign-key is enforced:
with db.atomic():
self.assertRaises(IntegrityError, B.create, a=fake_a, b='b3')
finally:
db.drop_tables([A, B])
| TestFieldInheritance |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_musculotendon.py | {
"start": 15510,
"end": 21367
} | class ____:
@pytest.fixture(autouse=True)
def _musculotendon_tendon_force_explicit_fixture(
self,
musculotendon_concrete,
curve,
):
self.name = 'name'
self.N = ReferenceFrame('N')
self.q = dynamicsymbols('q')
self.origin = Point('pO')
self.insertion = Point('pI')
self.insertion.set_pos(self.origin, self.q*self.N.x)
self.pathway = LinearPathway(self.origin, self.insertion)
self.activation = FirstOrderActivationDeGroote2016(self.name)
self.e = self.activation.excitation
self.a = self.activation.activation
self.tau_a = self.activation.activation_time_constant
self.tau_d = self.activation.deactivation_time_constant
self.b = self.activation.smoothing_rate
self.formulation = MusculotendonFormulation.TENDON_FORCE_EXPLICIT
self.l_T_slack = Symbol('l_T_slack')
self.F_M_max = Symbol('F_M_max')
self.l_M_opt = Symbol('l_M_opt')
self.v_M_max = Symbol('v_M_max')
self.alpha_opt = Symbol('alpha_opt')
self.beta = Symbol('beta')
self.instance = musculotendon_concrete(
self.name,
self.pathway,
self.activation,
musculotendon_dynamics=self.formulation,
tendon_slack_length=self.l_T_slack,
peak_isometric_force=self.F_M_max,
optimal_fiber_length=self.l_M_opt,
maximal_fiber_velocity=self.v_M_max,
optimal_pennation_angle=self.alpha_opt,
fiber_damping_coefficient=self.beta,
with_defaults=True,
)
self.F_T_tilde = dynamicsymbols('F_T_tilde_name')
l_T_tilde = curve.tendon_force_length_inverse.with_defaults(self.F_T_tilde)
l_MT = self.pathway.length
v_MT = self.pathway.extension_velocity
l_T = l_T_tilde*self.l_T_slack
l_M = sqrt((l_MT - l_T)**2 + (self.l_M_opt*sin(self.alpha_opt))**2)
l_M_tilde = l_M/self.l_M_opt
cos_alpha = (l_MT - l_T)/l_M
F_T = self.F_T_tilde*self.F_M_max
F_M = F_T/cos_alpha
F_M_tilde = F_M/self.F_M_max
fl_M_pas = curve.fiber_force_length_passive.with_defaults(l_M_tilde)
fl_M_act = curve.fiber_force_length_active.with_defaults(l_M_tilde)
fv_M = (F_M_tilde - fl_M_pas)/(self.a*fl_M_act)
v_M_tilde = curve.fiber_force_velocity_inverse.with_defaults(fv_M)
v_M = v_M_tilde*self.v_M_max
v_T = v_MT - v_M/cos_alpha
v_T_tilde = v_T/self.l_T_slack
self.dF_T_tilde_expr = (
Float('0.2')*Float('33.93669377311689')*exp(
Float('33.93669377311689')*UnevaluatedExpr(l_T_tilde - Float('0.995'))
)*v_T_tilde
)
self.da_expr = (
(1/(self.tau_a*(Rational(1, 2) + Rational(3, 2)*self.a)))
*(Rational(1, 2) + Rational(1, 2)*tanh(self.b*(self.e - self.a)))
+ ((Rational(1, 2) + Rational(3, 2)*self.a)/self.tau_d)
*(Rational(1, 2) - Rational(1, 2)*tanh(self.b*(self.e - self.a)))
)*(self.e - self.a)
def test_state_vars(self):
assert hasattr(self.instance, 'x')
assert hasattr(self.instance, 'state_vars')
assert self.instance.x == self.instance.state_vars
x_expected = Matrix([self.F_T_tilde, self.a])
assert self.instance.x == x_expected
assert self.instance.state_vars == x_expected
assert isinstance(self.instance.x, Matrix)
assert isinstance(self.instance.state_vars, Matrix)
assert self.instance.x.shape == (2, 1)
assert self.instance.state_vars.shape == (2, 1)
def test_input_vars(self):
assert hasattr(self.instance, 'r')
assert hasattr(self.instance, 'input_vars')
assert self.instance.r == self.instance.input_vars
r_expected = Matrix([self.e])
assert self.instance.r == r_expected
assert self.instance.input_vars == r_expected
assert isinstance(self.instance.r, Matrix)
assert isinstance(self.instance.input_vars, Matrix)
assert self.instance.r.shape == (1, 1)
assert self.instance.input_vars.shape == (1, 1)
def test_constants(self):
assert hasattr(self.instance, 'p')
assert hasattr(self.instance, 'constants')
assert self.instance.p == self.instance.constants
p_expected = Matrix(
[
self.l_T_slack,
self.F_M_max,
self.l_M_opt,
self.v_M_max,
self.alpha_opt,
self.beta,
self.tau_a,
self.tau_d,
self.b,
]
)
assert self.instance.p == p_expected
assert self.instance.constants == p_expected
assert isinstance(self.instance.p, Matrix)
assert isinstance(self.instance.constants, Matrix)
assert self.instance.p.shape == (9, 1)
assert self.instance.constants.shape == (9, 1)
def test_M(self):
assert hasattr(self.instance, 'M')
M_expected = eye(2)
assert self.instance.M == M_expected
assert isinstance(self.instance.M, Matrix)
assert self.instance.M.shape == (2, 2)
def test_F(self):
assert hasattr(self.instance, 'F')
F_expected = Matrix([self.dF_T_tilde_expr, self.da_expr])
assert self.instance.F == F_expected
assert isinstance(self.instance.F, Matrix)
assert self.instance.F.shape == (2, 1)
def test_rhs(self):
assert hasattr(self.instance, 'rhs')
rhs_expected = Matrix([self.dF_T_tilde_expr, self.da_expr])
rhs = self.instance.rhs()
assert isinstance(rhs, Matrix)
assert rhs.shape == (2, 1)
assert simplify(rhs - rhs_expected) == zeros(2, 1)
| TestTendonForceExplicit |
python | getsentry__responses | responses/__init__.py | {
"start": 21448,
"end": 21602
} | class ____(BaseResponse):
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, passthrough=True, **kwargs)
| PassthroughResponse |
python | astropy__astropy | astropy/table/index.py | {
"start": 36358,
"end": 37865
} | class ____(list):
"""
List-subclass of table indices allowing for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
"""
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item) -> Index:
"""
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
"""
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
@deprecated(
since="7.2.0",
message="""\
Calling `Table.loc/iloc/loc_indices[index_id, item]` to select `item` from index
`index_id` is deprecated. Instead select the index using the syntax
`Table.loc/iloc/loc_indices.with_index(index_id)[item]`.
""",
)
def interpret_item_as_index_id_and_item(item: tuple) -> tuple:
"""Interpret the item as a (index_id, item) tuple."""
index_id, item = item
return index_id, item
| TableIndices |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 487482,
"end": 488234
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for CheckAnnotation."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("CheckAnnotationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of(CheckAnnotation), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| CheckAnnotationConnection |
python | PyCQA__pylint | pylint/testutils/pyreverse.py | {
"start": 2646,
"end": 2773
} | class ____(TypedDict):
source_roots: list[str]
output_formats: list[str]
command_line_args: list[str]
| TestFileOptions |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 78614,
"end": 80270
} | class ____(_fixtures.FixtureTest):
"""test the noload stratgegy which unlike others doesn't use
lazyloader to set up instrumentation"""
def test_o2m(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, back_populates="user", lazy="noload"
)
},
)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
with expect_noload_deprecation():
u1 = User()
a1 = Address()
u1.addresses.append(a1)
is_(a1.user, u1)
def test_m2o(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User, back_populates="addresses", lazy="noload"
)
},
)
with expect_noload_deprecation():
u1 = User()
a1 = Address()
a1.user = u1
in_(a1, u1.addresses)
| NoLoadBackPopulates |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 125553,
"end": 130233
} | class ____(CythonTransform, SkipDeclarations):
"""
Adjust function and class definitions by the decorator directives:
@cython.cfunc
@cython.cclass
@cython.ccall
@cython.inline
@cython.nogil
@cython.critical_section
"""
# list of directives that cause conversion to cclass
converts_to_cclass = ('cclass', 'total_ordering', 'dataclasses.dataclass')
def visit_ModuleNode(self, node):
self.directives = node.directives
self.in_py_class = False
self.visitchildren(node)
return node
def visit_CompilerDirectivesNode(self, node):
old_directives = self.directives
self.directives = node.directives
self.visitchildren(node)
self.directives = old_directives
return node
def visit_DefNode(self, node):
modifiers = []
if 'inline' in self.directives:
modifiers.append('inline')
nogil = self.directives.get('nogil')
with_gil = self.directives.get('with_gil')
except_val = self.directives.get('exceptval')
has_explicit_exc_clause = False if except_val is None else True
return_type_node = self.directives.get('returns')
if return_type_node is None and self.directives['annotation_typing']:
return_type_node = node.return_type_annotation
# for Python annotations, prefer safe exception handling by default
if return_type_node is not None and except_val is None:
except_val = (None, True) # except *
elif except_val is None:
# backward compatible default: no exception check, unless there's also a "@returns" declaration
except_val = (None, True if return_type_node else False)
if self.directives.get('c_compile_guard') and 'cfunc' not in self.directives:
error(node.pos, "c_compile_guard only allowed on C functions")
if 'ccall' in self.directives:
if 'cfunc' in self.directives:
error(node.pos, "cfunc and ccall directives cannot be combined")
if with_gil:
error(node.pos, "ccall functions cannot be declared 'with_gil'")
node = node.as_cfunction(
overridable=True, modifiers=modifiers, nogil=nogil,
returns=return_type_node, except_val=except_val, has_explicit_exc_clause=has_explicit_exc_clause)
return self.visit(node)
if 'cfunc' in self.directives:
if self.in_py_class:
error(node.pos, "cfunc directive is not allowed here")
else:
node = node.as_cfunction(
overridable=False, modifiers=modifiers, nogil=nogil, with_gil=with_gil,
returns=return_type_node, except_val=except_val, has_explicit_exc_clause=has_explicit_exc_clause)
return self.visit(node)
if 'inline' in modifiers:
error(node.pos, "Python functions cannot be declared 'inline'")
if nogil:
# TODO: turn this into a "with gil" declaration.
error(node.pos, "Python functions cannot be declared 'nogil'")
if with_gil:
error(node.pos, "Python functions cannot be declared 'with_gil'")
self.visit_FuncDefNode(node)
return node
def visit_FuncDefNode(self, node):
if "critical_section" in self.directives:
value = self.directives["critical_section"]
if value is not None:
error(node.pos, "critical_section decorator does not take arguments")
new_body = Nodes.CriticalSectionStatNode(
node.pos,
args=[ExprNodes.FirstArgumentForCriticalSectionNode(node.pos, func_node=node)],
body=node.body
)
node.body = new_body
self.visitchildren(node)
return node
def visit_LambdaNode(self, node):
# No directives should modify lambdas or generator expressions (and also nothing in them).
return node
def visit_PyClassDefNode(self, node):
if any(directive in self.directives for directive in self.converts_to_cclass):
node = node.as_cclass()
return self.visit(node)
else:
old_in_pyclass = self.in_py_class
self.in_py_class = True
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
def visit_CClassDefNode(self, node):
old_in_pyclass = self.in_py_class
self.in_py_class = False
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
| AdjustDefByDirectives |
python | plotly__plotly.py | plotly/graph_objs/ohlc/decreasing/_line.py | {
"start": 233,
"end": 4158
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "ohlc.decreasing"
_path_str = "ohlc.decreasing.line"
_valid_props = {"color", "dash", "width"}
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.ohlc.decreasing.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.ohlc.decreasing.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.decreasing.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | tornadoweb__tornado | tornado/test/iostream_test.py | {
"start": 34169,
"end": 34924
} | class ____(TestIOStreamMixin):
def _make_server_iostream(self, connection, **kwargs):
ssl_ctx = ssl_options_to_context(_server_ssl_options(), server_side=True)
connection = ssl_ctx.wrap_socket(
connection,
server_side=True,
do_handshake_on_connect=False,
)
return SSLIOStream(connection, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return SSLIOStream(
connection, ssl_options=dict(cert_reqs=ssl.CERT_NONE), **kwargs
)
# This will run some tests that are basically redundant but it's the
# simplest way to make sure that it works to pass an SSLContext
# instead of an ssl_options dict to the SSLIOStream constructor.
| TestIOStreamSSL |
python | wandb__wandb | wandb/integration/fastai/__init__.py | {
"start": 1462,
"end": 9261
} | class ____(TrackerCallback):
"""Callback for saving model topology, losses & metrics.
Optionally logs weights, gradients, sample predictions and best trained model.
Args:
learn (fastai.basic_train.Learner): the fast.ai learner to hook.
log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
save_model (bool): save model at the end of each epoch. It will also load best model at the end of training.
monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.
mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
input_type (str): "images" or None. Used to display sample predictions.
validation_data (list): data used for sample predictions if input_type is set.
predictions (int): number of predictions to make if input_type is set and validation_data is None.
seed (int): initialize random generator for sample predictions if input_type is set and validation_data is None.
"""
# Record if watch has been called previously (even in another instance)
_watch_called = False
def __init__(
self,
learn: "fastai.basic_train.Learner",
log: Optional[Literal["gradients", "parameters", "all"]] = "gradients",
save_model: bool = True,
monitor: Optional[str] = None,
mode: Literal["auto", "min", "max"] = "auto",
input_type: Optional[Literal["images"]] = None,
validation_data: Optional[list] = None,
predictions: int = 36,
seed: int = 12345,
) -> None:
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError("You must call wandb.init() before WandbCallback()")
# Adapted from fast.ai "SaveModelCallback"
if monitor is None:
# use default TrackerCallback monitor value
super().__init__(learn, mode=mode)
else:
super().__init__(learn, monitor=monitor, mode=mode)
self.save_model = save_model
self.model_path = Path(wandb.run.dir) / "bestmodel.pth"
self.log = log
self.input_type = input_type
self.best = None
# Select items for sample predictions to see evolution along training
self.validation_data = validation_data
if input_type and not self.validation_data:
wandb_random = random.Random(seed) # For repeatability
predictions = min(predictions, len(learn.data.valid_ds))
indices = wandb_random.sample(range(len(learn.data.valid_ds)), predictions)
self.validation_data = [learn.data.valid_ds[i] for i in indices]
def on_train_begin(self, **kwargs: Any) -> None:
"""Call watch method to log model topology, gradients & weights."""
# Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
super().on_train_begin()
# Ensure we don't call "watch" multiple times
if not WandbCallback._watch_called:
WandbCallback._watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
def on_epoch_end(
self, epoch: int, smooth_loss: float, last_metrics: list, **kwargs: Any
) -> None:
"""Log training loss, validation loss and custom metrics & log prediction samples & save model."""
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
wandb.termlog(
f"Better model found at epoch {epoch} with {self.monitor} value: {current}."
)
self.best = current
# Save within wandb folder
with self.model_path.open("wb") as model_file:
self.learn.save(model_file)
# Log sample predictions if learn.predict is available
if self.validation_data:
try:
self._wandb_log_predictions()
except FastaiError as e:
wandb.termwarn(e.message)
self.validation_data = None # prevent from trying again on next loop
except Exception as e:
wandb.termwarn(f"Unable to log prediction samples.\n{e}")
self.validation_data = None # prevent from trying again on next loop
# Log losses & metrics
# Adapted from fast.ai "CSVLogger"
logs = {
name: stat
for name, stat in list(
zip(self.learn.recorder.names, [epoch, smooth_loss] + last_metrics)
)
}
wandb.log(logs)
def on_train_end(self, **kwargs: Any) -> None:
"""Load the best model."""
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
if self.model_path.is_file():
with self.model_path.open("rb") as model_file:
self.learn.load(model_file, purge=False)
wandb.termlog(f"Loaded best saved model from {self.model_path}")
def _wandb_log_predictions(self) -> None:
"""Log prediction samples."""
pred_log = []
if self.validation_data is None:
return
for x, y in self.validation_data:
try:
pred = self.learn.predict(x)
except Exception:
raise FastaiError(
'Unable to run "predict" method from Learner to log prediction samples.'
)
# scalar -> likely to be a category
# tensor of dim 1 -> likely to be multicategory
if not pred[1].shape or pred[1].dim() == 1:
pred_log.append(
wandb.Image(
x.data,
caption=f"Ground Truth: {y}\nPrediction: {pred[0]}",
)
)
# most vision datasets have a "show" function we can use
elif hasattr(x, "show"):
# log input data
pred_log.append(wandb.Image(x.data, caption="Input data", grouping=3))
# log label and prediction
for im, capt in ((pred[0], "Prediction"), (y, "Ground Truth")):
# Resize plot to image resolution
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = x.size
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
# Superpose label or prediction to input image
x.show(ax=ax, y=im)
pred_log.append(wandb.Image(fig, caption=capt))
plt.close(fig)
# likely to be an image
elif hasattr(y, "shape") and (
(len(y.shape) == 2) or (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])
):
pred_log.extend(
[
wandb.Image(x.data, caption="Input data", grouping=3),
wandb.Image(pred[0].data, caption="Prediction"),
wandb.Image(y.data, caption="Ground Truth"),
]
)
# we just log input data
else:
pred_log.append(wandb.Image(x.data, caption="Input data"))
wandb.log({"Prediction Samples": pred_log}, commit=False)
| WandbCallback |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1597907,
"end": 1598074
} | class ____(sgqlc.types.Union):
"""Types that can be requested reviewers."""
__schema__ = github_schema
__types__ = (Mannequin, Team, User)
| RequestedReviewer |
python | django__django | tests/select_for_update/models.py | {
"start": 925,
"end": 1042
} | class ____(models.Model):
person = models.OneToOneField(Person, models.CASCADE, related_name="profile")
| PersonProfile |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial004_an_py310.py | {
"start": 234,
"end": 643
} | class ____(BaseModel):
username: str
full_name: str | None = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item,
user: User,
importance: Annotated[int, Body(gt=0)],
q: str | None = None,
):
results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
if q:
results.update({"q": q})
return results
| User |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 6624,
"end": 6720
} | class ____(SecurityWarning):
"""Warned when using unsupported SSL library"""
| NotOpenSSLWarning |
python | django__django | tests/i18n/test_compilation.py | {
"start": 12744,
"end": 13712
} | class ____(ProjectAndAppTests):
def setUp(self):
super().setUp()
gettext_module._translations = {} # flush cache or test will be useless
def test_nofuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, "locale")]):
call_command("compilemessages", locale=[self.LOCALE], verbosity=0)
with translation.override(self.LOCALE):
self.assertEqual(gettext("Lenin"), "Ленин")
self.assertEqual(gettext("Vodka"), "Vodka")
def test_fuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, "locale")]):
call_command(
"compilemessages", locale=[self.LOCALE], fuzzy=True, verbosity=0
)
with translation.override(self.LOCALE):
self.assertEqual(gettext("Lenin"), "Ленин")
self.assertEqual(gettext("Vodka"), "Водка")
| FuzzyTranslationTest |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 2693,
"end": 2795
} | class ____(Exception):
""" Parameters given matched multiple API resources """
| ResourceNotUniqueError |
python | ahupp__python-magic | magic/__init__.py | {
"start": 541,
"end": 689
} | class ____(Exception):
def __init__(self, message):
super(Exception, self).__init__(message)
self.message = message
| MagicException |
python | numba__numba | numba/core/typing/templates.py | {
"start": 46440,
"end": 48639
} | class ____(object):
"""
A registry of typing declarations. The registry stores such declarations
for functions, attributes and globals.
"""
def __init__(self):
self.functions = []
self.attributes = []
self.globals = []
def register(self, item):
assert issubclass(item, FunctionTemplate)
self.functions.append(item)
return item
def register_attr(self, item):
assert issubclass(item, AttributeTemplate)
self.attributes.append(item)
return item
def register_global(self, val=None, typ=None, **kwargs):
"""
Register the typing of a global value.
Functional usage with a Numba type::
register_global(value, typ)
Decorator usage with a template class::
@register_global(value, typing_key=None)
class Template:
...
"""
if typ is not None:
# register_global(val, typ)
assert val is not None
assert not kwargs
self.globals.append((val, typ))
else:
def decorate(cls, typing_key):
class Template(cls):
key = typing_key
if callable(val):
typ = types.Function(Template)
else:
raise TypeError("cannot infer type for global value %r")
self.globals.append((val, typ))
return cls
# register_global(val, typing_key=None)(<template class>)
assert val is not None
typing_key = kwargs.pop('typing_key', val)
assert not kwargs
if typing_key is val:
# Check the value is globally reachable, as it is going
# to be used as the key.
mod = sys.modules[val.__module__]
if getattr(mod, val.__name__) is not val:
raise ValueError("%r is not globally reachable as '%s.%s'"
% (mod, val.__module__, val.__name__))
def decorator(cls):
return decorate(cls, typing_key)
return decorator
| Registry |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.