language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/issue_detection/experiments/test_m_n_plus_one_db_detector.py | {
"start": 947,
"end": 14044
} | class ____(TestCase):
detector = MNPlusOneDBSpanExperimentalDetector
fingerprint_type_id = PerformanceMNPlusOneDBQueriesGroupType.type_id
group_type = PerformanceNPlusOneGroupType
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(
self, event: dict[str, Any], settings: dict[DetectorType, Any] | None = None
) -> list[PerformanceProblem]:
detector_settings = settings or self._settings
detector = self.detector(detector_settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def test_detects_parallel_m_n_plus_one(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
problems = self.find_problems(event)
assert problems == [
PerformanceProblem(
fingerprint=f"1-{self.fingerprint_type_id}-6807a9d5bedb6fdb175b006448cddf8cdf18fbd8",
op="db",
type=self.group_type,
desc="SELECT id, name FROM authors INNER JOIN book_authors ON author_id = id WHERE book_id = $1",
parent_span_ids=[],
cause_span_ids=[],
offender_span_ids=[
"9c5049407f37a364",
"ad1453eb469473f5",
"9ac8fee795f25a28",
"aacda642ff6787c0",
"b231fb2367a40bb2",
"9abcfbac864d1b09",
"a4acb0c08f6c5392",
"a1dbea4273c7a8cf",
"b8467be28b0edef0",
"9677584719fa33f9",
"8c6aa95b24d15772",
"be7d04a1731d5d10",
"baa57006cb44092a",
"a383cd625dff4809",
"9c48fda36f28cb0a",
"82253694a3a68c93",
"8831cccebb865893",
"a2339eabb5c4cf07",
"8ea362c64d8b9fd9",
"b8f8a99b783f7b48",
"87a6041001b4e8f6",
"ab99c67643fd85cf",
"a96783f2f544024a",
"8e110c4aa54e4aa0",
],
evidence_data={
"op": "db",
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": [
"9c5049407f37a364",
"ad1453eb469473f5",
"9ac8fee795f25a28",
"aacda642ff6787c0",
"b231fb2367a40bb2",
"9abcfbac864d1b09",
"a4acb0c08f6c5392",
"a1dbea4273c7a8cf",
"b8467be28b0edef0",
"9677584719fa33f9",
"8c6aa95b24d15772",
"be7d04a1731d5d10",
"baa57006cb44092a",
"a383cd625dff4809",
"9c48fda36f28cb0a",
"82253694a3a68c93",
"8831cccebb865893",
"a2339eabb5c4cf07",
"8ea362c64d8b9fd9",
"b8f8a99b783f7b48",
"87a6041001b4e8f6",
"ab99c67643fd85cf",
"a96783f2f544024a",
"8e110c4aa54e4aa0",
],
},
evidence_display=[],
)
]
assert problems[0].title == "N+1 Query"
def test_detects_prisma_client_m_n_plus_one(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-prisma-client")
# Hardcoded first offender span, pattern span ids, and repititions
first_offender_span_index = next(
index
for index, span in enumerate(event["spans"])
if span["span_id"] == "aa3a15d285888d70"
)
pattern_span_ids = [
"aa3a15d285888d70",
"add16472abc0be2e",
"103c3b3e339c8a0e",
"d8b2e30697d9d493",
"f3edcfe2e505ef57",
"e81194ca91d594e2",
"855092f3cff86380",
]
num_pattern_repetitions = 15
num_spans_in_pattern = len(pattern_span_ids)
num_offender_spans = num_spans_in_pattern * num_pattern_repetitions
# Then use that index to get all the offender spans
offender_span_ids = [
span["span_id"]
for span in event["spans"][
first_offender_span_index : (first_offender_span_index + num_offender_spans)
]
]
problems = self.find_problems(event)
assert len(problems) == 1
problem = problems[0]
assert problem.type == PerformanceNPlusOneGroupType
assert problem.fingerprint == "1-1011-44f4f3cc14f0f8d0c5ae372e5e8c80e7ba84f413"
assert len(problem.offender_span_ids) == num_offender_spans
assert problem.evidence_data is not None
assert problem.evidence_data["number_repeating_spans"] == str(num_offender_spans)
assert problem.evidence_data["offender_span_ids"] == offender_span_ids
assert problem.evidence_data["op"] == "db"
assert problem.evidence_data["parent_span"] == "default - render route (app) /products"
assert problem.evidence_data["parent_span_ids"] == ["1bb013326ff579a4"]
assert problem.evidence_data["transaction_name"] == "GET /products"
def test_prisma_ops_with_different_descriptions(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-prisma-client-different-descriptions")
assert len(self.find_problems(event)) == 1
problem = self.find_problems(event)[0]
assert problem.type == PerformanceNPlusOneGroupType
assert problem.fingerprint == "1-1011-50301e409950f4b1cc0a02d9d172684b4020ae32"
assert len(problem.offender_span_ids) == 10
assert problem.evidence_data is not None
assert problem.evidence_data["number_repeating_spans"] == str(10)
assert (
problem.evidence_data["repeating_spans_compact"][0]
== "UPDATE users SET name = $1, email = $2 WHERE id = $3"
)
assert problem.evidence_data["repeating_spans_compact"][1] == "prisma:engine:serialize"
def test_does_not_detect_truncated_m_n_plus_one(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql-truncated")
assert self.find_problems(event) == []
def test_does_not_detect_n_plus_one(self) -> None:
event = get_event("n-plus-one-db/n-plus-one-in-django-index-view")
assert self.find_problems(event) == []
def test_does_not_detect_when_parent_is_transaction(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql-transaction-parent")
assert self.find_problems(event) == []
@override_options(
{
"performance.issues.experimental_m_n_plus_one_db_queries.problem-creation": 1.0,
}
)
def test_m_n_plus_one_detector_enabled(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
sdk_span_mock = Mock()
_detect_performance_problems(event, sdk_span_mock, self.create_project())
sdk_span_mock.containing_transaction.set_tag.assert_has_calls(
[
# Current + Experimental Detector
call("_pi_all_issue_count", 2),
call("_pi_sdk_name", "sentry.javascript.node"),
call("is_standalone_spans", False),
call("_pi_transaction", "3818ae4f54ba4fa6ac6f68c9e32793c4"),
# Current Detector
call("_pi_m_n_plus_one_db_fp", "1-1011-6807a9d5bedb6fdb175b006448cddf8cdf18fbd8"),
call("_pi_m_n_plus_one_db", "9c5049407f37a364"),
# Experimental Detector
call(
"_pi_experimental_m_n_plus_one_db_queries_fp",
f"1-{self.fingerprint_type_id}-6807a9d5bedb6fdb175b006448cddf8cdf18fbd8",
),
call("_pi_experimental_m_n_plus_one_db_queries", "9c5049407f37a364"),
]
)
def test_m_n_plus_one_does_not_include_extra_span(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-off-by-one")
assert self.find_problems(event) == []
def test_m_n_plus_one_ignores_redis(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-redis")
assert self.find_problems(event) == []
def test_m_n_plus_one_ignores_mostly_not_db(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-mostly-http")
assert self.find_problems(event) == []
def test_respects_project_option(self) -> None:
project = self.create_project()
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
event["project_id"] = project.id
settings = get_detection_settings(project.id)
detector = self.detector(settings, event)
assert detector.is_creation_allowed_for_project(project)
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_queries_detection_enabled": False},
)
settings = get_detection_settings(project.id)
detector = self.detector(settings, event)
assert not detector.is_creation_allowed_for_project(project)
def test_respects_n_plus_one_db_duration_threshold(self) -> None:
project = self.create_project()
# Total duration subceeds the threshold
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_duration_threshold": 500},
)
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
event["project_id"] = project.id
settings = get_detection_settings(project_id=project.id)
assert self.find_problems(event, settings) == []
# Total duration exceeds the threshold
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_duration_threshold": 100},
)
settings = get_detection_settings(project_id=project.id)
assert len(self.find_problems(event, settings)) == 1
@patch("sentry.issue_detection.detectors.experiments.mn_plus_one_db_span_detector.metrics")
def test_ignores_event_below_duration_threshold(self, metrics_mock: MagicMock) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-db-spans-duration-suceeds")
assert self.find_problems(event) == []
metrics_mock.incr.assert_called_with(
"mn_plus_one_db_span_detector.below_duration_threshold"
)
@patch("sentry.issue_detection.detectors.experiments.mn_plus_one_db_span_detector.metrics")
def test_ignores_event_with_low_db_span_percentage(self, metrics_mock: MagicMock) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-db-spans-duration-suceeds")
for index, span in enumerate(event["spans"]):
# Modify spans so each takes 1s, but DB spans take 1ms
duration = 0.001 if span.get("op") == "db" else 1
span["start_timestamp"] = index
span["timestamp"] = index + duration
assert self.find_problems(event) == []
metrics_mock.incr.assert_called_with(
"mn_plus_one_db_span_detector.below_db_span_percentage"
)
@patch("sentry.issue_detection.detectors.experiments.mn_plus_one_db_span_detector.metrics")
def test_ignores_event_with_no_common_parent_span(self, metrics_mock: MagicMock) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-prisma-client")
previous_parent_span_id = None
for span in event["spans"]:
# For all prisma operation spans, nest them within the previous one.
if span.get("description") == "prisma:client:operation":
if previous_parent_span_id:
span["parent_span_id"] = previous_parent_span_id
previous_parent_span_id = span.get("span_id")
assert self.find_problems(event) == []
metrics_mock.incr.assert_called_with("mn_plus_one_db_span_detector.no_parent_span")
@patch("sentry.issue_detection.detectors.experiments.mn_plus_one_db_span_detector.metrics")
def test_ignores_prisma_client_if_depth_config_is_too_small(
self, metrics_mock: MagicMock
) -> None:
settings = deepcopy(self._settings)
settings[self.detector.settings_key]["max_allowable_depth"] = 1
event = get_event("m-n-plus-one-db/m-n-plus-one-prisma-client")
assert self.find_problems(event, settings) == []
metrics_mock.incr.assert_called_with("mn_plus_one_db_span_detector.no_parent_span")
| MNPlusOneDBDetectorTest |
python | google__python-fire | fire/test_components.py | {
"start": 2665,
"end": 2846
} | class ____:
def ten(self):
return 10
def sum(self, alpha=0, beta=0):
return alpha + 2 * beta
def identity(self, alpha, beta='0'):
return alpha, beta
| MixedDefaults |
python | lepture__authlib | authlib/integrations/flask_client/__init__.py | {
"start": 260,
"end": 1765
} | class ____(BaseOAuth):
oauth1_client_cls = FlaskOAuth1App
oauth2_client_cls = FlaskOAuth2App
framework_integration_cls = FlaskIntegration
def __init__(self, app=None, cache=None, fetch_token=None, update_token=None):
super().__init__(
cache=cache, fetch_token=fetch_token, update_token=update_token
)
self.app = app
if app:
self.init_app(app)
def init_app(self, app, cache=None, fetch_token=None, update_token=None):
"""Initialize lazy for Flask app. This is usually used for Flask application
factory pattern.
"""
self.app = app
if cache is not None:
self.cache = cache
if fetch_token:
self.fetch_token = fetch_token
if update_token:
self.update_token = update_token
app.extensions = getattr(app, "extensions", {})
app.extensions["authlib.integrations.flask_client"] = self
def create_client(self, name):
if not self.app:
raise RuntimeError("OAuth is not init with Flask app.")
return super().create_client(name)
def register(self, name, overwrite=False, **kwargs):
self._registry[name] = (overwrite, kwargs)
if self.app:
return self.create_client(name)
return LocalProxy(lambda: self.create_client(name))
__all__ = [
"OAuth",
"FlaskIntegration",
"FlaskOAuth1App",
"FlaskOAuth2App",
"token_update",
"OAuthError",
]
| OAuth |
python | gevent__gevent | src/greentest/3.14/test_threading.py | {
"start": 78986,
"end": 83007
} | class ____(unittest.TestCase):
def test__all__(self):
restore_default_excepthook(self)
extra = {"ThreadError"}
not_exported = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, not_exported=not_exported)
@unittest.skipUnless(hasattr(_thread, 'set_name'), "missing _thread.set_name")
@unittest.skipUnless(hasattr(_thread, '_get_name'), "missing _thread._get_name")
def test_set_name(self):
# set_name() limit in bytes
truncate = getattr(_thread, "_NAME_MAXLEN", None)
limit = truncate or 100
tests = [
# test short ASCII name
"CustomName",
# test short non-ASCII name
"namé€",
# embedded null character: name is truncated
# at the first null character
"embed\0null",
# Test long ASCII names (not truncated)
"x" * limit,
# Test long ASCII names (truncated)
"x" * (limit + 10),
# Test long non-ASCII name (truncated)
"x" * (limit - 1) + "é€",
# Test long non-BMP names (truncated) creating surrogate pairs
# on Windows
"x" * (limit - 1) + "\U0010FFFF",
"x" * (limit - 2) + "\U0010FFFF" * 2,
"x" + "\U0001f40d" * limit,
"xx" + "\U0001f40d" * limit,
"xxx" + "\U0001f40d" * limit,
"xxxx" + "\U0001f40d" * limit,
]
if os_helper.FS_NONASCII:
tests.append(f"nonascii:{os_helper.FS_NONASCII}")
if os_helper.TESTFN_UNENCODABLE:
tests.append(os_helper.TESTFN_UNENCODABLE)
if sys.platform.startswith("sunos"):
encoding = "utf-8"
else:
encoding = sys.getfilesystemencoding()
def work():
nonlocal work_name
work_name = _thread._get_name()
for name in tests:
if not support.MS_WINDOWS:
encoded = name.encode(encoding, "replace")
if b'\0' in encoded:
encoded = encoded.split(b'\0', 1)[0]
if truncate is not None:
encoded = encoded[:truncate]
if sys.platform.startswith("sunos"):
expected = encoded.decode("utf-8", "surrogateescape")
else:
expected = os.fsdecode(encoded)
else:
size = 0
chars = []
for ch in name:
if ord(ch) > 0xFFFF:
size += 2
else:
size += 1
if size > truncate:
break
chars.append(ch)
expected = ''.join(chars)
if '\0' in expected:
expected = expected.split('\0', 1)[0]
with self.subTest(name=name, expected=expected):
work_name = None
thread = threading.Thread(target=work, name=name)
thread.start()
thread.join()
self.assertEqual(work_name, expected,
f"{len(work_name)=} and {len(expected)=}")
@unittest.skipUnless(hasattr(_thread, 'set_name'), "missing _thread.set_name")
@unittest.skipUnless(hasattr(_thread, '_get_name'), "missing _thread._get_name")
def test_change_name(self):
# Change the name of a thread while the thread is running
name1 = None
name2 = None
def work():
nonlocal name1, name2
name1 = _thread._get_name()
threading.current_thread().name = "new name"
name2 = _thread._get_name()
thread = threading.Thread(target=work, name="name")
thread.start()
thread.join()
self.assertEqual(name1, "name")
self.assertEqual(name2, "new name")
| MiscTestCase |
python | conda__conda | conda/common/serialize/json.py | {
"start": 358,
"end": 1200
} | class ____(json.JSONEncoder):
def default(self, obj: Any) -> Any:
# Python types
if isinstance(obj, Enum):
return obj.value
elif isinstance(obj, Path):
return str(obj)
# auxlib entity types
for attr in ("dump", "__json__", "to_json", "as_json"):
if method := getattr(obj, attr, None):
return method()
# default
return super().default(obj)
def dump(*args, **kwargs):
kwargs.setdefault("cls", CondaJSONEncoder)
kwargs.setdefault("indent", 2)
return json.dump(*args, **kwargs)
def dumps(*args, **kwargs):
kwargs.setdefault("cls", CondaJSONEncoder)
kwargs.setdefault("indent", 2)
return json.dumps(*args, **kwargs)
load = json.load
loads = json.loads
JSONDecodeError = json.JSONDecodeError
| CondaJSONEncoder |
python | cython__cython | runtests.py | {
"start": 20520,
"end": 22295
} | class ____(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t, count=1):
self.test_counts[metric] += count
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items(), key=operator.itemgetter(1), reverse=True):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
| Stats |
python | python-attrs__attrs | typing-examples/baseline.py | {
"start": 225,
"end": 326
} | class ____:
x: int = attrs.field(default=42)
ngc = NGClass(1)
@attrs.mutable(slots=False)
| NGClass |
python | huggingface__transformers | src/transformers/models/doge/modeling_doge.py | {
"start": 3220,
"end": 11191
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: DogeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[DogeConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def flex_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Union[torch.Tensor, "BlockMask"],
scaling: Optional[float] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
block_mask = None
causal_mask = None
if isinstance(attention_mask, BlockMask):
block_mask = attention_mask
else:
causal_mask = attention_mask
if causal_mask is not None:
causal_mask = causal_mask[:, :, :, : key.shape[-2]]
def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
if softcap is not None:
score = softcap * torch.tanh(score / softcap)
if causal_mask is not None:
score = score + causal_mask[batch_idx][head_idx][q_idx][kv_idx]
return score
attn_output, attention_weights = compile_friendly_flex_attention(
query,
key,
value,
score_mod=score_mod,
block_mask=block_mask,
enable_gqa=True,
scale=scaling,
# Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
# For simplification, we thus always return it as no additional computations are introduced.
return_lse=True,
)
# lse is returned in float32
attention_weights = attention_weights.to(value.dtype)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attention_weights
ALL_ATTENTION_FUNCTIONS = AttentionInterface()
ALL_ATTENTION_FUNCTIONS["doge_flex_attention"] = flex_attention_forward
| DogeRotaryEmbedding |
python | catalyst-team__catalyst | catalyst/contrib/losses/supervised_contrastive.py | {
"start": 56,
"end": 3700
} | class ____(nn.Module):
"""A Contrastive embedding loss that uses targets.
It has been proposed in `Supervised Contrastive Learning`_.
.. _`Supervised Contrastive Learning`:
https://arxiv.org/pdf/2004.11362.pdf
"""
def __init__(
self, tau: float, reduction: str = "mean", pos_aggregation="in"
) -> None:
"""
Args:
tau: temperature
reduction: specifies the reduction to apply to the output:
``"none"`` | ``"mean"`` | ``"sum"``.
``"none"``: no reduction will be applied,
``"mean"``: the sum of the output will be divided by the number of
positive pairs in the output,
``"sum"``: the output will be summed.
pos_aggregation: specifies the place of positive pairs aggregation:
``"in"`` | ``"out"``.
``"in"``: maximization of log(average positive exponentiate similarity)
``"out"``: maximization of average positive similarity
Raises:
ValueError: if reduction is not mean, sum or none
ValueError: if positive aggregation is not in or out
"""
super().__init__()
self.tau = tau
self.self_similarity = 1 / self.tau
self.exp_self_similarity = e ** (1 / self.tau)
self.reduction = reduction
self.pos_aggregation = pos_aggregation
if self.reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Reduction should be: mean, sum, none. But got - {self.reduction}!"
)
if self.pos_aggregation not in ["in", "out"]:
raise ValueError(
"Positive aggregation should be: in or out."
f"But got - {self.pos_aggregation}!"
)
def forward(self, features: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""
Args:
features: [bs; feature_len]
targets: [bs]
Returns:
computed loss
"""
# if ||x|| = ||y|| = 1 then||x-y||^2 = 2 - 2<x,y>
features = torch.nn.functional.normalize(features)
cosine_matrix = (2 - torch.cdist(features, features) ** 2) / 2
exp_cosine_matrix = torch.exp(cosine_matrix / self.tau)
# positive part of the loss
pos_place = targets.repeat(targets.shape[0], 1) == targets.reshape(
targets.shape[0], 1
)
# aggregation of postive pairs
number_of_positives = pos_place.sum(dim=1) - 1
assert (
number_of_positives == 0
).sum().item() == 0, (
"There must be at least one positive example for each sample!"
)
if self.pos_aggregation == "in":
pos_loss = (exp_cosine_matrix * pos_place).sum(
dim=1
) - self.exp_self_similarity
pos_loss = torch.log(pos_loss) - torch.log(number_of_positives.float())
elif self.pos_aggregation == "out":
pos_loss = (
(torch.log(exp_cosine_matrix) * pos_place).sum(dim=1)
- self.self_similarity
) / number_of_positives
# neg part of the loss
exp_sim_sum = exp_cosine_matrix.sum(dim=1) - self.exp_self_similarity
neg_loss = torch.log(exp_sim_sum)
# 2*poss_loss (i,j) and (j,i)
loss = -pos_loss + neg_loss
if self.reduction == "mean":
loss = loss.mean()
elif self.reduction == "sum":
loss = loss.sum()
return loss
__all__ = [SupervisedContrastiveLoss]
| SupervisedContrastiveLoss |
python | ansible__ansible | lib/ansible/_internal/_yaml/_constructor.py | {
"start": 690,
"end": 1128
} | class ____(SafeConstructor, metaclass=abc.ABCMeta):
"""Base class for Ansible YAML constructors."""
@classmethod
@abc.abstractmethod
def _register_constructors(cls) -> None:
"""Method used to register constructors to derived types during class initialization."""
def __init_subclass__(cls, **kwargs) -> None:
"""Initialization for derived types."""
cls._register_constructors()
| _BaseConstructor |
python | pytorch__pytorch | torch/_inductor/runtime/hints.py | {
"start": 699,
"end": 3193
} | class ____(Enum):
SQUARE = 0
DEFAULT = 1
# Define `AttrsDescriptorWrapper` function with clear conditional handling
if has_triton_package():
import triton
import triton.backends.compiler
import triton.compiler.compiler
if hasattr(triton.backends.compiler, "AttrsDescriptor"):
# Triton 3.2.0 - the second implementation
from triton.backends.compiler import AttrsDescriptor
def AttrsDescriptorWrapper(
divisible_by_16=None,
equal_to_1=None,
):
# Prepare the arguments for AttrsDescriptor
kwargs = {
"tt.divisibility": divisible_by_16,
"tt.equal_to": equal_to_1,
}
# Instantiate AttrsDescriptor with the prepared arguments
res = AttrsDescriptor.from_dict(
{"arg_properties": kwargs, "cls": AttrsDescriptor.__name__}
)
assert res.property_values["tt.divisibility"] == 16
assert res.property_values["tt.equal_to"] == 1
return res
elif hasattr(triton.compiler.compiler, "AttrsDescriptor"):
# Triton 3.0.0 - the original implementation
from triton.compiler.compiler import AttrsDescriptor
def AttrsDescriptorWrapper(
divisible_by_16=None,
equal_to_1=None,
):
# Prepare the arguments for AttrsDescriptor
kwargs = {
"divisible_by_16": divisible_by_16,
"equal_to_1": equal_to_1,
}
# Instantiate AttrsDescriptor with the prepared arguments
return AttrsDescriptor(**kwargs)
else:
# Triton in 2025:
# note: there's also a range of triton commits not currently supported
# from ~Dec 9, 2024 to Jan 1 2025, in which AttrsDescriptors are still
# used, but the contents are different.
def AttrsDescriptorWrapper(
divisible_by_16=None,
equal_to_1=None,
):
# pyrefly: ignore [not-iterable]
return {(x,): [["tt.divisibility", 16]] for x in divisible_by_16}
else:
# Define a namedtuple as a fallback when AttrsDescriptor is not available
AttrsDescriptorWrapper = collections.namedtuple( # type: ignore[no-redef, name-match]
# pyrefly: ignore [invalid-argument]
"AttrsDescriptor",
["divisible_by_16", "equal_to_1"],
defaults=[(), ()],
)
_NUM_THREADS_PER_WARP = 32
| TileHint |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/load_target.py | {
"start": 6527,
"end": 6725
} | class ____(WorkspaceLoadTarget):
path: str
def create_origins(self) -> Sequence[CodeLocationOrigin]:
return get_origins_from_toml(self.path)
@record(kw_only=False)
| PyProjectFileTarget |
python | django__django | tests/admin_widgets/models.py | {
"start": 1928,
"end": 2031
} | class ____(models.Model):
release_event = models.ForeignKey(ReleaseEvent, models.CASCADE)
| VideoStream |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_security.py | {
"start": 180,
"end": 3713
} | class ____:
def test_fail_when_http_url_is_found(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
file_with_http_url = tmp_path / "file.py"
file_with_http_url.write_text("http://example.com")
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == f"The following files have http:// URLs:\n\t- {file_with_http_url}"
def test_pass_when_commented_http_url(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
file_with_http_url = tmp_path / "file.py"
file_with_http_url.write_text(f"http://example.com {security.CheckConnectorUsesHTTPSOnly.ignore_comment}")
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "No file with http:// URLs found"
def test_pass_when_http_url_in_ignored_directories(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
for ignored_directory in security.CheckConnectorUsesHTTPSOnly.ignored_directories_for_https_checks:
(tmp_path / ignored_directory).mkdir()
file_with_http_url = tmp_path / ignored_directory / "file.py"
file_with_http_url.write_text("http://example.com")
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "No file with http:// URLs found"
def test_pass_when_http_url_in_ignored_patterns(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
for ignored_pattern in security.CheckConnectorUsesHTTPSOnly.ignored_file_name_pattern_for_https_checks:
file_with_http_url = tmp_path / ignored_pattern.replace("*", "test")
file_with_http_url.write_text("http://example.com")
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "No file with http:// URLs found"
def test_pass_when_http_url_has_ignored_prefix(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
file_with_http_url = tmp_path / "file.py"
for i, ignored_prefix in enumerate(security.CheckConnectorUsesHTTPSOnly.ignored_url_prefixes):
file_with_http_url = tmp_path / f"file_{i}.py"
file_with_http_url.write_text(ignored_prefix)
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "No file with http:// URLs found"
def test_pass_when_https_url(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
file_with_http_url = tmp_path / "file.py"
file_with_http_url.write_text(f"https://example.com")
# Act
result = security.CheckConnectorUsesHTTPSOnly()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "No file with http:// URLs found"
| TestCheckConnectorUsesHTTPSOnly |
python | Textualize__textual | docs/examples/widgets/radio_set.py | {
"start": 165,
"end": 1577
} | class ____(App[None]):
CSS_PATH = "radio_set.tcss"
def compose(self) -> ComposeResult:
with Horizontal():
# A RadioSet built up from RadioButtons.
with RadioSet(id="focus_me"):
yield RadioButton("Battlestar Galactica")
yield RadioButton("Dune 1984")
yield RadioButton("Dune 2021")
yield RadioButton("Serenity", value=True)
yield RadioButton("Star Trek: The Motion Picture")
yield RadioButton("Star Wars: A New Hope")
yield RadioButton("The Last Starfighter")
yield RadioButton(
Text.from_markup(
"Total Recall :backhand_index_pointing_right: :red_circle:"
)
)
yield RadioButton("Wing Commander")
# A RadioSet built up from a collection of strings.
yield RadioSet(
"Amanda",
"Connor MacLeod",
"Duncan MacLeod",
"Heather MacLeod",
"Joe Dawson",
"Kurgan, [bold italic red]The[/]",
"Methos",
"Rachel Ellenstein",
"Ramírez",
)
def on_mount(self) -> None:
self.query_one("#focus_me").focus()
if __name__ == "__main__":
RadioChoicesApp().run()
| RadioChoicesApp |
python | ray-project__ray | python/ray/serve/tests/unit/test_autoscaling_policy.py | {
"start": 1937,
"end": 9646
} | class ____:
def test_bounds_checking(self):
num_replicas = 10
max_replicas = 11
min_replicas = 9
config = AutoscalingConfig(
max_replicas=max_replicas,
min_replicas=min_replicas,
target_ongoing_requests=100,
)
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=150 * num_replicas,
num_running_replicas=num_replicas,
)
assert desired_num_replicas == max_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=50 * num_replicas,
num_running_replicas=num_replicas,
)
assert desired_num_replicas == min_replicas
for i in range(50, 150):
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=i * num_replicas,
num_running_replicas=num_replicas,
)
assert min_replicas <= desired_num_replicas <= max_replicas
@pytest.mark.parametrize("target_requests", [0.5, 1.0, 1.5])
def test_scale_up(self, target_requests):
config = AutoscalingConfig(
min_replicas=0, max_replicas=100, target_ongoing_requests=target_requests
)
num_replicas = 10
num_ongoing_requests = 2 * target_requests * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 19 <= desired_num_replicas <= 21 # 10 * 2 = 20
@pytest.mark.parametrize("target_requests", [0.5, 1.0, 1.5])
def test_scale_down(self, target_requests):
config = AutoscalingConfig(
min_replicas=0, max_replicas=100, target_ongoing_requests=target_requests
)
num_replicas = 10
num_ongoing_requests = 0.5 * target_requests * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 4 <= desired_num_replicas <= 6 # 10 * 0.5 = 5
@pytest.mark.parametrize("use_deprecated_smoothing_factor", [True, False])
def test_scaling_factor(self, use_deprecated_smoothing_factor):
config = {"min_replicas": 0, "max_replicas": 100, "target_ongoing_requests": 2}
if use_deprecated_smoothing_factor:
config["smoothing_factor"] = 0.5
else:
config["upscaling_factor"] = 0.5
config["downscaling_factor"] = 0.5
config = AutoscalingConfig(**config)
num_replicas = 10
num_ongoing_requests = 8.0 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 24 <= desired_num_replicas <= 26 # 10 + 0.5 * (40 - 10) = 25
num_ongoing_requests = 0.25 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 5 <= desired_num_replicas <= 8 # 10 + 0.5 * (2.5 - 10) = 6.25
@pytest.mark.parametrize("use_deprecated_smoothing_factor", [True, False])
def test_upscaling_factor(self, use_deprecated_smoothing_factor):
config = {"min_replicas": 0, "max_replicas": 100, "target_ongoing_requests": 2}
if use_deprecated_smoothing_factor:
config["upscale_smoothing_factor"] = 0.5
else:
config["upscaling_factor"] = 0.5
config = AutoscalingConfig(**config)
num_replicas = 10
# Should use upscale smoothing factor of 0.5
num_ongoing_requests = 8.0 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 24 <= desired_num_replicas <= 26 # 10 + 0.5 * (40 - 10) = 25
# Should use downscale smoothing factor of 1 (default)
num_ongoing_requests = 0.25 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 1 <= desired_num_replicas <= 4 # 10 + (2.5 - 10) = 2.5
@pytest.mark.parametrize("use_deprecated_smoothing_factor", [True, False])
def test_downscaling_factor(self, use_deprecated_smoothing_factor):
config = {"min_replicas": 0, "max_replicas": 100, "target_ongoing_requests": 2}
if use_deprecated_smoothing_factor:
config["downscale_smoothing_factor"] = 0.5
else:
config["downscaling_factor"] = 0.5
config = AutoscalingConfig(**config)
num_replicas = 10
# Should use upscale smoothing factor of 1 (default)
num_ongoing_requests = 8.0 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 39 <= desired_num_replicas <= 41 # 10 + (40 - 10) = 40
# Should use downscale smoothing factor of 0.5
num_ongoing_requests = 0.25 * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=num_ongoing_requests,
num_running_replicas=num_replicas,
)
assert 5 <= desired_num_replicas <= 8 # 10 + 0.5 * (2.5 - 10) = 6.25
@pytest.mark.parametrize(
"num_replicas,ratio,scaling_factor",
[
# All of the parametrized scenarios should downscale by 1
# replica. Compare the first theoretical calculation that's
# with smoothing factor, and the second calculation without
# smoothing factor. In these cases, downscaling should not
# be blocked by fractional smoothing factor.
(2, 0.3, 0.5), # 2 - 0.5 (2 * 0.7) = 1.3 | 2 - (2 * 0.7) = 0.6
(5, 0.4, 0.2), # 5 - 0.2 (5 * 0.6) = 4.4 | 5 - (5 * 0.6) = 2
(10, 0.4, 0.1), # 10 - 0.1 (10 * 0.6) = 9.4 | 10 - (10 * 0.6) = 4
],
)
@pytest.mark.parametrize("use_deprecated_smoothing_factor", [True, False])
def test_downscaling_with_fractional_scaling_factor(
self,
num_replicas: int,
ratio: float,
scaling_factor: float,
use_deprecated_smoothing_factor,
):
config = {"min_replicas": 0, "max_replicas": 100, "target_ongoing_requests": 1}
if use_deprecated_smoothing_factor:
config["downscale_smoothing_factor"] = scaling_factor
else:
config["downscaling_factor"] = scaling_factor
config = AutoscalingConfig(**config)
total_num_requests = ratio * num_replicas
desired_num_replicas = _calculate_desired_num_replicas(
autoscaling_config=config,
total_num_requests=total_num_requests,
num_running_replicas=num_replicas,
)
assert desired_num_replicas == num_replicas - 1
| TestCalculateDesiredNumReplicas |
python | networkx__networkx | networkx/algorithms/connectivity/tests/test_connectivity.py | {
"start": 10856,
"end": 15027
} | class ____:
@classmethod
def setup_class(cls):
cls.path = nx.path_graph(7)
cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph())
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
cls.gnp = nx.gnp_random_graph(30, 0.1, seed=42)
cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True, seed=42)
cls.K20 = nx.complete_graph(20)
cls.K10 = nx.complete_graph(10)
cls.K5 = nx.complete_graph(5)
cls.G_list = [
cls.path,
cls.directed_path,
cls.cycle,
cls.directed_cycle,
cls.gnp,
cls.directed_gnp,
cls.K10,
cls.K5,
cls.K20,
]
def test_cycles(self):
K_undir = nx.all_pairs_node_connectivity(self.cycle)
for source in K_undir:
for target, k in K_undir[source].items():
assert k == 2
K_dir = nx.all_pairs_node_connectivity(self.directed_cycle)
for source in K_dir:
for target, k in K_dir[source].items():
assert k == 1
def test_complete(self):
for G in [self.K10, self.K5, self.K20]:
K = nx.all_pairs_node_connectivity(G)
for source in K:
for target, k in K[source].items():
assert k == len(G) - 1
def test_paths(self):
K_undir = nx.all_pairs_node_connectivity(self.path)
for source in K_undir:
for target, k in K_undir[source].items():
assert k == 1
K_dir = nx.all_pairs_node_connectivity(self.directed_path)
for source in K_dir:
for target, k in K_dir[source].items():
if source < target:
assert k == 1
else:
assert k == 0
def test_all_pairs_connectivity_nbunch(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
C = nx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert len(C) == len(nbunch)
def test_all_pairs_connectivity_icosahedral(self):
G = nx.icosahedral_graph()
C = nx.all_pairs_node_connectivity(G)
assert all(5 == C[u][v] for u, v in itertools.combinations(G, 2))
def test_all_pairs_connectivity(self):
G = nx.Graph()
nodes = [0, 1, 2, 3]
nx.add_path(G, nodes)
A = {n: {} for n in G}
for u, v in itertools.combinations(nodes, 2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert sorted((k, sorted(v)) for k, v in A.items()) == sorted(
(k, sorted(v)) for k, v in C.items()
)
def test_all_pairs_connectivity_directed(self):
G = nx.DiGraph()
nodes = [0, 1, 2, 3]
nx.add_path(G, nodes)
A = {n: {} for n in G}
for u, v in itertools.permutations(nodes, 2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert sorted((k, sorted(v)) for k, v in A.items()) == sorted(
(k, sorted(v)) for k, v in C.items()
)
def test_all_pairs_connectivity_nbunch_combinations(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = {n: {} for n in nbunch}
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert sorted((k, sorted(v)) for k, v in A.items()) == sorted(
(k, sorted(v)) for k, v in C.items()
)
def test_all_pairs_connectivity_nbunch_iter(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = {n: {} for n in nbunch}
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch))
assert sorted((k, sorted(v)) for k, v in A.items()) == sorted(
(k, sorted(v)) for k, v in C.items()
)
| TestAllPairsNodeConnectivity |
python | ray-project__ray | python/ray/train/v2/_internal/state/schema.py | {
"start": 5403,
"end": 5853
} | class ____(TrainWorker):
"""Detailed metadata for a Ray Train worker, including process and GPU stats."""
processStats: Optional[ProcessStats] = Field(
None, description="CPU and memory statistics for the worker process."
)
gpus: List[GPUStats] = Field(
default_factory=list,
description="A list of GPUs used by the worker process,"
" with detailed statistics.",
)
@DeveloperAPI
| DecoratedTrainWorker |
python | vyperlang__vyper | vyper/venom/analysis/analysis.py | {
"start": 153,
"end": 849
} | class ____:
"""
Base class for all Venom IR analyses.
"""
function: IRFunction
analyses_cache: IRAnalysesCache
def __init__(self, analyses_cache: IRAnalysesCache, function: IRFunction):
self.analyses_cache = analyses_cache
self.function = function
def analyze(self, *args, **kwargs):
"""
Override this method to perform the analysis.
"""
raise NotImplementedError
def invalidate(self):
"""
Override this method to respond to an invalidation request, and possibly
invalidate any other analyses that depend on this one.
"""
pass
T = TypeVar("T", bound=IRAnalysis)
| IRAnalysis |
python | pytorch__pytorch | test/package/package_a/test_module.py | {
"start": 1303,
"end": 1481
} | class ____(torch.nn.Module):
def forward(self, x):
x = a_non_torch_leaf(x, x)
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
| SimpleTest |
python | doocs__leetcode | solution/3300-3399/3304.Find the K-th Character in String Game I/Solution.py | {
"start": 0,
"end": 204
} | class ____:
def kthCharacter(self, k: int) -> str:
word = [0]
while len(word) < k:
word.extend([(x + 1) % 26 for x in word])
return chr(ord("a") + word[k - 1])
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/comprehend.py | {
"start": 9825,
"end": 17603
} | class ____(AwsBaseOperator[ComprehendHook]):
"""
Create a comprehend document classifier that can categorize documents.
Provide a set of training documents that are labeled with the categories.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComprehendCreateDocumentClassifierOperator`
:param document_classifier_name: The name of the document classifier. (templated)
:param input_data_config: Specifies the format and location of the input data for the job. (templated)
:param mode: Indicates the mode in which the classifier will be trained. (templated)
:param data_access_role_arn: The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend
read access to your input data. (templated)
:param language_code: The language of the input documents. You can specify any of the languages supported by
Amazon Comprehend. All documents must be in the same language. (templated)
:param fail_on_warnings: If set to True, the document classifier training job will throw an error when the
status is TRAINED_WITH_WARNING. (default False)
:param output_data_config: Specifies the location for the output files from a custom classifier job.
This parameter is required for a request that creates a native document model. (templated)
:param document_classifier_kwargs: Any optional parameters to pass to the document classifier. (templated)
:param wait_for_completion: Whether to wait for job to stop. (default: True)
:param waiter_delay: Time in seconds to wait between status checks. (default: 60)
:param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 20)
:param deferrable: If True, the operator will wait asynchronously for the job to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = ComprehendHook
operator_extra_links = (ComprehendDocumentClassifierLink(),)
template_fields: Sequence[str] = aws_template_fields(
"document_classifier_name",
"input_data_config",
"mode",
"data_access_role_arn",
"language_code",
"output_data_config",
"document_classifier_kwargs",
)
template_fields_renderers: ClassVar[dict] = {
"input_data_config": "json",
"output_data_config": "json",
"document_classifier_kwargs": "json",
}
def __init__(
self,
document_classifier_name: str,
input_data_config: dict[str, Any],
mode: str,
data_access_role_arn: str,
language_code: str,
fail_on_warnings: bool = False,
output_data_config: dict[str, Any] | None = None,
document_classifier_kwargs: dict[str, Any] | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.document_classifier_name = document_classifier_name
self.input_data_config = input_data_config
self.mode = mode
self.data_access_role_arn = data_access_role_arn
self.language_code = language_code
self.fail_on_warnings = fail_on_warnings
self.output_data_config = output_data_config
self.document_classifier_kwargs = document_classifier_kwargs or {}
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> str:
if self.output_data_config:
self.document_classifier_kwargs["OutputDataConfig"] = self.output_data_config
document_classifier_arn = self.hook.conn.create_document_classifier(
DocumentClassifierName=self.document_classifier_name,
InputDataConfig=self.input_data_config,
Mode=self.mode,
DataAccessRoleArn=self.data_access_role_arn,
LanguageCode=self.language_code,
**self.document_classifier_kwargs,
)["DocumentClassifierArn"]
# create the link to console
job_url = ComprehendDocumentClassifierLink.format_str.format(
aws_domain=ComprehendDocumentClassifierLink.get_aws_domain(self.hook.conn_partition),
region_name=self.hook.conn_region_name,
arn=document_classifier_arn,
)
ComprehendDocumentClassifierLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
arn=document_classifier_arn,
)
self.log.info("You can monitor the classifier at %s", job_url)
message_description = f"document classifier {document_classifier_arn} to complete."
if self.deferrable:
self.log.info("Deferring %s", message_description)
self.defer(
trigger=ComprehendCreateDocumentClassifierCompletedTrigger(
document_classifier_arn=document_classifier_arn,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.log.info("Waiting for %s", message_description)
self.hook.get_waiter("create_document_classifier_complete").wait(
DocumentClassifierArn=document_classifier_arn,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
self.hook.validate_document_classifier_training_status(
document_classifier_arn=document_classifier_arn, fail_on_warnings=self.fail_on_warnings
)
return document_classifier_arn
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(
"Error while running comprehend create document classifier: %s", validated_event
)
self.hook.validate_document_classifier_training_status(
document_classifier_arn=validated_event["document_classifier_arn"],
fail_on_warnings=self.fail_on_warnings,
)
self.log.info(
"Comprehend document classifier `%s` complete.", validated_event["document_classifier_arn"]
)
return validated_event["document_classifier_arn"]
| ComprehendCreateDocumentClassifierOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoom/components.py | {
"start": 657,
"end": 1065
} | class ____(type):
_instances = {}
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the `__init__` argument do not affect
the returned instance.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
@dataclass
| SingletonMeta |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 39650,
"end": 39892
} | class ____(dtypes.ExtendedDType):
type: ClassVar[Any] = barrier_dtype
name: ClassVar[str] = "barrier"
num_arrivals: int
orders_tensor_core: bool
def __str__(self):
return self.name
@dataclasses.dataclass(frozen=True)
| BarrierType |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 30925,
"end": 32158
} | class ____:
def __init__(self, maxsize: int = 4096) -> None:
self.maxsize = maxsize
self.reset()
def reset(self) -> None:
self.set: OrderedDict[Any, Any] = OrderedDict()
def add(self, key: Union[str, tuple[object, object]]) -> bool:
if key in self.set:
self.set.move_to_end(key, last=True)
if not config.verbose:
return False
else:
self.set[key] = None
while len(self.set) > self.maxsize:
self.set.popitem(last=False)
return True
graph_break_dup_warning_checker = DuplicateWarningChecker()
def setup_compile_debug() -> contextlib.ExitStack:
compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
if compile_debug:
return add_file_handler()
return contextlib.ExitStack()
def reset_graph_break_dup_checker() -> None:
graph_break_dup_warning_checker.reset()
# Matches ANSI escape sequences (CSI)
ANSI_ESCAPE_PATTERN = re.compile(
r"""
\x1B # ESC
\[ # [
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
""",
re.VERBOSE,
)
| DuplicateWarningChecker |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/component/button.py | {
"start": 317,
"end": 423
} | class ____:
PRIMARY = 1
SECONDARY = 2
SUCCESS = 3
DANGER = 4
LINK = 5
| DiscordButtonStyle |
python | zarr-developers__zarr-python | src/zarr/abc/codec.py | {
"start": 5686,
"end": 5788
} | class ____(BaseCodec[NDBuffer, Buffer]):
"""Base class for array-to-bytes codecs."""
| ArrayBytesCodec |
python | PyCQA__pylint | pylint/checkers/symilar.py | {
"start": 5207,
"end": 5898
} | class ____:
"""A class to handle the numbering of begin and end of successive lines.
:note: Only the end line number can be updated.
"""
__slots__ = ("_end", "_start")
def __init__(self, start: LineNumber, end: LineNumber) -> None:
self._start: LineNumber = start
self._end: LineNumber = end
@property
def start(self) -> LineNumber:
return self._start
@property
def end(self) -> LineNumber:
return self._end
@end.setter
def end(self, value: LineNumber) -> None:
self._end = value
def __repr__(self) -> str:
return f"<SuccessiveLinesLimits <{self._start};{self._end}>>"
| SuccessiveLinesLimits |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/query_constructor/base.py | {
"start": 1381,
"end": 13977
} | class ____(BaseOutputParser[StructuredQuery]):
"""Output parser that parses a structured query."""
ast_parse: Callable
"""Callable that parses dict into internal representation of query language."""
@override
def parse(self, text: str) -> StructuredQuery:
try:
expected_keys = ["query", "filter"]
allowed_keys = ["query", "filter", "limit"]
parsed = parse_and_check_json_markdown(text, expected_keys)
if parsed["query"] is None or len(parsed["query"]) == 0:
parsed["query"] = " "
if parsed["filter"] == "NO_FILTER" or not parsed["filter"]:
parsed["filter"] = None
else:
parsed["filter"] = self.ast_parse(parsed["filter"])
if not parsed.get("limit"):
parsed.pop("limit", None)
return StructuredQuery(
**{k: v for k, v in parsed.items() if k in allowed_keys},
)
except Exception as e:
msg = f"Parsing text\n{text}\n raised following error:\n{e}"
raise OutputParserException(msg) from e
@classmethod
def from_components(
cls,
allowed_comparators: Sequence[Comparator] | None = None,
allowed_operators: Sequence[Operator] | None = None,
allowed_attributes: Sequence[str] | None = None,
fix_invalid: bool = False, # noqa: FBT001,FBT002
) -> StructuredQueryOutputParser:
"""Create a structured query output parser from components.
Args:
allowed_comparators: allowed comparators
allowed_operators: allowed operators
allowed_attributes: allowed attributes
fix_invalid: whether to fix invalid filter directives
Returns:
a structured query output parser
"""
ast_parse: Callable
if fix_invalid:
def ast_parse(raw_filter: str) -> FilterDirective | None:
filter_directive = cast(
"FilterDirective | None",
get_parser().parse(raw_filter),
)
return fix_filter_directive(
filter_directive,
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
allowed_attributes=allowed_attributes,
)
else:
ast_parse = get_parser(
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
allowed_attributes=allowed_attributes,
).parse
return cls(ast_parse=ast_parse)
def fix_filter_directive(
filter: FilterDirective | None, # noqa: A002
*,
allowed_comparators: Sequence[Comparator] | None = None,
allowed_operators: Sequence[Operator] | None = None,
allowed_attributes: Sequence[str] | None = None,
) -> FilterDirective | None:
"""Fix invalid filter directive.
Args:
filter: Filter directive to fix.
allowed_comparators: allowed comparators. Defaults to all comparators.
allowed_operators: allowed operators. Defaults to all operators.
allowed_attributes: allowed attributes. Defaults to all attributes.
Returns:
Fixed filter directive.
"""
if (
not (allowed_comparators or allowed_operators or allowed_attributes)
) or not filter:
return filter
if isinstance(filter, Comparison):
if allowed_comparators and filter.comparator not in allowed_comparators:
return None
if allowed_attributes and filter.attribute not in allowed_attributes:
return None
return filter
if isinstance(filter, Operation):
if allowed_operators and filter.operator not in allowed_operators:
return None
args = [
cast(
"FilterDirective",
fix_filter_directive(
arg,
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
allowed_attributes=allowed_attributes,
),
)
for arg in filter.arguments
if arg is not None
]
if not args:
return None
if len(args) == 1 and filter.operator in (Operator.AND, Operator.OR):
return args[0]
return Operation(
operator=filter.operator,
arguments=args,
)
return filter
def _format_attribute_info(info: Sequence[AttributeInfo | dict]) -> str:
info_dicts = {}
for i in info:
i_dict = dict(i)
info_dicts[i_dict.pop("name")] = i_dict
return json.dumps(info_dicts, indent=4).replace("{", "{{").replace("}", "}}")
def construct_examples(input_output_pairs: Sequence[tuple[str, dict]]) -> list[dict]:
"""Construct examples from input-output pairs.
Args:
input_output_pairs: Sequence of input-output pairs.
Returns:
List of examples.
"""
examples = []
for i, (_input, output) in enumerate(input_output_pairs):
structured_request = (
json.dumps(output, indent=4).replace("{", "{{").replace("}", "}}")
)
example = {
"i": i + 1,
"user_query": _input,
"structured_request": structured_request,
}
examples.append(example)
return examples
def get_query_constructor_prompt(
document_contents: str,
attribute_info: Sequence[AttributeInfo | dict],
*,
examples: Sequence | None = None,
allowed_comparators: Sequence[Comparator] = tuple(Comparator),
allowed_operators: Sequence[Operator] = tuple(Operator),
enable_limit: bool = False,
schema_prompt: BasePromptTemplate | None = None,
**kwargs: Any,
) -> BasePromptTemplate:
"""Create query construction prompt.
Args:
document_contents: The contents of the document to be queried.
attribute_info: A list of AttributeInfo objects describing
the attributes of the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: Sequence of allowed comparators.
allowed_operators: Sequence of allowed operators.
enable_limit: Whether to enable the limit operator.
schema_prompt: Prompt for describing query schema. Should have string input
variables allowed_comparators and allowed_operators.
kwargs: Additional named params to pass to FewShotPromptTemplate init.
Returns:
A prompt template that can be used to construct queries.
"""
default_schema_prompt = (
SCHEMA_WITH_LIMIT_PROMPT if enable_limit else DEFAULT_SCHEMA_PROMPT
)
schema_prompt = schema_prompt or default_schema_prompt
attribute_str = _format_attribute_info(attribute_info)
schema = schema_prompt.format(
allowed_comparators=" | ".join(allowed_comparators),
allowed_operators=" | ".join(allowed_operators),
)
if examples and isinstance(examples[0], tuple):
examples = construct_examples(examples)
example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT
prefix = PREFIX_WITH_DATA_SOURCE.format(
schema=schema,
content=document_contents,
attributes=attribute_str,
)
suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1)
else:
examples = examples or (
EXAMPLES_WITH_LIMIT if enable_limit else DEFAULT_EXAMPLES
)
example_prompt = EXAMPLE_PROMPT
prefix = DEFAULT_PREFIX.format(schema=schema)
suffix = DEFAULT_SUFFIX.format(
i=len(examples) + 1,
content=document_contents,
attributes=attribute_str,
)
return FewShotPromptTemplate(
examples=list(examples),
example_prompt=example_prompt,
input_variables=["query"],
suffix=suffix,
prefix=prefix,
**kwargs,
)
@deprecated(
since="0.2.13",
alternative="load_query_constructor_runnable",
removal="1.0",
)
def load_query_constructor_chain(
llm: BaseLanguageModel,
document_contents: str,
attribute_info: Sequence[AttributeInfo | dict],
examples: list | None = None,
allowed_comparators: Sequence[Comparator] = tuple(Comparator),
allowed_operators: Sequence[Operator] = tuple(Operator),
enable_limit: bool = False, # noqa: FBT001,FBT002
schema_prompt: BasePromptTemplate | None = None,
**kwargs: Any,
) -> LLMChain:
"""Load a query constructor chain.
Args:
llm: BaseLanguageModel to use for the chain.
document_contents: The contents of the document to be queried.
attribute_info: Sequence of attributes in the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: Sequence of allowed comparators. Defaults to all
`Comparator` objects.
allowed_operators: Sequence of allowed operators. Defaults to all `Operator`
objects.
enable_limit: Whether to enable the limit operator.
schema_prompt: Prompt for describing query schema. Should have string input
variables allowed_comparators and allowed_operators.
**kwargs: Arbitrary named params to pass to LLMChain.
Returns:
A LLMChain that can be used to construct queries.
"""
prompt = get_query_constructor_prompt(
document_contents,
attribute_info,
examples=examples,
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
enable_limit=enable_limit,
schema_prompt=schema_prompt,
)
allowed_attributes = [
ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"]
for ainfo in attribute_info
]
output_parser = StructuredQueryOutputParser.from_components(
allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators,
allowed_attributes=allowed_attributes,
)
# For backwards compatibility.
prompt.output_parser = output_parser
return LLMChain(llm=llm, prompt=prompt, output_parser=output_parser, **kwargs)
def load_query_constructor_runnable(
    llm: BaseLanguageModel,
    document_contents: str,
    attribute_info: Sequence[AttributeInfo | dict],
    *,
    examples: Sequence | None = None,
    allowed_comparators: Sequence[Comparator] = tuple(Comparator),
    allowed_operators: Sequence[Operator] = tuple(Operator),
    enable_limit: bool = False,
    schema_prompt: BasePromptTemplate | None = None,
    fix_invalid: bool = False,
    **kwargs: Any,
) -> Runnable:
    """Build a Runnable (prompt | llm | parser) that constructs structured queries.

    Args:
        llm: BaseLanguageModel to use for the chain.
        document_contents: Description of the page contents of the document to be
            queried.
        attribute_info: Sequence of attributes in the document.
        examples: Optional list of examples to use for the chain.
        allowed_comparators: Sequence of allowed comparators. Defaults to all
            `Comparator` objects.
        allowed_operators: Sequence of allowed operators. Defaults to all `Operator`
            objects.
        enable_limit: Whether to enable the limit operator.
        schema_prompt: Prompt for describing query schema. Should have string input
            variables allowed_comparators and allowed_operators.
        fix_invalid: Whether to fix invalid filter directives by ignoring invalid
            operators, comparators and attributes.
        kwargs: Additional named params to pass to FewShotPromptTemplate init.
    Returns:
        A Runnable that can be used to construct queries.
    """
    # Each attribute entry is either an AttributeInfo or a plain dict keyed by
    # "name"; normalize both to the bare attribute name for the parser.
    attr_names = [
        info.name if isinstance(info, AttributeInfo) else info["name"]
        for info in attribute_info
    ]
    parser = StructuredQueryOutputParser.from_components(
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        allowed_attributes=attr_names,
        fix_invalid=fix_invalid,
    )
    # Extra kwargs flow through to the few-shot prompt template construction.
    constructor_prompt = get_query_constructor_prompt(
        document_contents,
        attribute_info,
        examples=examples,
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        enable_limit=enable_limit,
        schema_prompt=schema_prompt,
        **kwargs,
    )
    return constructor_prompt | llm | parser
| StructuredQueryOutputParser |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 41352,
"end": 41708
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("pt_BR")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in PtBrProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in PtBrProvider.MONTH_NAMES.values()
| TestPtBr |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 49996,
"end": 50615
} | class ____:
def test_field_with_custom_props(self, spec):
spec.components.schema("PatternedObject", schema=PatternedObjectSchema)
result = get_schemas(spec)["PatternedObject"]["properties"]["count"]
assert "x-count" in result
assert result["x-count"] == 1
def test_field_with_custom_props_passed_as_snake_case(self, spec):
spec.components.schema("PatternedObject", schema=PatternedObjectSchema)
result = get_schemas(spec)["PatternedObject"]["properties"]["count2"]
assert "x-count2" in result
assert result["x-count2"] == 2
| TestFieldWithCustomProps |
python | kubernetes-client__python | kubernetes/client/api/coordination_v1beta1_api.py | {
"start": 543,
"end": 121813
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_lease_candidate(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_lease_candidate # noqa: E501
create a LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_lease_candidate(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1LeaseCandidate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1LeaseCandidate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_lease_candidate_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_lease_candidate_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_lease_candidate # noqa: E501
create a LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_lease_candidate_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1LeaseCandidate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1LeaseCandidate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_lease_candidate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_lease_candidate`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_lease_candidate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1LeaseCandidate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_lease_candidate(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_lease_candidate # noqa: E501
delete collection of LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_lease_candidate(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_lease_candidate_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_lease_candidate_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_lease_candidate # noqa: E501
delete collection of LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_lease_candidate_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'ignore_store_read_error_with_cluster_breaking_potential',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_lease_candidate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_lease_candidate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501
query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_lease_candidate(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_lease_candidate  # noqa: E501

    Delete a single LeaseCandidate object.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive a request thread instead (call
    ``.get()`` on it to obtain the result).

    >>> thread = api.delete_namespaced_lease_candidate(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, validate the request without persisting it; 'All' processes every dry-run stage
    :param int grace_period_seconds: non-negative seconds to wait before deletion; zero deletes immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated since 1.7 — use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body of the response.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_lease_candidate_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_lease_candidate_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_lease_candidate  # noqa: E501

    Delete a single LeaseCandidate and return the full HTTP response.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``.get()`` on it to obtain the result).

    >>> thread = api.delete_namespaced_lease_candidate_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, validate the request without persisting it; 'All' processes every dry-run stage
    :param int grace_period_seconds: non-negative seconds to wait before deletion; zero deletes immediately
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects (skips finalizers and precondition checks; dangerous)
    :param bool orphan_dependents: deprecated since 1.7 — use propagation_policy instead
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # (python_name, wire_name) pairs for query parameters, in the order
    # the server expects them to be serialized.
    query_spec = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential',
         'ignoreStoreReadErrorWithClusterBreakingPotential'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ]
    # Every keyword this endpoint accepts, including the shared
    # client-control parameters.
    accepted = frozenset(py for py, _ in query_spec) | frozenset([
        'name', 'namespace', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])

    local_var_params = {'name': name, 'namespace': namespace}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_lease_candidate" % key
            )
        local_var_params[key] = val

    if self.api_client.client_side_validation:
        # Both path parameters are mandatory and may not be None.
        if local_var_params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_lease_candidate`")  # noqa: E501
        if local_var_params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_lease_candidate`")  # noqa: E501

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }
    # Only parameters explicitly set to a non-None value go on the wire.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in query_spec
        if local_var_params.get(py) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the API resources available under coordination.k8s.io/v1beta1.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive a request thread instead (call
    ``.get()`` on it to obtain the result).

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body of the response.
    kwargs['_return_http_data_only'] = True
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the API resources available under coordination.k8s.io/v1beta1,
    returning the full HTTP response.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``.get()`` on it to obtain the result).

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no business parameters; only the shared
    # client-control keywords are legal.
    accepted = frozenset([
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_lease_candidate_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_lease_candidate_for_all_namespaces  # noqa: E501

    List or watch objects of kind LeaseCandidate across all namespaces.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive a request thread instead (call
    ``.get()`` on it to obtain the result).

    >>> thread = api.list_lease_candidate_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore)
    :param str _continue: continuation token from a previous paginated list response
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of results per list call; pagination continues via `continue`
    :param str pretty: pretty-print the output when 'true'
    :param str resource_version: constrain which resource versions may serve the request
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for current state
    :param int timeout_seconds: timeout for the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications instead of a one-shot list
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1beta1LeaseCandidateList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body of the response.
    kwargs['_return_http_data_only'] = True
    return self.list_lease_candidate_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_lease_candidate_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_lease_candidate_for_all_namespaces  # noqa: E501

    List or watch objects of kind LeaseCandidate across all namespaces,
    returning the full HTTP response.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead (call ``.get()`` on it to obtain the result).

    >>> thread = api.list_lease_candidate_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore)
    :param str _continue: continuation token from a previous paginated list response
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of results per list call; pagination continues via `continue`
    :param str pretty: pretty-print the output when 'true'
    :param str resource_version: constrain which resource versions may serve the request
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for current state
    :param int timeout_seconds: timeout for the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications instead of a one-shot list
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding it. Default True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(V1beta1LeaseCandidateList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # (python_name, wire_name) pairs for query parameters, in the order
    # the server expects them to be serialized.  Note `_continue` maps to
    # the reserved word `continue` on the wire.
    query_spec = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Every keyword this endpoint accepts, including the shared
    # client-control parameters.
    accepted = frozenset(py for py, _ in query_spec) | frozenset([
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_lease_candidate_for_all_namespaces" % key
            )

    # Only parameters explicitly set to a non-None value go on the wire.
    query_params = [
        (wire, kwargs[py])
        for py, wire in query_spec
        if kwargs.get(py) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/leasecandidates', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1LeaseCandidateList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def list_namespaced_lease_candidate(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_lease_candidate  # noqa: E501

    List or watch objects of kind LeaseCandidate within a namespace.

    The request is synchronous by default; pass ``async_req=True`` to
    receive the request thread instead (call ``.get()`` on it for the
    result). All other keyword arguments (``pretty``,
    ``allow_watch_bookmarks``, ``_continue``, ``field_selector``,
    ``label_selector``, ``limit``, ``resource_version``,
    ``resource_version_match``, ``send_initial_events``,
    ``timeout_seconds``, ``watch``, ``_preload_content``,
    ``_request_timeout``) are forwarded unchanged to
    :meth:`list_namespaced_lease_candidate_with_http_info`.

    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :return: V1beta1LeaseCandidateList, or the request thread when
             called asynchronously.
    """
    # Delegate to the *_with_http_info variant but ask for the payload
    # only (drop status code and headers from the return value).
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_lease_candidate_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_lease_candidate_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_lease_candidate  # noqa: E501

    List or watch objects of kind LeaseCandidate within a namespace,
    returning the response data together with HTTP status code and
    headers. Synchronous by default; pass ``async_req=True`` for the
    request thread.

    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output if 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continue token from a previous list call
    :param str field_selector: restrict results by field values
    :param str label_selector: restrict results by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resource version constraint for the read
    :param str resource_version_match: how resource_version is applied
    :param bool send_initial_events: emit synthetic initial watch events
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream change notifications instead of listing
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or (connect, read) tuple
    :return: tuple(V1beta1LeaseCandidateList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call's arguments; kwargs are folded in below after
    # validation against the accepted parameter set.
    params = locals()

    accepted = {
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_lease_candidate" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side check of the one required parameter.
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_lease_candidate`")  # noqa: E501

    path_params = {'namespace': params['namespace']}  # noqa: E501

    # Translate python_case parameters to their wireName query keys,
    # skipping anything unset.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))  # noqa: E501

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1LeaseCandidateList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_lease_candidate(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_lease_candidate  # noqa: E501

    Partially update the specified LeaseCandidate.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead. All other keyword arguments (``pretty``,
    ``dry_run``, ``field_manager``, ``field_validation``, ``force``,
    ``_preload_content``, ``_request_timeout``) are forwarded unchanged
    to :meth:`patch_namespaced_lease_candidate_with_http_info`.

    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch document (required)
    :return: V1beta1LeaseCandidate, or the request thread when called
             asynchronously.
    """
    # Delegate to the *_with_http_info variant but ask for the payload
    # only (drop status code and headers from the return value).
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_lease_candidate_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_lease_candidate_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_lease_candidate  # noqa: E501

    Partially update the specified LeaseCandidate, returning the
    response data together with HTTP status code and headers.
    Synchronous by default; pass ``async_req=True`` for the request
    thread.

    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch document (required)
    :param str pretty: pretty-print the output if 'true'
    :param str dry_run: process the request without persisting changes
    :param str field_manager: actor/entity name for field management
    :param str field_validation: Ignore / Warn / Strict handling of
        unknown or duplicate fields in the request body
    :param bool force: force Apply requests over conflicting field owners
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or (connect, read) tuple
    :return: tuple(V1beta1LeaseCandidate, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call's arguments; kwargs are folded in below after
    # validation against the accepted parameter set.
    params = locals()

    accepted = {
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_lease_candidate" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side checks of the required parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_lease_candidate`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_lease_candidate`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_lease_candidate`")  # noqa: E501

    path_params = {
        'name': params['name'],  # noqa: E501
        'namespace': params['namespace'],  # noqa: E501
    }

    # Translate python_case parameters to their wireName query keys,
    # skipping anything unset.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
        ('force', 'force'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))  # noqa: E501

    body_params = params.get('body')

    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='V1beta1LeaseCandidate',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def read_namespaced_lease_candidate(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_lease_candidate  # noqa: E501

    Read the specified LeaseCandidate.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead. Other keyword arguments (``pretty``,
    ``_preload_content``, ``_request_timeout``) are forwarded unchanged
    to :meth:`read_namespaced_lease_candidate_with_http_info`.

    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :return: V1beta1LeaseCandidate, or the request thread when called
             asynchronously.
    """
    # Delegate to the *_with_http_info variant but ask for the payload
    # only (drop status code and headers from the return value).
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_lease_candidate_with_http_info(name, namespace, **kwargs)  # noqa: E501
def read_namespaced_lease_candidate_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_lease_candidate  # noqa: E501

    Read the specified LeaseCandidate, returning the response data
    together with HTTP status code and headers. Synchronous by default;
    pass ``async_req=True`` for the request thread.

    :param str name: name of the LeaseCandidate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output if 'true'
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or (connect, read) tuple
    :return: tuple(V1beta1LeaseCandidate, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call's arguments; kwargs are folded in below after
    # validation against the accepted parameter set.
    params = locals()

    accepted = {
        'name',
        'namespace',
        'pretty',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_lease_candidate" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side checks of the required parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_lease_candidate`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_lease_candidate`")  # noqa: E501

    path_params = {
        'name': params['name'],  # noqa: E501
        'namespace': params['namespace'],  # noqa: E501
    }

    # The only query parameter for a plain read is `pretty`.
    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))  # noqa: E501

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1LeaseCandidate',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_lease_candidate(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_lease_candidate # noqa: E501
replace the specified LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_lease_candidate(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the LeaseCandidate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1LeaseCandidate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1beta1LeaseCandidate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_lease_candidate_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_lease_candidate_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_lease_candidate # noqa: E501
replace the specified LeaseCandidate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_lease_candidate_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the LeaseCandidate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1LeaseCandidate body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1beta1LeaseCandidate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_lease_candidate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_lease_candidate`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_lease_candidate`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_lease_candidate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leasecandidates/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1LeaseCandidate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| CoordinationV1beta1Api |
python | jazzband__django-pipeline | pipeline/storage.py | {
"start": 3687,
"end": 3792
} | class ____(
NonPackagingMixin, ManifestStaticFilesStorage
):
pass
| NonPackagingPipelineManifestStorage |
python | getsentry__sentry | src/sentry/snuba/metrics/naming_layer/public.py | {
"start": 5171,
"end": 5788
} | class ____(Enum):
USER = "span.user"
DURATION = "span.duration"
SELF_TIME = "span.exclusive_time"
SELF_TIME_LIGHT = "span.exclusive_time_light"
RESPONSE_CONTENT_LENGTH = "http.response_content_length"
DECODED_RESPONSE_CONTENT_LENGTH = "http.decoded_response_content_length"
RESPONSE_TRANSFER_SIZE = "http.response_transfer_size"
CACHE_ITEM_SIZE = "cache.item_size"
HTTP_ERROR_COUNT = "span.http_error_count"
HTTP_ERROR_RATE = "span.http_error_rate"
HTTP_ERROR_COUNT_LIGHT = "span.http_error_count_light"
HTTP_ERROR_RATE_LIGHT = "span.http_error_rate_light"
| SpanMetricKey |
python | huggingface__transformers | tests/quantization/gptq/test_gptq.py | {
"start": 13304,
"end": 16609
} | class ____(unittest.TestCase):
"""
Test GPTQ model with exllama kernel and desc_act=True (also known as act-order).
More information on those arguments here:
https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig
"""
EXPECTED_OUTPUTS = set()
# flaky test: gptqmodel and auto-gptq are not output equivalent nor is string compare deterministic even between transformer/torch versions
EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.")
# 4bit + act_order + 128g
model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
input_text = "Hello, how are you ?"
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
dtype=torch.float16,
device_map={"": 0},
quantization_config=cls.quantization_config,
)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True)
def check_inference_correctness(self, model):
"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_quantized_layers_type(self):
self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama")
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model)
def test_max_input_length(self):
"""
Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend.
"""
prompt = "I am in Paris and" * 1000
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] > 4028)
with self.assertRaises(RuntimeError) as cm:
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
self.assertTrue("temp_state buffer is too small" in str(cm.exception))
prompt = "I am in Paris and"
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] < 4028)
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
@slow
@require_optimum
@require_gptq
@require_torch_gpu
@require_accelerate
| GPTQTestActOrderExllama |
python | numba__numba | numba/tests/test_lists.py | {
"start": 39263,
"end": 42534
} | class ____(MemoryLeakMixin, TestCase):
"""Tests that lists carry their initial value if present"""
def test_homogeneous_and_literal(self):
def bar(l):
...
@overload(bar)
def ol_bar(l):
if l.initial_value is None:
return lambda l: literally(l)
self.assertTrue(isinstance(l, types.List))
self.assertEqual(l.initial_value, [1, 2, 3])
self.assertEqual(hasattr(l, 'literal_value'), False)
return lambda l: l
@njit
def foo():
# keys and values all have literal representation
x = [1, 2, 3]
bar(x)
foo()
def test_heterogeneous_but_castable_to_homogeneous(self):
def bar(l):
...
@overload(bar)
def ol_bar(l):
self.assertTrue(isinstance(l, types.List))
self.assertEqual(l.initial_value, None)
self.assertEqual(hasattr(l, 'literal_value'), False)
return lambda l: l
@njit
def foo():
# This list will be typed based on 1j, i.e. complex128
# as the values are not all literals, there's no "initial_value"
# available irrespective of whether it's possible to rip this
# information out of the bytecode.
x = [1j, 2, 3]
bar(x)
foo()
def test_mutation_not_carried(self):
def bar(d):
...
@overload(bar)
def ol_bar(d):
if d.initial_value is None:
return lambda d: literally(d)
self.assertTrue(isinstance(d, types.List))
self.assertEqual(d.initial_value, [1, 2, 3])
return lambda d: d
@njit
def foo():
# This list is mutated, check the initial_value carries
# correctly and is not mutated
x = [1, 2, 3]
x.append(4)
bar(x)
foo()
def test_mutation_not_carried_single_function(self):
# this is another pattern for using literally
@njit
def nop(*args):
pass
for fn, iv in (nop, None), (literally, [1, 2, 3]):
@njit
def baz(x):
pass
def bar(z):
pass
@overload(bar)
def ol_bar(z):
def impl(z):
fn(z)
baz(z)
return impl
@njit
def foo():
x = [1, 2, 3]
bar(x)
x.append(2)
return x
foo()
# baz should be specialised based on literally being invoked and
# the literal/unliteral arriving at the call site
larg = baz.signatures[0][0]
self.assertEqual(larg.initial_value, iv)
def test_list_of_list_ctor(self):
# see issue 6082
@njit
def bar(x):
pass
@njit
def foo():
x = [[1, 2, 3, 4, 5], [1, 2, 3, 4, 6]]
bar(x)
foo()
larg = bar.signatures[0][0]
self.assertEqual(larg.initial_value, None)
self.assertEqual(larg.dtype.initial_value, None)
| TestListInitialValues |
python | pandas-dev__pandas | pandas/core/indexers/objects.py | {
"start": 5227,
"end": 7047
} | class ____(BaseIndexer):
"""Creates window boundaries that are of variable length, namely for time series."""
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
"""
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
# error: Argument 4 to "calculate_variable_window_bounds" has incompatible
# type "Optional[bool]"; expected "bool"
# error: Argument 6 to "calculate_variable_window_bounds" has incompatible
# type "Optional[ndarray]"; expected "ndarray"
return calculate_variable_window_bounds(
num_values,
self.window_size,
min_periods,
center, # type: ignore[arg-type]
closed,
self.index_array, # type: ignore[arg-type]
)
@set_module("pandas.api.indexers")
| VariableWindowIndexer |
python | tensorflow__tensorflow | tensorflow/python/framework/test_util_test.py | {
"start": 40419,
"end": 40818
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
| GraphAndEagerNoVariableSharing |
python | google__pytype | pytype/matcher_test.py | {
"start": 311,
"end": 1115
} | class ____(test_base.UnitTest):
def setUp(self):
super().setUp()
options = config.Options.create(
python_version=self.python_version, none_is_not_bool=True
)
self.ctx = test_utils.make_context(options)
self.matcher = self.ctx.matcher(self.ctx.root_node)
def _match_var(self, left, right):
var = self.ctx.program.NewVariable()
var.AddBinding(left, [], self.ctx.root_node)
for view in abstract_utils.get_views([var], self.ctx.root_node):
yield self.matcher.match_var_against_type(var, right, {}, view)
def assertMatch(self, left, right):
for match in self._match_var(left, right):
self.assertEqual(match, {})
def assertNoMatch(self, left, right):
for match in self._match_var(left, right):
self.assertIsNone(match)
| MatcherTestBase |
python | dask__distributed | distributed/http/scheduler/missing_bokeh.py | {
"start": 190,
"end": 629
} | class ____(RequestHandler):
@log_errors
def get(self):
self.write(
f"<p>Dask needs {BOKEH_REQUIREMENT} for the dashboard.</p>"
f'<p>Install with conda: <code>conda install "{BOKEH_REQUIREMENT}"</code></p>'
f'<p>Install with pip: <code>pip install "{BOKEH_REQUIREMENT}"</code></p>'
)
routes: list[tuple] = [(r"/", redirect("status"), {}), (r"status", MissingBokeh, {})]
| MissingBokeh |
python | faif__python-patterns | patterns/other/blackboard.py | {
"start": 2608,
"end": 3216
} | class ____(AbstractExpert):
"""Concrete class for a scientist expert."""
def __init__(self, blackboard) -> None:
super().__init__(blackboard)
@property
def is_eager_to_contribute(self) -> int:
return random.randint(0, 1)
def contribute(self) -> None:
self.blackboard.common_state["problems"] += random.randint(10, 20)
self.blackboard.common_state["suggestions"] += random.randint(10, 20)
self.blackboard.common_state["contributions"] += [self.__class__.__name__]
self.blackboard.common_state["progress"] += random.randint(10, 30)
| Scientist |
python | eth-brownie__brownie | brownie/_cli/console.py | {
"start": 2823,
"end": 3942
} | class ____:
"""
Custom printer during console input.
Ensures that stdout of the active prompt buffer is preserved when the console
is written to during user input.
"""
_builtins_print: Final = builtins.print
def __init__(self, console):
self.console: Final = console
def start(self) -> None:
builtins.print = self
def __call__(self, *values, sep=" ", end="\n", file=sys.stdout, flush=False):
if file != sys.stdout:
self._builtins_print(*values, sep=sep, end=end, file=file, flush=flush)
return
ps = sys.ps2 if self.console.buffer else sys.ps1
line = f"{ps}{self.console.prompt_session.app.current_buffer.text}"
# overwrite the prompt output with whitespace, in case the printed data is shorter
self.console.write(f"\r{' ' * len(line)}\r")
if not end.endswith("\n"):
end = "{end}\n"
text = f"{sep.join(str(i) for i in values)}{end}{line}"
self.console.write(text)
def finish(self) -> None:
builtins.print = self._builtins_print
@final
| ConsolePrinter |
python | PyCQA__pylint | tests/functional/n/no/no_member_dataclasses.py | {
"start": 1082,
"end": 1457
} | class ____(DeploymentState):
current: Any
candidate: Any
def to_dict(self) -> Dict:
return {
'type': self.type, # No error here
'current': asdict(self.current),
'candidate': asdict(self.candidate) if self.candidate else None,
}
# https://github.com/pylint-dev/pylint/issues/2600
@dataclass
| DeploymentStateLambda |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/pg_tf.py | {
"start": 827,
"end": 1261
} | class ____:
def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True):
self.W = tf.Variable(tf.random_normal(shape=(M1, M2)))
self.use_bias = use_bias
if use_bias:
self.b = tf.Variable(np.zeros(M2).astype(np.float32))
self.f = f
def forward(self, X):
if self.use_bias:
a = tf.matmul(X, self.W) + self.b
else:
a = tf.matmul(X, self.W)
return self.f(a)
# approximates pi(a | s)
| HiddenLayer |
python | pytorch__pytorch | test/test_mkldnn_verbose.py | {
"start": 142,
"end": 1482
} | class ____(TestCase):
def test_verbose_on(self):
num = 0
loc = os.path.dirname(os.path.abspath(__file__))
with subprocess.Popen(f'{sys.executable} -u {loc}/mkldnn_verbose.py --verbose-level=1', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
for line in p.stdout.readlines():
line = str(line, 'utf-8').strip()
if line.startswith("onednn_verbose"):
num = num + 1
elif line == 'Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope.':
return
self.assertTrue(num > 0, 'oneDNN verbose messages not found.')
def test_verbose_off(self):
num = 0
loc = os.path.dirname(os.path.abspath(__file__))
with subprocess.Popen(f'{sys.executable} -u {loc}/mkldnn_verbose.py --verbose-level=0', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
for line in p.stdout.readlines():
line = str(line, 'utf-8').strip()
if line.startswith("onednn_verbose"):
num = num + 1
self.assertEqual(num, 0, 'unexpected oneDNN verbose messages found.')
if __name__ == '__main__':
run_tests()
| TestMKLDNNVerbose |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/parser.py | {
"start": 1125,
"end": 20905
} | class ____(object):
__slots__ = 'start', 'end', 'source'
def __init__(self, start, end, source=None):
self.start = start
self.end = end
self.source = source
def __repr__(self):
source = ' source={}'.format(self.source) if self.source else ''
return '<Loc start={} end={}{}>'.format(self.start, self.end, source)
def __eq__(self, other):
return (
isinstance(other, Loc) and
self.start == other.start and
self.end == other.end and
self.source == other.source
)
def loc(parser, start):
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options['no_location']:
return None
if parser.options['no_source']:
return Loc(start, parser.prev_end)
return Loc(start, parser.prev_end, parser.source)
def advance(parser):
"""Moves the internal parser object to the next lexed token."""
prev_end = parser.token.end
parser.prev_end = prev_end
parser.token = parser.lexer.next_token(prev_end)
def peek(parser, kind):
"""Determines if the next token is of a given kind"""
return parser.token.kind == kind
def skip(parser, kind):
"""If the next token is of the given kind, return true after advancing
the parser. Otherwise, do not change the parser state
and throw an error."""
match = parser.token.kind == kind
if match:
advance(parser)
return match
def expect(parser, kind):
"""If the next token is of the given kind, return that token after
advancing the parser. Otherwise, do not change the parser state and
return False."""
token = parser.token
if token.kind == kind:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected {}, found {}'.format(
get_token_kind_desc(kind),
get_token_desc(token)
)
)
def expect_keyword(parser, value):
"""If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False."""
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token))
)
def unexpected(parser, at_token=None):
"""Helper function for creating an error when an unexpected lexed token
is encountered."""
token = at_token or parser.token
return GraphQLSyntaxError(
parser.source,
token.start,
u'Unexpected {}'.format(get_token_desc(token))
)
def any(parser, open_kind, parse_fn, close_kind):
"""Returns a possibly empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = []
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes
def many(parser, open_kind, parse_fn, close_kind):
"""Returns a non-empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = [parse_fn(parser)]
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes
def parse_name(parser):
"""Converts a name lex token into a name parse node."""
token = expect(parser, TokenKind.NAME)
return ast.Name(
value=token.value,
loc=loc(parser, token.start)
)
# Implements the parsing rules in the Document section.
def parse_document(parser):
start = parser.token.start
definitions = []
while True:
definitions.append(parse_definition(parser))
if skip(parser, TokenKind.EOF):
break
return ast.Document(
definitions=definitions,
loc=loc(parser, start)
)
def parse_definition(parser):
if peek(parser, TokenKind.BRACE_L):
return parse_operation_definition(parser)
if peek(parser, TokenKind.NAME):
name = parser.token.value
if name in ('query', 'mutation', 'subscription'):
return parse_operation_definition(parser)
elif name == 'fragment':
return parse_fragment_definition(parser)
elif name in ('schema', 'scalar', 'type', 'interface', 'union', 'enum', 'input', 'extend', 'directive'):
return parse_type_system_definition(parser)
raise unexpected(parser)
# Implements the parsing rules in the Operations section.
def parse_operation_definition(parser):
start = parser.token.start
if peek(parser, TokenKind.BRACE_L):
return ast.OperationDefinition(
operation='query',
name=None,
variable_definitions=None,
directives=[],
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
operation = parse_operation_type(parser)
name = None
if peek(parser, TokenKind.NAME):
name = parse_name(parser)
return ast.OperationDefinition(
operation=operation,
name=name,
variable_definitions=parse_variable_definitions(parser),
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
def parse_operation_type(parser):
operation_token = expect(parser, TokenKind.NAME)
operation = operation_token.value
if operation == 'query':
return 'query'
elif operation == 'mutation':
return 'mutation'
elif operation == 'subscription':
return 'subscription'
raise unexpected(parser, operation_token)
def parse_variable_definitions(parser):
if peek(parser, TokenKind.PAREN_L):
return many(
parser,
TokenKind.PAREN_L,
parse_variable_definition,
TokenKind.PAREN_R
)
return []
def parse_variable_definition(parser):
start = parser.token.start
return ast.VariableDefinition(
variable=parse_variable(parser),
type=expect(parser, TokenKind.COLON) and parse_type(parser),
default_value=parse_value_literal(parser, True) if skip(parser, TokenKind.EQUALS) else None,
loc=loc(parser, start)
)
def parse_variable(parser):
start = parser.token.start
expect(parser, TokenKind.DOLLAR)
return ast.Variable(
name=parse_name(parser),
loc=loc(parser, start)
)
def parse_selection_set(parser):
start = parser.token.start
return ast.SelectionSet(
selections=many(parser, TokenKind.BRACE_L, parse_selection, TokenKind.BRACE_R),
loc=loc(parser, start)
)
def parse_selection(parser):
if peek(parser, TokenKind.SPREAD):
return parse_fragment(parser)
else:
return parse_field(parser)
def parse_field(parser):
# Corresponds to both Field and Alias in the spec
start = parser.token.start
name_or_alias = parse_name(parser)
if skip(parser, TokenKind.COLON):
alias = name_or_alias
name = parse_name(parser)
else:
alias = None
name = name_or_alias
return ast.Field(
alias=alias,
name=name,
arguments=parse_arguments(parser),
directives=parse_directives(parser),
selection_set=parse_selection_set(parser) if peek(parser, TokenKind.BRACE_L) else None,
loc=loc(parser, start)
)
def parse_arguments(parser):
if peek(parser, TokenKind.PAREN_L):
return many(
parser, TokenKind.PAREN_L,
parse_argument, TokenKind.PAREN_R)
return []
def parse_argument(parser):
start = parser.token.start
return ast.Argument(
name=parse_name(parser),
value=expect(parser, TokenKind.COLON) and parse_value_literal(parser, False),
loc=loc(parser, start)
)
# Implements the parsing rules in the Fragments section.
def parse_fragment(parser):
# Corresponds to both FragmentSpread and InlineFragment in the spec
start = parser.token.start
expect(parser, TokenKind.SPREAD)
if peek(parser, TokenKind.NAME) and parser.token.value != 'on':
return ast.FragmentSpread(
name=parse_fragment_name(parser),
directives=parse_directives(parser),
loc=loc(parser, start)
)
type_condition = None
if parser.token.value == 'on':
advance(parser)
type_condition = parse_named_type(parser)
return ast.InlineFragment(
type_condition=type_condition,
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
def parse_fragment_definition(parser):
start = parser.token.start
expect_keyword(parser, 'fragment')
return ast.FragmentDefinition(
name=parse_fragment_name(parser),
type_condition=expect_keyword(parser, 'on') and parse_named_type(parser),
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
def parse_fragment_name(parser):
if parser.token.value == 'on':
raise unexpected(parser)
return parse_name(parser)
def parse_value_literal(parser, is_const):
token = parser.token
if token.kind == TokenKind.BRACKET_L:
return parse_list(parser, is_const)
elif token.kind == TokenKind.BRACE_L:
return parse_object(parser, is_const)
elif token.kind == TokenKind.INT:
advance(parser)
return ast.IntValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.FLOAT:
advance(parser)
return ast.FloatValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.STRING:
advance(parser)
return ast.StringValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.NAME:
if token.value in ('true', 'false'):
advance(parser)
return ast.BooleanValue(value=token.value == 'true', loc=loc(parser, token.start))
if token.value != 'null':
advance(parser)
return ast.EnumValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.DOLLAR:
if not is_const:
return parse_variable(parser)
raise unexpected(parser)
# Implements the parsing rules in the Values section.
def parse_variable_value(parser):
return parse_value_literal(parser, False)
def parse_const_value(parser):
return parse_value_literal(parser, True)
def parse_list(parser, is_const):
    """Parse a bracketed ListValue whose items are const or variable values."""
    list_start = parser.token.start
    if is_const:
        parse_item = parse_const_value
    else:
        parse_item = parse_variable_value
    # 'any' is this module's zero-or-more combinator (shadows the builtin).
    values = any(parser, TokenKind.BRACKET_L, parse_item, TokenKind.BRACKET_R)
    return ast.ListValue(values=values, loc=loc(parser, list_start))
def parse_object(parser, is_const):
    """Parse a braced ObjectValue as a sequence of ``name: value`` fields."""
    object_start = parser.token.start
    expect(parser, TokenKind.BRACE_L)
    fields = []
    closed = skip(parser, TokenKind.BRACE_R)
    while not closed:
        fields.append(parse_object_field(parser, is_const))
        closed = skip(parser, TokenKind.BRACE_R)
    return ast.ObjectValue(fields=fields, loc=loc(parser, object_start))
def parse_object_field(parser, is_const):
    """Parse a single ``name: value`` entry of an object value."""
    field_start = parser.token.start
    field_name = parse_name(parser)
    expect(parser, TokenKind.COLON)
    field_value = parse_value_literal(parser, is_const)
    return ast.ObjectField(
        name=field_name,
        value=field_value,
        loc=loc(parser, field_start),
    )
# Implements the parsing rules in the Directives section.
def parse_directives(parser):
    """Collect zero or more directives, each introduced by ``@``."""
    collected = []
    while True:
        if not peek(parser, TokenKind.AT):
            return collected
        collected.append(parse_directive(parser))
def parse_directive(parser):
    """Parse one directive: ``@Name Arguments?``."""
    at_start = parser.token.start
    expect(parser, TokenKind.AT)
    directive_name = parse_name(parser)
    return ast.Directive(
        name=directive_name,
        arguments=parse_arguments(parser),
        loc=loc(parser, at_start),
    )
# Implements the parsing rules in the Types section.
def parse_type(parser):
    """Handles the 'Type' rule: NamedType, ListType, and NonNullType."""
    type_start = parser.token.start
    if not skip(parser, TokenKind.BRACKET_L):
        inner = parse_named_type(parser)
    else:
        # Recurse for the element type of a list, then require the closer.
        inner = parse_type(parser)
        expect(parser, TokenKind.BRACKET_R)
        inner = ast.ListType(type=inner, loc=loc(parser, type_start))
    # A trailing '!' wraps whichever type was parsed above.
    if skip(parser, TokenKind.BANG):
        inner = ast.NonNullType(type=inner, loc=loc(parser, type_start))
    return inner
def parse_named_type(parser):
    """Parse a NamedType (a bare type name)."""
    name_start = parser.token.start
    type_name = parse_name(parser)
    return ast.NamedType(name=type_name, loc=loc(parser, name_start))
def parse_type_system_definition(parser):
    '''
    TypeSystemDefinition :
    - SchemaDefinition
    - TypeDefinition
    - TypeExtensionDefinition
    - DirectiveDefinition
    TypeDefinition :
    - ScalarTypeDefinition
    - ObjectTypeDefinition
    - InterfaceTypeDefinition
    - UnionTypeDefinition
    - EnumTypeDefinition
    - InputObjectTypeDefinition
    '''
    if not peek(parser, TokenKind.NAME):
        raise unexpected(parser)
    # Dispatch on the leading keyword instead of an if/elif ladder.
    handlers = {
        'schema': parse_schema_definition,
        'scalar': parse_scalar_type_definition,
        'type': parse_object_type_definition,
        'interface': parse_interface_type_definition,
        'union': parse_union_type_definition,
        'enum': parse_enum_type_definition,
        'input': parse_input_object_type_definition,
        'extend': parse_type_extension_definition,
        'directive': parse_directive_definition,
    }
    handler = handlers.get(parser.token.value)
    if handler is None:
        raise unexpected(parser)
    return handler(parser)
def parse_schema_definition(parser):
    """Parse ``schema Directives? { OperationTypeDefinition+ }``."""
    schema_start = parser.token.start
    expect_keyword(parser, 'schema')
    schema_directives = parse_directives(parser)
    op_types = many(
        parser, TokenKind.BRACE_L, parse_operation_type_definition, TokenKind.BRACE_R
    )
    return ast.SchemaDefinition(
        directives=schema_directives,
        operation_types=op_types,
        loc=loc(parser, schema_start),
    )
def parse_operation_type_definition(parser):
    """Parse one ``operation: NamedType`` entry inside a schema definition."""
    entry_start = parser.token.start
    op = parse_operation_type(parser)
    expect(parser, TokenKind.COLON)
    named = parse_named_type(parser)
    return ast.OperationTypeDefinition(
        operation=op,
        type=named,
        loc=loc(parser, entry_start),
    )
def parse_scalar_type_definition(parser):
    """Parse ``scalar Name Directives?``."""
    scalar_start = parser.token.start
    expect_keyword(parser, 'scalar')
    scalar_name = parse_name(parser)
    return ast.ScalarTypeDefinition(
        name=scalar_name,
        directives=parse_directives(parser),
        loc=loc(parser, scalar_start),
    )
def parse_object_type_definition(parser):
    """Parse ``type Name implements? Directives? { FieldDefinition* }``."""
    type_start = parser.token.start
    expect_keyword(parser, 'type')
    type_name = parse_name(parser)
    implemented = parse_implements_interfaces(parser)
    type_directives = parse_directives(parser)
    # 'any' is this module's zero-or-more combinator (shadows the builtin).
    field_defs = any(
        parser, TokenKind.BRACE_L, parse_field_definition, TokenKind.BRACE_R
    )
    return ast.ObjectTypeDefinition(
        name=type_name,
        interfaces=implemented,
        directives=type_directives,
        fields=field_defs,
        loc=loc(parser, type_start),
    )
def parse_implements_interfaces(parser):
    """Parse an optional ``implements`` clause; returns [] when absent."""
    if parser.token.value != 'implements':
        return []
    advance(parser)
    # At least one interface name follows; keep going while names appear.
    interfaces = [parse_named_type(parser)]
    while peek(parser, TokenKind.NAME):
        interfaces.append(parse_named_type(parser))
    return interfaces
def parse_field_definition(parser):
    """Parse ``Name ArgumentDefs? : Type Directives?``."""
    field_start = parser.token.start
    field_name = parse_name(parser)
    arg_defs = parse_argument_defs(parser)
    expect(parser, TokenKind.COLON)
    field_type = parse_type(parser)
    return ast.FieldDefinition(
        name=field_name,
        arguments=arg_defs,
        type=field_type,
        directives=parse_directives(parser),
        loc=loc(parser, field_start),
    )
def parse_argument_defs(parser):
    """Parse a parenthesized argument-definition list, or [] when absent."""
    if peek(parser, TokenKind.PAREN_L):
        return many(parser, TokenKind.PAREN_L, parse_input_value_def, TokenKind.PAREN_R)
    return []
def parse_input_value_def(parser):
    """Parse ``Name : Type (= ConstValue)? Directives?``."""
    def_start = parser.token.start
    value_name = parse_name(parser)
    expect(parser, TokenKind.COLON)
    value_type = parse_type(parser)
    default = None
    if skip(parser, TokenKind.EQUALS):
        default = parse_const_value(parser)
    return ast.InputValueDefinition(
        name=value_name,
        type=value_type,
        default_value=default,
        directives=parse_directives(parser),
        loc=loc(parser, def_start),
    )
def parse_interface_type_definition(parser):
    """Parse ``interface Name Directives? { FieldDefinition* }``."""
    iface_start = parser.token.start
    expect_keyword(parser, 'interface')
    iface_name = parse_name(parser)
    iface_directives = parse_directives(parser)
    iface_fields = any(
        parser, TokenKind.BRACE_L, parse_field_definition, TokenKind.BRACE_R
    )
    return ast.InterfaceTypeDefinition(
        name=iface_name,
        directives=iface_directives,
        fields=iface_fields,
        loc=loc(parser, iface_start),
    )
def parse_union_type_definition(parser):
    """Parse ``union Name Directives? = UnionMembers``."""
    union_start = parser.token.start
    expect_keyword(parser, 'union')
    union_name = parse_name(parser)
    union_directives = parse_directives(parser)
    expect(parser, TokenKind.EQUALS)
    members = parse_union_members(parser)
    return ast.UnionTypeDefinition(
        name=union_name,
        directives=union_directives,
        types=members,
        loc=loc(parser, union_start),
    )
def parse_union_members(parser):
    """Parse one or more named types separated by ``|``."""
    members = [parse_named_type(parser)]
    while skip(parser, TokenKind.PIPE):
        members.append(parse_named_type(parser))
    return members
def parse_enum_type_definition(parser):
    """Parse ``enum Name Directives? { EnumValueDefinition+ }``."""
    enum_start = parser.token.start
    expect_keyword(parser, 'enum')
    enum_name = parse_name(parser)
    enum_directives = parse_directives(parser)
    enum_values = many(
        parser, TokenKind.BRACE_L, parse_enum_value_definition, TokenKind.BRACE_R
    )
    return ast.EnumTypeDefinition(
        name=enum_name,
        directives=enum_directives,
        values=enum_values,
        loc=loc(parser, enum_start),
    )
def parse_enum_value_definition(parser):
    """Parse ``Name Directives?`` for a single enum value."""
    value_start = parser.token.start
    value_name = parse_name(parser)
    return ast.EnumValueDefinition(
        name=value_name,
        directives=parse_directives(parser),
        loc=loc(parser, value_start),
    )
def parse_input_object_type_definition(parser):
    """Parse ``input Name Directives? { InputValueDefinition* }``."""
    input_start = parser.token.start
    expect_keyword(parser, 'input')
    input_name = parse_name(parser)
    input_directives = parse_directives(parser)
    input_fields = any(
        parser, TokenKind.BRACE_L, parse_input_value_def, TokenKind.BRACE_R
    )
    return ast.InputObjectTypeDefinition(
        name=input_name,
        directives=input_directives,
        fields=input_fields,
        loc=loc(parser, input_start),
    )
def parse_type_extension_definition(parser):
    """Parse ``extend`` followed by an object type definition."""
    extend_start = parser.token.start
    expect_keyword(parser, 'extend')
    extended = parse_object_type_definition(parser)
    return ast.TypeExtensionDefinition(
        definition=extended,
        loc=loc(parser, extend_start),
    )
def parse_directive_definition(parser):
    """Parse ``directive @Name ArgumentDefs? on DirectiveLocations``."""
    directive_start = parser.token.start
    expect_keyword(parser, 'directive')
    expect(parser, TokenKind.AT)
    directive_name = parse_name(parser)
    arg_defs = parse_argument_defs(parser)
    expect_keyword(parser, 'on')
    location_names = parse_directive_locations(parser)
    return ast.DirectiveDefinition(
        name=directive_name,
        locations=location_names,
        arguments=arg_defs,
        loc=loc(parser, directive_start),
    )
def parse_directive_locations(parser):
    """Parse one or more directive-location names separated by ``|``."""
    location_names = [parse_name(parser)]
    while skip(parser, TokenKind.PIPE):
        location_names.append(parse_name(parser))
    return location_names
| Loc |
python | streamlit__streamlit | lib/streamlit/cursor.py | {
"start": 3203,
"end": 4542
} | class ____(Cursor):
def __init__(self, root_container: int, parent_path: tuple[int, ...] = ()) -> None:
"""A moving pointer to a delta location in the app.
RunningCursors auto-increment to the next available location when you
call get_locked_cursor() on them.
Parameters
----------
root_container: int
The root container this cursor lives in.
parent_path: tuple of ints
The full path of this cursor, consisting of the IDs of all ancestors.
The 0th item is the topmost ancestor.
"""
self._root_container = root_container
self._parent_path = parent_path
self._index = 0
@property
def root_container(self) -> int:
return self._root_container
@property
def parent_path(self) -> tuple[int, ...]:
return self._parent_path
@property
def index(self) -> int:
return self._index
@property
def is_locked(self) -> bool:
return False
def get_locked_cursor(self, **props: Any) -> LockedCursor:
locked_cursor = LockedCursor(
root_container=self._root_container,
parent_path=self._parent_path,
index=self._index,
**props,
)
self._index += 1
return locked_cursor
| RunningCursor |
python | pennersr__django-allauth | allauth/mfa/app_settings.py | {
"start": 61,
"end": 4596
} | class ____:
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def ADAPTER(self):
return self._setting("ADAPTER", "allauth.mfa.adapter.DefaultMFAAdapter")
@property
def ALLOW_UNVERIFIED_EMAIL(self) -> bool:
return self._setting("ALLOW_UNVERIFIED_EMAIL", False)
@property
def FORMS(self):
return self._setting("FORMS", {})
@property
def RECOVERY_CODE_COUNT(self):
"""
The number of recovery codes.
"""
return self._setting("RECOVERY_CODE_COUNT", 10)
@property
def RECOVERY_CODE_DIGITS(self):
"""
The number of digits of each recovery code.
"""
return self._setting("RECOVERY_CODE_DIGITS", 8)
@property
def TOTP_PERIOD(self):
"""
The period that a TOTP code will be valid for, in seconds.
"""
return self._setting("TOTP_PERIOD", 30)
@property
def TOTP_DIGITS(self):
"""
The number of digits for TOTP codes
"""
return self._setting("TOTP_DIGITS", 6)
@property
def TOTP_ISSUER(self):
"""
The issuer.
"""
return self._setting("TOTP_ISSUER", "")
@property
def TOTP_INSECURE_BYPASS_CODE(self):
"""
Don't use this on production. Useful for development & E2E tests only.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
code = self._setting("TOTP_INSECURE_BYPASS_CODE", None)
if (not settings.DEBUG) and code:
raise ImproperlyConfigured(
"MFA_TOTP_INSECURE_BYPASS_CODE is for testing purposes only"
)
return code
@property
def TOTP_TOLERANCE(self):
"""
The number of time steps in the past or future to allow. Lower values are more secure, but more likely to fail due to clock drift.
"""
return self._setting("TOTP_TOLERANCE", 0)
@property
def SUPPORTED_TYPES(self):
dflt = ["recovery_codes", "totp"]
return self._setting("SUPPORTED_TYPES", dflt)
@property
def WEBAUTHN_ALLOW_INSECURE_ORIGIN(self):
return self._setting("WEBAUTHN_ALLOW_INSECURE_ORIGIN", False)
@property
def PASSKEY_LOGIN_ENABLED(self) -> bool:
return "webauthn" in self.SUPPORTED_TYPES and self._setting(
"PASSKEY_LOGIN_ENABLED", False
)
@property
def PASSKEY_SIGNUP_ENABLED(self) -> bool:
return "webauthn" in self.SUPPORTED_TYPES and self._setting(
"PASSKEY_SIGNUP_ENABLED", False
)
@property
def TRUST_ENABLED(self) -> bool:
return self._setting("TRUST_ENABLED", False)
@property
def _TRUST_STAGE_ENABLED(self) -> bool:
from allauth.account import app_settings as account_settings
return self.TRUST_ENABLED or account_settings.LOGIN_BY_CODE_TRUST_ENABLED
@property
def TRUST_COOKIE_AGE(self) -> timedelta:
age = self._setting("TRUST_COOKIE_AGE", timedelta(days=14))
if not isinstance(age, timedelta):
age = timedelta(seconds=age)
return age
@property
def TRUST_COOKIE_NAME(self) -> str:
return self._setting("TRUST_COOKIE_NAME", "mfa_trusted")
@property
def TRUST_COOKIE_DOMAIN(self) -> Optional[str]:
from django.conf import settings
return self._setting("TRUST_COOKIE_DOMAIN", settings.SESSION_COOKIE_DOMAIN)
@property
def TRUST_COOKIE_HTTPONLY(self) -> bool:
from django.conf import settings
return self._setting("TRUST_COOKIE_HTTPONLY", settings.SESSION_COOKIE_HTTPONLY)
@property
def TRUST_COOKIE_PATH(self) -> str:
from django.conf import settings
return self._setting("TRUST_COOKIE_PATH", settings.SESSION_COOKIE_PATH)
@property
def TRUST_COOKIE_SAMESITE(self) -> str:
from django.conf import settings
return self._setting("TRUST_COOKIE_SAMESITE", settings.SESSION_COOKIE_SAMESITE)
@property
def TRUST_COOKIE_SECURE(self) -> Optional[str]:
from django.conf import settings
return self._setting("TRUST_COOKIE_SECURE", settings.SESSION_COOKIE_SECURE)
_app_settings = AppSettings("MFA_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | scrapy__scrapy | tests/CrawlerProcess/simple.py | {
"start": 58,
"end": 267
} | class ____(scrapy.Spider):
name = "no_request"
async def start(self):
return
yield
process = CrawlerProcess(settings={})
process.crawl(NoRequestsSpider)
process.start()
| NoRequestsSpider |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 16024,
"end": 20723
} | class ____(_FieldWrapper):
"""Represents a choice field.
Multiselect is not currently supported; multiselect fields will still only allow
selecting a single value.
"""
@property
def is_multiselect(self) -> bool:
"""Is this a multiselect field?
Multiselect fields are currently treated as single-selection fields. True
multiselect is not yet supported, but this flag is presented for your
information.
"""
# True multiselect could be enabled by setting /V to an array. However, I'm not
# sure how to generate an appropriate appearance stream for a multiselect, and
# QPDF doesn't seem to account for multiselect fields in it's appearance stream
# generation algorithm either. This would require more research.
return bool(self._field.flags & FormFieldFlag.ch_multi_select)
@property
def is_combobox(self) -> bool:
"""Is this a combobox field? If false, this is instead a list box."""
return bool(self._field.flags & FormFieldFlag.ch_combo)
@property
def allow_edit(self) -> bool:
"""Does this field include an editable text box in addition to the dropdown?
The field must be a comboxbox; this option is not valid for list boxes.
"""
return bool(self._field.flags & FormFieldFlag.ch_edit)
@property
def spell_check_enabled(self) -> bool:
"""Should spell-checking be enabled in this field?
This is only valid for fields that allow editing.
"""
return not self._field.flags & FormFieldFlag.ch_do_not_spell_check
@property
def options(self) -> Sequence[ChoiceFieldOption]:
"""A list of all available options."""
# The implementation in QPDF is not correct, as it only includes options which
# are strings (see https://github.com/qpdf/qpdf/issues/1433). We opt for our own
# implementation here.
if Name.Opt not in self._field.obj:
# It is perfectly valid for the choice field to have no options
return ()
return tuple(
ChoiceFieldOption(self, opt, index)
for index, opt in enumerate(self._field.obj.Opt.as_list())
)
@property
def selected(self) -> ChoiceFieldOption | None:
"""The currently selected option, or None if no option is selected."""
if Name.Opt in self._field.obj:
for index, opt in enumerate(self._field.obj.Opt.as_list()):
opt = ChoiceFieldOption(self, opt, index)
if opt.export_value == self.value:
return opt
return ChoiceFieldOption(self, self.value, None)
@selected.setter
def selected(self, option: ChoiceFieldOption):
if option._field is not self:
raise ValueError('Option does not belong to this field')
# The PDF spec uses some language which makes me believe that it may still be
# expected to use the display value as the value of V rather than the export
# value. It isn't entirely clear to me either way. So, this may be incorrect.
# If so, it should be as simple a matter to fix as changing `export_value` to
# `display_value` in both the getter and the setter.
self._field.set_value(
option.export_value, self._form.generate_appearances is None
)
# Generate appearance streams if requested.
if self._form.generate_appearances is not None:
self._form.generate_appearances.generate_choice(self._field)
# I'm ignoring the /I array for now, as it only is required for multiselect.
@property
def value(self) -> str | None:
"""The value of the currently selected option."""
if self._field.value is not None:
return self._field.value_as_string
return None
@value.setter
def value(self, value: str | None):
if not self.allow_edit:
# Prevent setting a value not in the option list, unless the field is
# editable
okay = False
for index, opt in enumerate(self._field.obj.Opt):
opt = ChoiceFieldOption(self, opt, index)
if opt.export_value == value:
okay = True
break
if not okay:
raise ValueError("Not a valid option for this choice field:", value)
self._field.set_value(value, self._form.generate_appearances is None)
# Generate appearance streams if requested.
if self._form.generate_appearances is not None:
self._form.generate_appearances.generate_choice(self._field)
| ChoiceField |
python | prabhupant__python-ds | data_structures/graphs/count_trees.py | {
"start": 286,
"end": 1227
} | class ____:
def __init__(self, vertices):
self.vertices = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[v].append(u)
self.graph[u].append(v)
def count_trees(self):
visited = [False] * self.vertices
count = 0
for s in range(self.vertices):
if not visited[s]:
visited[s] = True
stack = []
stack.append(s)
count += 1
while stack:
print(stack)
s = stack.pop()
for i in self.graph[s]:
if not visited[i]:
visited[i] = True
stack.append(i)
return count
g = Graph(5)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(3, 4)
print('Count of trees - ', g.count_trees()) | Graph |
python | django__django | django/db/models/lookups.py | {
"start": 22515,
"end": 22597
} | class ____(Contains):
lookup_name = "icontains"
@Field.register_lookup
| IContains |
python | realpython__materials | python-sqlite-sqlalchemy/project/examples/example_3/app/models.py | {
"start": 4553,
"end": 5493
} | class ____(db.Model):
__tablename__ = "tracks"
track_id = db.Column("TrackId", db.Integer, primary_key=True)
name = db.Column("Name", db.String(200), nullable=False)
album_id = db.Column(
"AlbumId", db.ForeignKey("albums.AlbumId"), index=True
)
media_type_id = db.Column(
"MediaTypeId",
db.ForeignKey("media_types.MediaTypeId"),
nullable=False,
index=True,
)
genre_id = db.Column(
"GenreId", db.ForeignKey("genres.GenreId"), index=True
)
composer = db.Column("Composer", db.String(220))
milliseconds = db.Column("Milliseconds", db.Integer, nullable=False)
bytes = db.Column("Bytes", db.Integer)
unit_price = db.Column("UnitPrice", db.Float, nullable=False)
invoice_items = db.relationship("InvoiceItem", backref="track")
playlists = db.relationship(
"Playlist", secondary="playlist_track", back_populates="tracks"
)
| Track |
python | doocs__leetcode | lcof2/剑指 Offer II 040. 矩阵中最大的矩形/Solution.py | {
"start": 0,
"end": 1118
} | class ____:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
if not matrix:
return 0
heights = [0] * len(matrix[0])
ans = 0
for row in matrix:
for j, v in enumerate(row):
if v == "1":
heights[j] += 1
else:
heights[j] = 0
ans = max(ans, self.largestRectangleArea(heights))
return ans
def largestRectangleArea(self, heights: List[int]) -> int:
n = len(heights)
stk = []
left = [-1] * n
right = [n] * n
for i, h in enumerate(heights):
while stk and heights[stk[-1]] >= h:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
h = heights[i]
while stk and heights[stk[-1]] >= h:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
return max(h * (right[i] - left[i] - 1) for i, h in enumerate(heights))
| Solution |
python | ray-project__ray | doc/source/serve/doc_code/sklearn_quickstart.py | {
"start": 408,
"end": 1138
} | class ____:
def __init__(self, model):
self.model = model
self.label_list = iris_dataset["target_names"].tolist()
async def __call__(self, request: Request) -> Dict:
payload = (await request.json())["vector"]
print(f"Received http request with data {payload}")
prediction = self.model.predict([payload])[0]
human_name = self.label_list[prediction]
return {"result": human_name}
# Deploy model.
serve.run(BoostingModel.bind(model), route_prefix="/iris")
# Query it!
sample_request_input = {"vector": [1.2, 1.0, 1.1, 0.9]}
response = requests.get(
"http://localhost:8000/iris", json=sample_request_input)
print(response.text)
# __serve_example_end__
| BoostingModel |
python | huggingface__transformers | src/transformers/models/camembert/modular_camembert.py | {
"start": 8297,
"end": 12814
} | class ____(RobertaForMultipleChoice):
def __init__(self, config):
super().__init__(config)
del self.camembert
self.roberta = CamembertModel(config, add_pooling_layer=False)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
inputs_embeds=flat_inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(reshaped_logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| CamembertForMultipleChoice |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 22132,
"end": 22356
} | class ____(PrefectBaseModel):
"""Filter by `BlockSchema.block_type_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of block type ids to include"
)
| BlockSchemaFilterBlockTypeId |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 7911,
"end": 9029
} | class ____:
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not callable(value):
message = (
"'{name}' must be callable "
"(got {value!r} that is a {actual!r})."
)
raise NotCallableError(
msg=message.format(
name=attr.name, value=value, actual=value.__class__
),
value=value,
)
def __repr__(self):
return "<is_callable validator>"
def is_callable():
"""
A validator that raises a `attrs.exceptions.NotCallableError` if the
initializer is called with a value for this particular attribute that is
not callable.
.. versionadded:: 19.1.0
Raises:
attrs.exceptions.NotCallableError:
With a human readable error message containing the attribute
(`attrs.Attribute`) name, and the value it got.
"""
return _IsCallableValidator()
@attrs(repr=False, slots=True, unsafe_hash=True)
| _IsCallableValidator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI063.py | {
"start": 844,
"end": 1934
} | class ____:
def bad(__self) -> None: ... # PYI063
@staticmethod
def bad2(__self) -> None: ... # PYI063
def bad3(__self, __x: int) -> None: ... # PYI063
def still_bad(self, __x_: int) -> None: ... # PYI063
@staticmethod
def this_is_bad_too(__x: int) -> None: ... # PYI063
@classmethod
def not_good(cls, __foo: int) -> None: ... # PYI063
# The first non-self argument isn't positional-only, so logically the second can't be either:
def okay1(self, x: int, __y: int) -> None: ...
# Same here:
@staticmethod
def okay2(x: int, __y_: int) -> None: ...
@staticmethod
def no_args() -> int: ...
def okay3(__self__, __x__: int, __y: str) -> None: ...
def okay4(self, /) -> None: ...
def okay5(self, x: int, /) -> None: ...
def okay6(__self, /) -> None: ...
def cool(_self__: int) -> None: ...
def also_cool(self__: int) -> None: ...
def unclear_from_pep_484_if_this_is_positional_or_not(__: str) -> None: ...
def _(_: int) -> None: ...
@classmethod
def fine(cls, foo: int, /) -> None: ...
| Foo |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 22994,
"end": 23078
} | class ____(_OracleNumericCommon, sqltypes.Numeric):
is_number = True
| _OracleNUMBER |
python | keras-team__keras | keras/src/wrappers/sklearn_wrapper.py | {
"start": 13986,
"end": 17579
} | class ____(TransformerMixin, SKLBase):
"""scikit-learn compatible transformer wrapper for Keras models.
Note that this is a scikit-learn compatible transformer, and not a
transformer in the deep learning sense.
Also note that there are sources of randomness in model initialization and
training. Refer to [Reproducibility in Keras Models](
https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
control randomness.
Args:
model: `Model`.
An instance of `Model`, or a callable returning such an object.
Note that if input is a `Model`, it will be cloned using
`keras.models.clone_model` before being fitted, unless
`warm_start=True`.
The `Model` instance needs to be passed as already compiled.
If callable, it must accept at least `X` and `y` as keyword
arguments. Other arguments must be accepted if passed as
`model_kwargs` by the user.
warm_start: bool, defaults to `False`.
Whether to reuse the model weights from the previous fit. If `True`,
the given model won't be cloned and the weights from the previous
fit will be reused.
model_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model`, if `model` is callable.
fit_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model.fit`. These can also be passed
directly to the `fit` method of the scikit-learn wrapper. The
values passed directly to the `fit` method take precedence over
these.
Attributes:
model_ : `Model`
The fitted model.
history_ : dict
The history of the fit, returned by `model.fit`.
Example:
A common use case for a scikit-learn transformer, is to have a step
which gives you the embedding of your data. Here we assume
`my_package.my_model` is a Keras model which takes the input and gives
embeddings of the data, and `my_package.my_data` is your dataset loader.
``` python
from my_package import my_model, my_data
from keras.wrappers import SKLearnTransformer
from sklearn.frozen import FrozenEstimator # requires scikit-learn>=1.6
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import HistGradientBoostingClassifier
X, y = my_data()
trs = FrozenEstimator(SKLearnTransformer(model=my_model))
pipe = make_pipeline(trs, HistGradientBoostingClassifier())
pipe.fit(X, y)
```
Note that in the above example, `FrozenEstimator` prevents any further
training of the transformer step in the pipeline, which can be the case
if you don't want to change the embedding model at hand.
"""
def transform(self, X):
"""Transform the data.
Args:
X: array-like, shape=(n_samples, n_features)
The input samples.
Returns:
X_transformed: array-like, shape=(n_samples, n_features)
The transformed data.
"""
from sklearn.utils.validation import check_is_fitted
check_is_fitted(self)
X = _validate_data(self, X, reset=False)
return self.model_.predict(X)
def _more_tags(self):
# required to be compatible with scikit-learn<1.6
return {
"preserves_dtype": [],
}
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = []
return tags
| SKLearnTransformer |
python | huggingface__transformers | src/transformers/models/efficientnet/modeling_efficientnet.py | {
"start": 3932,
"end": 4832
} | class ____(nn.Module):
r"""
This corresponds to the expansion phase of each block in the original implementation.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
super().__init__()
self.expand_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=out_dim,
kernel_size=1,
padding="same",
bias=False,
)
self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
self.expand_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
# Expand phase
hidden_states = self.expand_conv(hidden_states)
hidden_states = self.expand_bn(hidden_states)
hidden_states = self.expand_act(hidden_states)
return hidden_states
| EfficientNetExpansionLayer |
python | davidhalter__parso | parso/python/errors.py | {
"start": 17629,
"end": 17865
} | class ____(Rule):
code = 903
def _get_message(self, message, node):
message = super()._get_message(message, node)
return "IndentationError: " + message
@ErrorFinder.register_rule(type='error_node')
| IndentationRule |
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 1054,
"end": 2208
} | class ____:
def __init__(self, options, raw=False):
self.raw = raw
self.options = options
def _write_value(self, rng, value, scalar):
if rng.api and value:
# it is assumed by this stage that value is a list of lists
if scalar:
value = value[0][0]
else:
rng = rng.resize(len(value), len(value[0]))
chunksize = self.options.get("chunksize")
if chunksize:
for ix, value_chunk in enumerate(chunk(value, chunksize)):
rng[
ix * chunksize : ix * chunksize + chunksize, :
].raw_value = value_chunk
else:
rng.raw_value = value
def __call__(self, ctx):
if ctx.range and ctx.value:
if self.raw:
ctx.range.raw_value = ctx.value
return
scalar = ctx.meta.get("scalar", False)
if not scalar:
ctx.range = ctx.range.resize(len(ctx.value), len(ctx.value[0]))
self._write_value(ctx.range, ctx.value, scalar)
| WriteValueToRangeStage |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 20498,
"end": 31884
} | class ____(TestCase):
def setUp(self):
# Sequence of title/text is:
#
# zyx abc
# yxw bcd
# xwv cde
for idx in range(3):
title = (
chr(ord('z') - idx) +
chr(ord('y') - idx) +
chr(ord('x') - idx)
)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
OrderingFilterModel(title=title, text=text).save()
def test_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_reverse_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-text'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_incorrecturl_extrahyphens_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '--text'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_incorrectfield_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'foobar'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_ordering_without_ordering_fields(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializerWithModelProperty
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
view = OrderingListView.as_view()
# Model field ordering works fine.
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc', 'description': 'zyx: abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd', 'description': 'yxw: bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde', 'description': 'xwv: cde'},
]
# `incorrectfield` ordering works fine.
request = factory.get('/', {'ordering': 'foobar'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde', 'description': 'xwv: cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd', 'description': 'yxw: bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc', 'description': 'zyx: abc'},
]
# `description` is a Model property, which should be ignored.
request = factory.get('/', {'ordering': 'description'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde', 'description': 'xwv: cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd', 'description': 'yxw: bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc', 'description': 'zyx: abc'},
]
def test_default_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_default_ordering_using_string(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_ordering_by_aggregate_field(self):
# create some related models to aggregate order by
num_objs = [2, 5, 3]
for obj, num_related in zip(OrderingFilterModel.objects.all(),
num_objs):
for _ in range(num_related):
new_related = OrderingFilterRelatedModel(
related_object=obj
)
new_related.save()
class OrderingListView(generics.ListAPIView):
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = '__all__'
queryset = OrderingFilterModel.objects.all().annotate(
models.Count("related"))
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'related__count'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
]
def test_ordering_by_dotted_source(self):
for index, obj in enumerate(OrderingFilterModel.objects.all()):
OrderingFilterRelatedModel.objects.create(
related_object=obj,
index=index
)
class OrderingListView(generics.ListAPIView):
serializer_class = OrderingDottedRelatedSerializer
filter_backends = (filters.OrderingFilter,)
queryset = OrderingFilterRelatedModel.objects.all()
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'related_object__text'})
response = view(request)
assert response.data == [
{'related_title': 'zyx', 'related_text': 'abc', 'index': 0},
{'related_title': 'yxw', 'related_text': 'bcd', 'index': 1},
{'related_title': 'xwv', 'related_text': 'cde', 'index': 2},
]
request = factory.get('/', {'ordering': '-index'})
response = view(request)
assert response.data == [
{'related_title': 'xwv', 'related_text': 'cde', 'index': 2},
{'related_title': 'yxw', 'related_text': 'bcd', 'index': 1},
{'related_title': 'zyx', 'related_text': 'abc', 'index': 0},
]
def test_ordering_with_nonstandard_ordering_param(self):
with override_settings(REST_FRAMEWORK={'ORDERING_PARAM': 'order'}):
reload_module(filters)
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'order': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
reload_module(filters)
def test_get_template_context(self):
class OrderingListView(generics.ListAPIView):
ordering_fields = '__all__'
serializer_class = OrderingFilterSerializer
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
request = factory.get('/', {'ordering': 'title'}, HTTP_ACCEPT='text/html')
view = OrderingListView.as_view()
response = view(request)
self.assertContains(response, 'verbose title')
def test_ordering_with_overridden_get_serializer_class(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
# note: no ordering_fields and serializer_class specified
def get_serializer_class(self):
return OrderingFilterSerializer
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_ordering_with_improper_configuration(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
# note: no ordering_fields and serializer_class
# or get_serializer_class specified
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
with self.assertRaises(ImproperlyConfigured):
view(request)
| OrderingFilterTests |
python | scrapy__scrapy | scrapy/core/downloader/tls.py | {
"start": 763,
"end": 3247
} | class ____(ClientTLSOptions):
"""
SSL Client connection creator ignoring certificate verification errors
(for genuinely invalid certificates or bugs in verification code).
Same as Twisted's private _sslverify.ClientTLSOptions,
except that VerificationError, CertificateError and ValueError
exceptions are caught, so that the connection is not closed, only
logging warnings. Also, HTTPS connection parameters logging is added.
"""
def __init__(self, hostname: str, ctx: SSL.Context, verbose_logging: bool = False):
super().__init__(hostname, ctx)
self.verbose_logging: bool = verbose_logging
def _identityVerifyingInfoCallback(
self, connection: SSL.Connection, where: int, ret: Any
) -> None:
if where & SSL.SSL_CB_HANDSHAKE_START:
connection.set_tlsext_host_name(self._hostnameBytes)
elif where & SSL.SSL_CB_HANDSHAKE_DONE:
if self.verbose_logging:
logger.debug(
"SSL connection to %s using protocol %s, cipher %s",
self._hostnameASCII,
connection.get_protocol_version_name(),
connection.get_cipher_name(),
)
server_cert = connection.get_peer_certificate()
if server_cert:
logger.debug(
'SSL connection certificate: issuer "%s", subject "%s"',
x509name_to_string(server_cert.get_issuer()),
x509name_to_string(server_cert.get_subject()),
)
key_info = get_temp_key_info(connection._ssl)
if key_info:
logger.debug("SSL temp key: %s", key_info)
try:
verifyHostname(connection, self._hostnameASCII)
except (CertificateError, VerificationError) as e:
logger.warning(
'Remote certificate is not valid for hostname "%s"; %s',
self._hostnameASCII,
e,
)
except ValueError as e:
logger.warning(
"Ignoring error while verifying certificate "
'from host "%s" (exception: %r)',
self._hostnameASCII,
e,
)
DEFAULT_CIPHERS: AcceptableCiphers = AcceptableCiphers.fromOpenSSLCipherString(
"DEFAULT"
)
| ScrapyClientTLSOptions |
python | facebook__pyre-check | tools/generate_taint_models/tests/inspect_parser_test.py | {
"start": 2320,
"end": 5549
} | class ____(unittest.TestCase):
def test_inherited_methods(self) -> None:
self.assertEqual(
extract_qualified_name(TestClass.method),
"{}.TestClass.method".format(__name__),
)
self.assertEqual(
extract_qualified_name(TestDerived.method),
"{}.TestClass.method".format(__name__),
)
# Parameter __eq__ was overridden to only check the name let's verify all the attributes
def _assert_equals_parameters(
self, parameters: List[Parameter], expected_parameters: List[Parameter]
) -> None:
self.assertEqual(len(parameters), len(expected_parameters))
for parameter, expected_parameter in zip(
parameters,
expected_parameters,
):
self.assertEqual(parameter.Kind, expected_parameter.Kind)
self.assertEqual(parameter.annotation, expected_parameter.annotation)
self.assertEqual(parameter.name, expected_parameter.name)
def test_extract_parameters(self) -> None:
expected_parameters = [
Parameter(name="arg1", annotation="_empty", kind=Parameter.Kind.ARG),
Parameter(name="arg2", annotation="TestClass", kind=Parameter.Kind.ARG),
Parameter(name="arg3", annotation="TestClass", kind=Parameter.Kind.ARG),
Parameter(name="*vararg", annotation="_empty", kind=Parameter.Kind.VARARG),
Parameter(name="**kwarg", annotation="_empty", kind=Parameter.Kind.KWARG),
]
self._assert_equals_parameters(
extract_parameters(test_function), expected_parameters
)
expected_parameters = [
Parameter(name="self", annotation="_empty", kind=Parameter.Kind.ARG)
] + expected_parameters
self._assert_equals_parameters(
extract_parameters(TestMethodClass.test_method), expected_parameters
)
expected_parameters_annotated = [
Parameter(name="arg1", annotation="TestClass", kind=Parameter.Kind.ARG),
Parameter(name="arg2", annotation="TestClass", kind=Parameter.Kind.ARG),
]
self._assert_equals_parameters(
extract_parameters(test_annotated_parameter_function),
expected_parameters_annotated,
)
def test_strip_custom_annotations(self) -> None:
self.assertEqual(
strip_custom_annotations("TestClass"),
"TestClass",
)
self.assertEqual(
strip_custom_annotations("Tuple[int, int]"),
"Tuple[int, int]",
)
self.assertEqual(
strip_custom_annotations(
"Annotated[TestClass, ExampleAnnotation(accesses=(Access.REVIEWED,))]"
),
"TestClass",
)
self.assertEqual(
strip_custom_annotations(
"Annotated[Tuple[int, int], ExampleAnnotation(accesses=(Access.REVIEWED,))]"
),
"Tuple[int, int]",
)
self.assertEqual(
strip_custom_annotations(
"Annotated[Optional[TestClass], ExampleAnnotation(accesses=(Access.REVIEWED,))]"
),
"Optional[TestClass]",
)
| InspectParserTest |
python | kamyu104__LeetCode-Solutions | Python/final-array-state-after-k-multiplication-operations-i.py | {
"start": 3040,
"end": 3979
} | class ____(object):
def getFinalState(self, nums, k, multiplier):
"""
:type nums: List[int]
:type k: int
:type multiplier: int
:rtype: List[int]
"""
if multiplier == 1:
return nums
min_heap = [(x, i) for i, x in enumerate(nums)]
heapq.heapify(min_heap)
mx = max(nums)
for k in reversed(xrange(1, k+1)):
if min_heap[0][0]*multiplier > mx:
break
x, i = heapq.heappop(min_heap)
heapq.heappush(min_heap, (x*multiplier, i))
else:
k = 0
vals = sorted(min_heap)
q, r = divmod(k, len(nums))
m = pow(multiplier, q)
result = [0]*len(nums)
for idx, (x, i) in enumerate(vals):
result[i] = x*m*(multiplier if idx < r else 1)
return result
# Time: O(n + klogn)
# Space: O(n)
import heapq
# simulation, heap
| Solution3 |
python | django__django | tests/utils_tests/test_crypto.py | {
"start": 187,
"end": 2445
} | class ____(SimpleTestCase):
def test_constant_time_compare(self):
# It's hard to test for constant time, just test the result.
self.assertTrue(constant_time_compare(b"spam", b"spam"))
self.assertFalse(constant_time_compare(b"spam", b"eggs"))
self.assertTrue(constant_time_compare("spam", "spam"))
self.assertFalse(constant_time_compare("spam", "eggs"))
self.assertTrue(constant_time_compare(b"spam", "spam"))
self.assertFalse(constant_time_compare("spam", b"eggs"))
self.assertTrue(constant_time_compare("ありがとう", "ありがとう"))
self.assertFalse(constant_time_compare("ありがとう", "おはよう"))
def test_salted_hmac(self):
tests = [
((b"salt", b"value"), {}, "b51a2e619c43b1ca4f91d15c57455521d71d61eb"),
(("salt", "value"), {}, "b51a2e619c43b1ca4f91d15c57455521d71d61eb"),
(
("salt", "value"),
{"secret": "abcdefg"},
"8bbee04ccddfa24772d1423a0ba43bd0c0e24b76",
),
(
("salt", "value"),
{"secret": "x" * hashlib.sha1().block_size},
"bd3749347b412b1b0a9ea65220e55767ac8e96b0",
),
(
("salt", "value"),
{"algorithm": "sha256"},
"ee0bf789e4e009371a5372c90f73fcf17695a8439c9108b0480f14e347b3f9ec",
),
(
("salt", "value"),
{
"algorithm": "blake2b",
"secret": "x" * hashlib.blake2b().block_size,
},
"fc6b9800a584d40732a07fa33fb69c35211269441823bca431a143853c32f"
"e836cf19ab881689528ede647dac412170cd5d3407b44c6d0f44630690c54"
"ad3d58",
),
]
for args, kwargs, digest in tests:
with self.subTest(args=args, kwargs=kwargs):
self.assertEqual(salted_hmac(*args, **kwargs).hexdigest(), digest)
def test_invalid_algorithm(self):
msg = "'whatever' is not an algorithm accepted by the hashlib module."
with self.assertRaisesMessage(InvalidAlgorithm, msg):
salted_hmac("salt", "value", algorithm="whatever")
| TestUtilsCryptoMisc |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-elements-within-k-subarrays-equal.py | {
"start": 2364,
"end": 6119
} | class ____(object):
def minOperations(self, nums, x, k):
"""
:type nums: List[int]
:type x: int
:type k: int
:rtype: int
"""
class LazyHeap(object):
def __init__(self, sign):
self.heap = []
self.to_remove = collections.defaultdict(int)
self.cnt = 0
self.sign = sign
def push(self, val):
heapq.heappush(self.heap, self.sign*val)
def full_remove(self):
result = []
for x in self.heap:
if x not in self.to_remove:
result.append(x)
continue
self.to_remove[x] -= 1
if not self.to_remove[x]:
del self.to_remove[x]
self.heap[:] = result
heapq.heapify(self.heap)
def remove(self, val):
self.to_remove[self.sign*val] += 1
self.cnt += 1
if self.cnt > len(self.heap)-self.cnt:
self.full_remove()
self.cnt = 0
def pop(self):
self.remove(self.top())
def top(self):
while self.heap and self.heap[0] in self.to_remove:
self.to_remove[self.heap[0]] -= 1
self.cnt -= 1
if self.to_remove[self.heap[0]] == 0:
del self.to_remove[self.heap[0]]
heapq.heappop(self.heap)
return self.sign*self.heap[0]
def __len__(self):
return len(self.heap)-self.cnt
class SlidingWindow(object):
def __init__(self):
self.left = LazyHeap(-1) # max heap
self.right = LazyHeap(+1) # min heap
self.total1 = self.total2 = 0
def add(self, val):
if not self.left or val <= self.left.top():
self.left.push(val)
self.total1 += val
else:
self.right.push(val)
self.total2 += val
self.rebalance()
def remove(self, val):
if val <= self.left.top():
self.left.remove(val)
self.total1 -= val
else:
self.right.remove(val)
self.total2 -= val
self.rebalance()
def rebalance(self):
if len(self.left) < len(self.right):
self.total2 -= self.right.top()
self.total1 += self.right.top()
self.left.push(self.right.top())
self.right.pop()
elif len(self.left) > len(self.right)+1:
self.total1 -= self.left.top()
self.total2 += self.left.top()
self.right.push(self.left.top())
self.left.pop()
def median(self):
return self.left.top()
INF = float("inf")
sw = SlidingWindow()
cost = [INF]*(len(nums)+1)
for i in xrange(len(nums)):
if i-x >= 0:
sw.remove(nums[i-x])
sw.add(nums[i])
if i >= x-1:
cost[i+1] = (sw.median()*len(sw.left)-sw.total1) + (sw.total2-sw.median()*len(sw.right))
dp = [0]*(len(nums)+1)
for i in xrange(k):
new_dp = [INF]*(len(nums)+1)
for j in xrange((i+1)*x, len(nums)+1):
new_dp[j] = min(new_dp[j-1], dp[j-x]+cost[j])
dp = new_dp
return dp[-1]
| Solution2 |
python | pandas-dev__pandas | pandas/tests/extension/decimal/array.py | {
"start": 1426,
"end": 9906
} | class ____(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False, context=None) -> None:
for i, val in enumerate(values):
if is_float(val) or is_integer(val):
if np.isnan(val):
values[i] = DecimalDtype.na_value
else:
# error: Argument 1 has incompatible type "float | int |
# integer[Any]"; expected "Decimal | float | str | tuple[int,
# Sequence[int], int]"
values[i] = DecimalDtype.type(val) # type: ignore[arg-type]
elif not isinstance(val, decimal.Decimal):
raise TypeError("All values must be of type " + str(decimal.Decimal))
values = np.asarray(values, dtype=object)
self._data = values
# Some aliases for common attribute names to ensure pandas supports
# these
self._items = self.data = self._data
# those aliases are currently not working due to assumptions
# in internal code (GH-20735)
# self._values = self.values = self.data
self._dtype = DecimalDtype(context)
@property
def dtype(self):
return self._dtype
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return cls(scalars)
@classmethod
def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy=False):
return cls._from_sequence(
[decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy
)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _cast_pointwise_result(self, values):
result = super()._cast_pointwise_result(values)
try:
# If this were ever made a non-test EA, special-casing could
# be avoided by handling Decimal in maybe_convert_objects
res = type(self)._from_sequence(result, dtype=self.dtype)
except (ValueError, TypeError):
return result
return res
_HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
def to_numpy(
self,
dtype=None,
copy: bool = False,
na_value: object = no_default,
decimals=None,
) -> np.ndarray:
result = np.asarray(self, dtype=dtype)
if decimals is not None:
result = np.asarray([round(x, decimals) for x in result])
return result
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
if not all(
isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs
):
return NotImplemented
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
# e.g. test_array_ufunc_series_scalar_other
return result
if "out" in kwargs:
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
def reconstruct(x):
if isinstance(x, (decimal.Decimal, numbers.Number)):
return x
else:
return type(self)._from_sequence(x, dtype=self.dtype)
if ufunc.nout > 1:
return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self._data[item]
else:
# array, slice.
item = pd.api.indexers.check_array_indexer(self, item)
result = type(self)(self._data[item])
if getitem_returns_view(self, item):
result._readonly = self._readonly
return result
def take(self, indexer, allow_fill=False, fill_value=None):
from pandas.api.extensions import take
data = self._data
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
def copy(self):
return type(self)(self._data.copy(), dtype=self.dtype)
def astype(self, dtype, copy=True):
if is_dtype_equal(dtype, self._dtype):
if not copy:
return self
dtype = pandas_dtype(dtype)
if isinstance(dtype, type(self.dtype)):
return type(self)(self._data, copy=copy, context=dtype.context)
return super().astype(dtype, copy=copy)
def __setitem__(self, key, value) -> None:
if self._readonly:
raise ValueError("Cannot modify read-only array")
if is_list_like(value):
if is_scalar(key):
raise ValueError("setting an array element with a sequence.")
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
key = check_array_indexer(self, key)
self._data[key] = value
def __len__(self) -> int:
return len(self._data)
def __contains__(self, item) -> bool | np.bool_:
if not isinstance(item, decimal.Decimal):
return False
elif item.is_nan():
return self.isna().any()
else:
return super().__contains__(item)
@property
def nbytes(self) -> int:
n = len(self)
if n:
return n * sys.getsizeof(self[0])
return 0
def isna(self):
return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
return decimal.Decimal("NaN")
def _formatter(self, boxed=False):
if boxed:
return "Decimal: {}".format
return repr
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
def _reduce(
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
if skipna and self.isna().any():
# If we don't have any NAs, we can ignore skipna
other = self[~self.isna()]
result = other._reduce(name, **kwargs)
elif name == "sum" and len(self) == 0:
# GH#29630 avoid returning int 0 or np.bool_(False) on old numpy
result = decimal.Decimal(0)
else:
try:
op = getattr(self.data, name)
except AttributeError as err:
raise NotImplementedError(
f"decimal does not support the {name} operation"
) from err
result = op(axis=0)
if keepdims:
return type(self)([result])
else:
return result
def _cmp_method(self, other, op):
# For use with OpsMixin
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
else:
# Assume it's an object
ovalues = [param] * len(self)
return ovalues
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
return np.asarray(res, dtype=bool)
# We override fillna here to simulate a 3rd party EA that has done so. This
# lets us test a 3rd-party EA that has not yet updated to include a "copy"
# keyword in its fillna method.
def fillna(self, value=None, limit=None):
return super().fillna(value=value, limit=limit, copy=True)
def to_decimal(values, context=None):
return DecimalArray([decimal.Decimal(x) for x in values], context=context)
def make_data(n: int):
return [decimal.Decimal(val) for val in np.random.default_rng(2).random(n)]
DecimalArray._add_arithmetic_ops()
| DecimalArray |
python | walkccc__LeetCode | solutions/1680. Concatenation of Consecutive Binary Numbers/1680.py | {
"start": 0,
"end": 268
} | class ____:
def concatenatedBinary(self, n: int) -> int:
MOD = 1_000_000_007
ans = 0
def numberOfBits(n: int) -> int:
return int(math.log2(n)) + 1
for i in range(1, n + 1):
ans = ((ans << numberOfBits(i)) + i) % MOD
return ans
| Solution |
python | doocs__leetcode | solution/0100-0199/0155.Min Stack/Solution.py | {
"start": 0,
"end": 559
} | class ____:
def __init__(self):
self.stk1 = []
self.stk2 = [inf]
def push(self, val: int) -> None:
self.stk1.append(val)
self.stk2.append(min(val, self.stk2[-1]))
def pop(self) -> None:
self.stk1.pop()
self.stk2.pop()
def top(self) -> int:
return self.stk1[-1]
def getMin(self) -> int:
return self.stk2[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| MinStack |
python | plotly__plotly.py | plotly/graph_objs/layout/_activeshape.py | {
"start": 235,
"end": 3061
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.activeshape"
_valid_props = {"fillcolor", "opacity"}
@property
def fillcolor(self):
"""
Sets the color filling the active shape' interior.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
@property
def opacity(self):
"""
Sets the opacity of the active shape.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
fillcolor
Sets the color filling the active shape' interior.
opacity
Sets the opacity of the active shape.
"""
def __init__(self, arg=None, fillcolor=None, opacity=None, **kwargs):
"""
Construct a new Activeshape object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Activeshape`
fillcolor
Sets the color filling the active shape' interior.
opacity
Sets the opacity of the active shape.
Returns
-------
Activeshape
"""
super().__init__("activeshape")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Activeshape
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Activeshape`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Activeshape |
python | ansible__ansible | lib/ansible/playbook/play.py | {
"start": 1748,
"end": 19579
} | class ____(Base, Taggable, CollectionSearch):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
hosts = NonInheritableFieldAttribute(isa='list', required=True, listof=(str,), always_post_validate=True, priority=-2)
# Facts
gather_facts = NonInheritableFieldAttribute(isa='bool', default=None, always_post_validate=True)
gather_subset = NonInheritableFieldAttribute(isa='list', default=None, listof=(str,), always_post_validate=True)
gather_timeout = NonInheritableFieldAttribute(isa='int', default=None, always_post_validate=True)
fact_path = NonInheritableFieldAttribute(isa='string', default=None)
# Variable Attributes
vars_files = NonInheritableFieldAttribute(isa='list', default=list, priority=99)
vars_prompt = NonInheritableFieldAttribute(isa='list', default=list, always_post_validate=False)
validate_argspec = NonInheritableFieldAttribute(isa='string', always_post_validate=True)
# Role Attributes
roles = NonInheritableFieldAttribute(isa='list', default=list, priority=90)
# Block (Task) Lists Attributes
handlers = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
pre_tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
post_tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
tasks = NonInheritableFieldAttribute(isa='list', default=list, priority=-1)
# Flag/Setting Attributes
force_handlers = NonInheritableFieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
max_fail_percentage = NonInheritableFieldAttribute(isa='percent', always_post_validate=True)
serial = NonInheritableFieldAttribute(isa='list', default=list, always_post_validate=True)
strategy = NonInheritableFieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
order = NonInheritableFieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.role_cache = {}
self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
self._action_groups = {}
self._group_actions = {}
def __repr__(self):
return self.get_name()
def _get_cached_role(self, role):
role_path = role.get_role_path()
role_cache = self.role_cache[role_path]
try:
idx = role_cache.index(role)
return role_cache[idx]
except ValueError:
raise AnsibleError(f'Cannot locate {role.get_name()} in role cache')
def _validate_hosts(self, attribute, name, value):
# Only validate 'hosts' if a value was passed in to original data set.
if 'hosts' in self._ds:
if not value:
raise AnsibleParserError("Hosts list cannot be empty. Please check your playbook")
if is_sequence(value):
# Make sure each item in the sequence is a valid string
for entry in value:
if entry is None:
raise AnsibleParserError("Hosts list cannot contain values of 'None'. Please check your playbook")
elif not isinstance(entry, (bytes, str)):
raise AnsibleParserError("Hosts list contains an invalid host value: '{host!s}'".format(host=entry))
elif not isinstance(value, (bytes, str, EncryptedString)):
raise AnsibleParserError("Hosts list must be a sequence or string. Please check your playbook.")
def get_name(self):
""" return the name of the Play """
if self.name:
return self.name
if is_sequence(self.hosts):
self.name = ','.join(self.hosts)
else:
self.name = self.hosts or ''
return self.name
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
"""
Adjusts play datastructure to cleanup old/legacy items
"""
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for this play. "
"The use of 'user' is deprecated, and should be removed", obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
# DTFIX-FUTURE: these do nothing but augment the exception message; DRY and nuke
def _load_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading tasks.", obj=self._ds) from ex
def _load_pre_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks.", obj=self._ds) from ex
def _load_post_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks.", obj=self._ds) from ex
def _load_handlers(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
"""
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading handlers.", obj=self._ds) from ex
def _load_roles(self, attr, ds):
"""
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
"""
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
except AssertionError as ex:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds) from ex
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
self.roles[:0] = roles
return self.roles
def _load_vars_prompt(self, attr, ds):
# avoid circular dep
from ansible.vars.manager import preprocess_vars
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
for key in prompt_data:
if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
"""
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
D2 are merged first, followed by D1, and lastly by the tasks from
the parent role R last. This is done for all roles in the Play.
"""
block_list = []
if len(self.roles) > 0:
for r in self.roles:
# Don't insert tasks from ``import/include_role``, preventing
# duplicate execution at the wrong time
if r.from_include:
continue
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
"""
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
"""
block_list = []
if len(self.roles) > 0:
for r in self.roles:
if r.from_include:
continue
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
"""
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
"""
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block(play=self)
t = Task(block=flush_block)
t.action = 'meta'
t._resolved_action = 'ansible.builtin.meta'
t.args['_raw_params'] = 'flush_handlers'
t.implicit = True
t.set_loader(self._loader)
t.tags = ['always']
flush_block.block = [t]
# NOTE keep flush_handlers tasks even if a section has no regular tasks,
# there may be notified handlers from the previous section
# (typically when a handler notifies a handler defined before)
block_list = []
if self.force_handlers:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.implicit = True
noop_task.set_loader(self._loader)
b = Block(play=self)
if self.pre_tasks:
b.block = self.pre_tasks
else:
nt = noop_task.copy(exclude_parent=True)
nt._parent = b
b.block = [nt]
b.always = [flush_block]
block_list.append(b)
tasks = self._compile_roles() + self.tasks
b = Block(play=self)
if tasks:
b.block = tasks
else:
nt = noop_task.copy(exclude_parent=True)
nt._parent = b
b.block = [nt]
b.always = [flush_block]
block_list.append(b)
b = Block(play=self)
if self.post_tasks:
b.block = self.post_tasks
else:
nt = noop_task.copy(exclude_parent=True)
nt._parent = b
b.block = [nt]
b.always = [flush_block]
block_list.append(b)
return block_list
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
elif not isinstance(self.vars_files, list):
return [self.vars_files]
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def copy(self):
new_me = super(Play, self).copy()
new_me.role_cache = self.role_cache.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
new_me._action_groups = self._action_groups
new_me._group_actions = self._group_actions
return new_me
def _post_validate_validate_argspec(self, attr: NonInheritableFieldAttribute, value: object, templar: _TE) -> str | None:
"""Validate user input is a bool or string, and return the corresponding argument spec name."""
# Ensure the configuration is valid
if isinstance(value, str):
try:
value = templar.template(value)
except AnsibleValueOmittedError:
value = False
if not isinstance(value, (str, bool)):
raise AnsibleParserError(f"validate_argspec must be a boolean or string, not {type(value)}", obj=value)
# Short-circuit if configuration is turned off or inapplicable
if not value or self._origin is None:
return None
# Use the requested argument spec or fall back to the play name
argspec_name = None
if isinstance(value, str):
argspec_name = value
elif self._ds.get("name"):
argspec_name = self.name
metadata_err = argspec_err = ""
if not argspec_name:
argspec_err = (
"A play name is required when validate_argspec is True. "
"Alternatively, set validate_argspec to the name of an argument spec."
)
if self._metadata_path is None:
metadata_err = "A playbook meta file is required. Considered:\n - "
metadata_err += "\n - ".join([path.as_posix() for path in self._metadata_candidate_paths])
if metadata_err or argspec_err:
error = f"{argspec_err + (' ' if argspec_err else '')}{metadata_err}"
raise AnsibleParserError(error, obj=self._origin)
metadata = self._loader.load_from_file(self._metadata_path)
try:
metadata = metadata['argument_specs']
metadata = metadata[argspec_name]
options = metadata['options']
except (TypeError, KeyError):
options = None
if not isinstance(options, dict):
raise AnsibleParserError(
f"No argument spec named '{argspec_name}' in {self._metadata_path}. Minimally expected:\n"
+ yaml_dump({"argument_specs": {f"{argspec_name!s}": {"options": {}}}}),
obj=metadata,
)
return argspec_name
@property
def _metadata_candidate_paths(self) -> list[_pathlib.Path]:
"""A list of possible playbook.meta paths in configured order."""
extensions = C.config.get_config_value("YAML_FILENAME_EXTENSIONS")
if self._origin.path.endswith(tuple(extensions)):
playbook_without_ext = self._origin.path.rsplit('.', 1)[0]
else:
playbook_without_ext = self._origin.path
return [_pathlib.Path(playbook_without_ext + ".meta" + ext) for ext in extensions + ['']]
@_functools.cached_property
def _metadata_path(self) -> str | None:
"""Locate playbook meta path:
playbook{ext?} -> playbook.meta{ext?}
"""
if self._origin is None:
# adhoc, ansible-console don't have an associated playbook
return None
for candidate in self._metadata_candidate_paths:
if candidate.is_file():
return candidate.as_posix()
return None
@property
def argument_spec(self) -> dict:
"""Retrieve the argument spec if one is configured."""
if not self.validate_argspec:
return {}
return self._loader.load_from_file(self._metadata_path)['argument_specs'][self.validate_argspec]['options']
| Play |
python | kamyu104__LeetCode-Solutions | Python/k-th-nearest-obstacle-queries.py | {
"start": 55,
"end": 524
} | class ____(object):
def resultsArray(self, queries, k):
"""
:type queries: List[List[int]]
:type k: int
:rtype: List[int]
"""
result = []
max_heap = []
for x, y in queries:
heapq.heappush(max_heap, -(abs(x)+abs(y)))
if len(max_heap) == k+1:
heapq.heappop(max_heap)
result.append(-max_heap[0] if len(max_heap) == k else -1)
return result
| Solution |
python | ray-project__ray | python/ray/data/preprocessors/encoder.py | {
"start": 20562,
"end": 26681
} | class ____(SerializablePreprocessorBase):
r"""Encode labels as integer targets.
:class:`LabelEncoder` encodes labels as integer targets that range from
:math:`0` to :math:`n - 1`, where :math:`n` is the number of unique labels.
If you transform a label that isn't in the fitted datset, then the label is encoded
as ``float("nan")``.
Examples:
>>> import pandas as pd
>>> import ray
>>> df = pd.DataFrame({
... "sepal_width": [5.1, 7, 4.9, 6.2],
... "sepal_height": [3.5, 3.2, 3, 3.4],
... "species": ["setosa", "versicolor", "setosa", "virginica"]
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>>
>>> from ray.data.preprocessors import LabelEncoder
>>> encoder = LabelEncoder(label_column="species")
>>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP
sepal_width sepal_height species
0 5.1 3.5 0
1 7.0 3.2 1
2 4.9 3.0 0
3 6.2 3.4 2
You can also provide the name of the output column that should hold the encoded
labels if you want to use :class:`LabelEncoder` in append mode.
>>> encoder = LabelEncoder(label_column="species", output_column="species_encoded")
>>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP
sepal_width sepal_height species species_encoded
0 5.1 3.5 setosa 0
1 7.0 3.2 versicolor 1
2 4.9 3.0 setosa 0
3 6.2 3.4 virginica 2
If you transform a label not present in the original dataset, then the new
label is encoded as ``float("nan")``.
>>> df = pd.DataFrame({
... "sepal_width": [4.2],
... "sepal_height": [2.7],
... "species": ["bracteata"]
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> encoder.transform(ds).to_pandas() # doctest: +SKIP
sepal_width sepal_height species
0 4.2 2.7 NaN
Args:
label_column: A column containing labels that you want to encode.
output_column: The name of the column that will contain the encoded
labels. If None, the output column will have the same name as the
input column.
.. seealso::
:class:`OrdinalEncoder`
If you're encoding ordered features, use :class:`OrdinalEncoder` instead of
:class:`LabelEncoder`.
"""
def __init__(self, label_column: str, *, output_column: Optional[str] = None):
super().__init__()
self.label_column = label_column
self.output_column = output_column or label_column
def _fit(self, dataset: "Dataset") -> Preprocessor:
self.stat_computation_plan.add_callable_stat(
stat_fn=lambda key_gen: compute_unique_value_indices(
dataset=dataset,
columns=[self.label_column],
key_gen=key_gen,
),
post_process_fn=unique_post_fn(),
stat_key_fn=lambda col: f"unique({col})",
post_key_fn=lambda col: f"unique_values({col})",
columns=[self.label_column],
)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, self.label_column)
def column_label_encoder(s: pd.Series):
s_values = self.stats_[f"unique_values({s.name})"]
return s.map(s_values)
df[self.output_column] = df[self.label_column].transform(column_label_encoder)
return df
def inverse_transform(self, ds: "Dataset") -> "Dataset":
"""Inverse transform the given dataset.
Args:
ds: Input Dataset that has been fitted and/or transformed.
Returns:
ray.data.Dataset: The inverse transformed Dataset.
Raises:
PreprocessorNotFittedException: if ``fit`` is not called yet.
"""
fit_status = self.fit_status()
if fit_status in (
Preprocessor.FitStatus.PARTIALLY_FITTED,
Preprocessor.FitStatus.NOT_FITTED,
):
raise PreprocessorNotFittedException(
"`fit` must be called before `inverse_transform`, "
)
kwargs = self._get_transform_config()
return ds.map_batches(
self._inverse_transform_pandas, batch_format=BatchFormat.PANDAS, **kwargs
)
def _inverse_transform_pandas(self, df: pd.DataFrame):
def column_label_decoder(s: pd.Series):
inverse_values = {
value: key
for key, value in self.stats_[
f"unique_values({self.label_column})"
].items()
}
return s.map(inverse_values)
df[self.label_column] = df[self.output_column].transform(column_label_decoder)
return df
def get_input_columns(self) -> List[str]:
return [self.label_column]
def get_output_columns(self) -> List[str]:
return [self.output_column]
def _get_serializable_fields(self) -> Dict[str, Any]:
return {
"label_column": self.label_column,
"output_column": self.output_column,
"_fitted": getattr(self, "_fitted", None),
}
def _set_serializable_fields(self, fields: Dict[str, Any], version: int):
# required fields
self.label_column = fields["label_column"]
self.output_column = fields["output_column"]
# optional fields
self._fitted = fields.get("_fitted")
def __repr__(self):
return f"{self.__class__.__name__}(label_column={self.label_column!r}, output_column={self.output_column!r})"
@PublicAPI(stability="alpha")
@SerializablePreprocessor(version=1, identifier="io.ray.preprocessors.categorizer")
| LabelEncoder |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 8265,
"end": 8383
} | class ____(str, Enum):
"""
Permission to view the settings of the job.
"""
canview = "CAN_VIEW"
| CanView |
python | django__django | tests/urlpatterns_reverse/nested_urls.py | {
"start": 132,
"end": 424
} | class ____(View):
pass
nested = (
[
path("view1/", view1, name="view1"),
path("view3/", View3.as_view(), name="view3"),
],
"backend",
)
urlpatterns = [
path("some/path/", include(nested, namespace="nested")),
path("view2/", view2, name="view2"),
]
| View3 |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_organization_monitor_index.py | {
"start": 17855,
"end": 27122
} | class ____(MonitorTestCase):
endpoint = "sentry-api-0-organization-monitor-index"
method = "post"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
@patch("sentry.analytics.record")
def test_simple(self, mock_record: MagicMock) -> None:
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"owner": f"user:{self.user.id}",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
with outbox_runner():
response = self.get_success_response(self.organization.slug, **data)
monitor = Monitor.objects.get(slug=response.data["slug"])
assert monitor.organization_id == self.organization.id
assert monitor.project_id == self.project.id
assert monitor.name == "My Monitor"
assert monitor.status == ObjectStatus.ACTIVE
assert monitor.owner_user_id == self.user.id
assert monitor.owner_team_id is None
assert monitor.config == {
"schedule_type": ScheduleType.CRONTAB,
"schedule": "0 0 * * *",
"checkin_margin": None,
"max_runtime": None,
"failure_issue_threshold": None,
"recovery_threshold": None,
}
assert_org_audit_log_exists(
organization=self.organization,
event=audit_log.get_event_id("MONITOR_ADD"),
data={"upsert": False, **monitor.get_audit_log_data()},
)
assert get_detector_for_monitor(monitor) is not None
self.project.refresh_from_db()
assert self.project.flags.has_cron_monitors
assert_any_analytics_event(
mock_record,
CronMonitorCreated(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=self.project.id,
from_upsert=False,
),
)
assert_any_analytics_event(
mock_record,
FirstCronMonitorCreated(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=self.project.id,
from_upsert=False,
),
)
def test_slug(self) -> None:
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "my-monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_success_response(self.organization.slug, **data)
assert response.data["slug"] == "my-monitor"
def test_invalid_numeric_slug(self) -> None:
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert response.data["slug"][0] == DEFAULT_SLUG_ERROR_MESSAGE
def test_generated_slug_not_entirely_numeric(self) -> None:
data = {
"project": self.project.slug,
"name": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_success_response(self.organization.slug, **data, status_code=201)
slug = response.data["slug"]
assert slug.startswith("1234-")
assert not slug.isdecimal()
def test_crontab_whitespace(self) -> None:
data = {
"project": self.project.slug,
"name": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": " *\t* * * * "},
}
response = self.get_success_response(self.organization.slug, **data, status_code=201)
schedule = response.data["config"]["schedule"]
assert schedule == "* * * * *"
@override_settings(MAX_MONITORS_PER_ORG=2)
def test_monitor_organization_limit(self) -> None:
for i in range(settings.MAX_MONITORS_PER_ORG):
data = {
"project": self.project.slug,
"name": f"Unicron-{i}",
"slug": f"unicron-{i}",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
self.get_success_response(self.organization.slug, **data)
data = {
"project": self.project.slug,
"name": f"Unicron-{settings.MAX_MONITORS_PER_ORG + 1}",
"slug": f"unicron-{settings.MAX_MONITORS_PER_ORG + 1}",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_error_response(self.organization.slug, status_code=400, **data)
assert response.data["nonFieldErrors"] == [
ErrorDetail(
f"You may not exceed {settings.MAX_MONITORS_PER_ORG} monitors per organization",
code="invalid",
)
]
def test_simple_with_alert_rule(self) -> None:
from sentry.workflow_engine.models import AlertRuleWorkflow, DetectorWorkflow, Workflow
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
"alert_rule": {
"environment": self.environment.name,
"targets": [{"targetIdentifier": self.user.id, "targetType": "Member"}],
},
}
response = self.get_success_response(self.organization.slug, **data)
monitor = Monitor.objects.get(slug=response.data["slug"])
alert_rule_id = monitor.config["alert_rule_id"]
rule = Rule.objects.get(
project_id=monitor.project_id, id=alert_rule_id, source=RuleSource.CRON_MONITOR
)
assert rule is not None
assert rule.environment_id == self.environment.id
# Verify the detector was created and linked to the workflow
detector = get_detector_for_monitor(monitor)
assert detector is not None
# Verify the workflow was created for the rule
alert_rule_workflow = AlertRuleWorkflow.objects.get(rule_id=rule.id)
workflow = Workflow.objects.get(id=alert_rule_workflow.workflow.id)
# Verify the detector is linked to the workflow
assert DetectorWorkflow.objects.filter(detector=detector, workflow=workflow).exists()
def test_checkin_margin_zero(self) -> None:
# Invalid checkin margin
#
# XXX(epurkhiser): We currently transform 0 -> 1 for backwards
# compatability. If we remove the custom transformer in the config
# validator this test will chagne to a get_error_response test.
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "cron_job",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily", "checkin_margin": 0},
}
response = self.get_success_response(self.organization.slug, **data)
assert Monitor.objects.get(slug=response.data["slug"]).config["checkin_margin"] == 1
@patch("sentry.quotas.backend.assign_seat")
def test_create_monitor_assigns_seat(self, assign_seat: MagicMock) -> None:
assign_seat.return_value = Outcome.ACCEPTED
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_success_response(self.organization.slug, **data)
monitor = Monitor.objects.get(slug=response.data["slug"])
assign_seat.assert_called_with(DataCategory.MONITOR_SEAT, monitor)
assert monitor.status == ObjectStatus.ACTIVE
@patch("sentry.quotas.backend.assign_seat")
def test_create_monitor_without_seat(self, assign_seat: MagicMock) -> None:
assign_seat.return_value = Outcome.RATE_LIMITED
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
response = self.get_success_response(self.organization.slug, **data)
monitor = Monitor.objects.get(slug=response.data["slug"])
assert assign_seat.called
assert response.data["status"] == "disabled"
assert monitor.status == ObjectStatus.DISABLED
def test_invalid_schedule(self) -> None:
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
# There is no Febuary 31st
"config": {"schedule_type": "crontab", "schedule": "0 0 31 2 *"},
}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert response.data["config"]["schedule"][0] == "Schedule is invalid"
| CreateOrganizationMonitorTest |
python | pytorch__pytorch | test/dynamo/test_modes.py | {
"start": 825,
"end": 1117
} | class ____(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args, kwargs=None):
if not kwargs:
kwargs = {}
if func == torch.add:
return torch.zeros(2, 2)
return super().__torch_function__(func, types, args, kwargs)
| TestMode |
python | wandb__wandb | wandb/vendor/pygments/lexers/python.py | {
"start": 11159,
"end": 18697
} | class ____(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
.. versionadded:: 0.10
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting (still valid in Py3)
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# the new style '{}'.format(...) string formatting
(r'\{'
'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
'(\![sra])?' # conversion
'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
'\}', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%{\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%|(\{{1,2})', ttype)
# newlines are an error (use "nl" state)
]
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(words((
'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'raise', 'nonlocal', 'return', 'try', 'while', 'yield', 'yield from',
'as', 'with'), suffix=r'\b'),
Keyword),
(words((
'True', 'False', 'None'), suffix=r'\b'),
Keyword.Constant),
]
tokens['builtins'] = [
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes',
'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict',
'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list',
'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct',
'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str',
'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)',
suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError',
'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
'RuntimeError', 'RuntimeWarning', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
'WindowsError', 'ZeroDivisionError',
# new builtin exceptions from PEP 3151
'BlockingIOError', 'ChildProcessError', 'ConnectionError',
'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
'PermissionError', 'ProcessLookupError', 'TimeoutError'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
]
tokens['magicfuncs'] = [
(words((
'__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', '__and__',
'__anext__', '__await__', '__bool__', '__bytes__', '__call__',
'__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
'__delitem__', '__dir__', '__divmod__', '__enter__', '__eq__', '__exit__',
'__float__', '__floordiv__', '__format__', '__ge__', '__get__',
'__getattr__', '__getattribute__', '__getitem__', '__gt__', '__hash__',
'__iadd__', '__iand__', '__ifloordiv__', '__ilshift__', '__imatmul__',
'__imod__', '__import__', '__imul__', '__index__', '__init__',
'__instancecheck__', '__int__', '__invert__', '__ior__', '__ipow__',
'__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__',
'__le__', '__len__', '__length_hint__', '__lshift__', '__lt__',
'__matmul__', '__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
'__new__', '__next__', '__or__', '__pos__', '__pow__', '__prepare__',
'__radd__', '__rand__', '__rdivmod__', '__repr__', '__reversed__',
'__rfloordiv__', '__rlshift__', '__rmatmul__', '__rmod__', '__rmul__',
'__ror__', '__round__', '__rpow__', '__rrshift__', '__rshift__',
'__rsub__', '__rtruediv__', '__rxor__', '__set__', '__setattr__',
'__setitem__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
'__xor__'), suffix=r'\b'),
Name.Function.Magic),
]
tokens['magicvars'] = [
(words((
'__annotations__', '__bases__', '__class__', '__closure__', '__code__',
'__defaults__', '__dict__', '__doc__', '__file__', '__func__',
'__globals__', '__kwdefaults__', '__module__', '__mro__', '__name__',
'__objclass__', '__qualname__', '__self__', '__slots__', '__weakref__'),
suffix=r'\b'),
Name.Variable.Magic),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@\w+', Name.Decorator),
(r'@', Operator), # new matrix multiplication operator
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
default('#pop'),
]
tokens['strings-single'] = innerstring_rules(String.Single)
tokens['strings-double'] = innerstring_rules(String.Double)
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
| Python3Lexer |
python | sympy__sympy | sympy/physics/mechanics/loads.py | {
"start": 2301,
"end": 5406
} | class ____(LoadBase):
"""Torque acting upon a frame.
Explanation
===========
A torque is a free vector that is acting on a reference frame, which is
associated with a rigid body. This class stores both the frame and the
vector. A tuple can also be used, with the location as the first item and
the vector as second item.
Examples
========
A torque of magnitude 2 about N.x acting on a frame N can be created as
follows:
>>> from sympy.physics.mechanics import ReferenceFrame, Torque
>>> N = ReferenceFrame('N')
>>> Torque(N, 2 * N.x)
(N, 2*N.x)
If a body is supplied, then the frame fixed to that body is used.
>>> from sympy.physics.mechanics import RigidBody
>>> rb = RigidBody('rb', frame=N)
>>> Torque(rb, 2 * N.x)
(N, 2*N.x)
"""
def __new__(cls, frame, torque):
if isinstance(frame, BodyBase):
frame = frame.frame
if not isinstance(frame, ReferenceFrame):
raise TypeError('Torque location should be a ReferenceFrame.')
if not isinstance(torque, Vector):
raise TypeError('Torque vector should be a Vector.')
return super().__new__(cls, frame, torque)
def __repr__(self):
return (f'{self.__class__.__name__}(frame={self.frame}, '
f'torque={self.torque})')
@property
def frame(self):
return self.location
@property
def torque(self):
return self.vector
def gravity(acceleration, *bodies):
"""
Returns a list of gravity forces given the acceleration
due to gravity and any number of particles or rigidbodies.
Example
=======
>>> from sympy.physics.mechanics import ReferenceFrame, Particle, RigidBody
>>> from sympy.physics.mechanics.loads import gravity
>>> from sympy import symbols
>>> N = ReferenceFrame('N')
>>> g = symbols('g')
>>> P = Particle('P')
>>> B = RigidBody('B')
>>> gravity(g*N.y, P, B)
[(P_masscenter, P_mass*g*N.y),
(B_masscenter, B_mass*g*N.y)]
"""
gravity_force = []
for body in bodies:
if not isinstance(body, BodyBase):
raise TypeError(f'{type(body)} is not a body type')
gravity_force.append(Force(body.masscenter, body.mass * acceleration))
return gravity_force
def _parse_load(load):
"""Helper function to parse loads and convert tuples to load objects."""
if isinstance(load, LoadBase):
return load
elif isinstance(load, tuple):
if len(load) != 2:
raise ValueError(f'Load {load} should have a length of 2.')
if isinstance(load[0], Point):
return Force(load[0], load[1])
elif isinstance(load[0], ReferenceFrame):
return Torque(load[0], load[1])
else:
raise ValueError(f'Load not recognized. The load location {load[0]}'
f' should either be a Point or a ReferenceFrame.')
raise TypeError(f'Load type {type(load)} not recognized as a load. It '
f'should be a Force, Torque or tuple.')
| Torque |
python | doocs__leetcode | solution/1700-1799/1786.Number of Restricted Paths From First to Last Node/Solution2.py | {
"start": 0,
"end": 794
} | class ____:
def countRestrictedPaths(self, n: int, edges: List[List[int]]) -> int:
g = defaultdict(list)
for u, v, w in edges:
g[u].append((v, w))
g[v].append((u, w))
dist = [inf] * (n + 1)
dist[n] = 0
q = [(0, n)]
mod = 10**9 + 7
while q:
_, u = heappop(q)
for v, w in g[u]:
if dist[v] > dist[u] + w:
dist[v] = dist[u] + w
heappush(q, (dist[v], v))
arr = list(range(1, n + 1))
arr.sort(key=lambda i: dist[i])
f = [0] * (n + 1)
f[n] = 1
for i in arr:
for j, _ in g[i]:
if dist[i] > dist[j]:
f[i] = (f[i] + f[j]) % mod
return f[1]
| Solution |
python | mlflow__mlflow | mlflow/exceptions.py | {
"start": 5689,
"end": 5818
} | class ____(MlflowException):
"""Exception thrown when expected configuration file/directory not found"""
| MissingConfigException |
python | ansible__ansible | test/lib/ansible_test/_internal/completion.py | {
"start": 457,
"end": 730
} | class ____(enum.Enum):
"""The control group version(s) required by a container."""
NONE = 'none'
V1_ONLY = 'v1-only'
V2_ONLY = 'v2-only'
V1_V2 = 'v1-v2'
def __repr__(self) -> str:
return f'{self.__class__.__name__}.{self.name}'
| CGroupVersion |
python | apache__airflow | devel-common/src/tests_common/test_utils/logs.py | {
"start": 2806,
"end": 11246
} | class ____:
"""
Test that structlog messages are logged.
This extends the feature built in to structlog to make it easier to find if a message is logged.
>>> def test_something(cap_structlog):
... log.info("some event", field1=False, field2=[1, 2])
... log.info("some event", field1=True)
... assert "some_event" in cap_structlog # a string searches on `event` field
... assert {"event": "some_event", "field1": True} in cap_structlog # Searches only on passed fields
... assert {"field2": [1, 2]} in cap_structlog
...
... assert "not logged" not in cap_structlog # not in works too
This fixture class will also manage the log level of stdlib loggers via ``at_level`` and ``set_level``.
"""
# This class is a manual mixing of pytest's LogCaptureFixture and structlog's LogCapture class, but
# tailored to Airflow's "send all logs via structlog" approach
_logger: str | None = None
NAME_TO_LEVEL: dict[str, int]
PER_LOGGER_LEVELS: MutableMapping[str, int]
"""The logger we specifically want to capture log messages from"""
def __init__(self):
self.entries = []
self._initial_logger_levels: dict[str, int] = {}
try:
from airflow.sdk._shared.logging.structlog import NAME_TO_LEVEL, PER_LOGGER_LEVELS
self.NAME_TO_LEVEL = NAME_TO_LEVEL
self.PER_LOGGER_LEVELS = PER_LOGGER_LEVELS
try:
import airflow._shared.logging.structlog
except ModuleNotFoundError:
pass
else:
airflow._shared.logging.structlog.PER_LOGGER_LEVELS = PER_LOGGER_LEVELS
except ModuleNotFoundError:
from airflow._shared.logging.structlog import NAME_TO_LEVEL, PER_LOGGER_LEVELS
self.NAME_TO_LEVEL = NAME_TO_LEVEL
self.PER_LOGGER_LEVELS = PER_LOGGER_LEVELS
def _finalize(self) -> None:
"""
Finalize the fixture.
This restores the log levels and the disabled logging levels changed by :meth:`set_level`.
"""
for logger_name, level in self._initial_logger_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
if level is logging.NOTSET:
del self.PER_LOGGER_LEVELS[logger_name]
else:
self.PER_LOGGER_LEVELS[logger_name] = level
def __contains__(self, target):
import operator
if isinstance(target, str):
def predicate(e):
return e["event"] == target
elif isinstance(target, dict):
# Partial comparison -- only check keys passed in
get = operator.itemgetter(*target.keys())
want = tuple(target.values())
def predicate(e):
try:
got = get(e)
return all(
expected.match(val) if isinstance(expected, re.Pattern) else val == expected
for (val, expected) in zip(got, want)
)
except Exception:
return False
else:
raise TypeError(f"Can't search logs using {type(target)}")
return any(predicate(e) for e in self.entries)
def __getitem__(self, i):
return self.entries[i]
def __iter__(self):
return iter(self.entries)
def __repr__(self):
return f"<StructlogCapture #entries={len(self.entries)}>"
def __call__(self, logger: WrappedLogger, method_name: str, event_dict: EventDict) -> NoReturn:
from structlog import DropEvent
from structlog._log_levels import map_method_name
event_dict["event"] = str(event_dict["event"])
event_dict["log_level"] = map_method_name(method_name)
if name := getattr(logger, "name", None):
event_dict["logger_name"] = name
# Capture the current exception. This mirrors the "ExceptionRenderer", but much more minimal for
# testing
if event_dict.get("exc_info") is True:
event_dict["exc_info"] = sys.exc_info()
self.entries.append(event_dict)
raise DropEvent
@property
def text(self):
"""All the event text as a single multi-line string."""
def exc_dict_to_string(exc):
if isinstance(exc, tuple):
yield from traceback.format_exception(*exc)
return
for i, e in enumerate(exc):
if i != 0:
yield "\n"
yield "During handling of the above exception, another exception occurred:\n"
yield "\n"
# This doesn't include the stacktrace, but this should be enough for testing
yield f"{e['exc_type']}: {e['exc_value']}\n"
def format(e):
yield e["event"] + "\n"
if exc_info := e.get("exc_info"):
yield from exc_dict_to_string(exc_info)
elif exc := e.get("exception"):
yield from exc_dict_to_string(exc)
return "".join(itertools.chain.from_iterable(map(format, self.entries)))
# These next fns make it duck-type the same as Pytests "caplog" fixture
@property
def messages(self):
"""All the event messages as a list."""
return [e["event"] for e in self.entries]
@contextmanager
def at_level(self, level: str | int, logger: str | None = None):
if isinstance(level, str):
level = self.NAME_TO_LEVEL[level.lower()]
# Since we explicitly set the level of the "airflow" logger in our config, we want to set that by
# default if the test auithor didn't ask for this at a specific logger to be set (otherwise we only
# set the root logging level, which doesn't have any affect if sub loggers have explicit levels set)
keys: tuple[str, ...] = (logger or "",)
if not logger:
keys += ("airflow",)
def _reset(logger, key, level, orig_hdlr_level):
logger.setLevel(level)
if level is logging.NOTSET:
del self.PER_LOGGER_LEVELS[key]
else:
self.PER_LOGGER_LEVELS[key] = level
if logger.handlers:
hdlr = logger.handlers[0]
hdlr.setLevel(orig_hdlr_level)
cm = ExitStack()
for key in keys:
old = self.PER_LOGGER_LEVELS.get(key, logging.NOTSET)
self.PER_LOGGER_LEVELS[key] = level
stdlogger = logging.getLogger(key)
stdlogger.setLevel(level)
hdlr = orig_hdlr_level = None
if stdlogger.handlers:
hdlr = stdlogger.handlers[0]
orig_hdlr_level = hdlr.level
hdlr.setLevel(level)
cm.callback(_reset, stdlogger, key, old, orig_hdlr_level)
with cm:
yield self
def set_level(self, level: str | int, logger: str | None = None):
# Set the global level
if isinstance(level, str):
level = self.NAME_TO_LEVEL[level.lower()]
key = logger or ""
stdlogger = logging.getLogger(key)
self._initial_logger_levels[key] = self.PER_LOGGER_LEVELS.get(key, logging.NOTSET)
self.PER_LOGGER_LEVELS[key] = level
stdlogger.setLevel(level)
def clear(self):
self.entries = []
# pytest caplog support:
# TODO: deprecate and remove all of this in tests
@property
def records(self):
records = []
for entry in self.entries:
record = logging.LogRecord(
entry.get("logger", "") or entry.get("logger_name"),
self.NAME_TO_LEVEL.get(entry.get("log_level"), 0),
"?",
0,
entry["event"],
(),
entry.get("exc_info") or entry.get("exception"),
None,
None,
)
record.message = record.msg
records.append(record)
return records
@property
def record_tuples(self):
return [
(
entry.get("logger", "") or entry.get("logger_name"),
self.NAME_TO_LEVEL.get(entry.get("log_level"), 0),
entry.get("event"),
)
for entry in self.entries
]
| StructlogCapture |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_db_command.py | {
"start": 1184,
"end": 26242
} | class ____:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@mock.patch("airflow.cli.commands.db_command.db.resetdb")
def test_cli_resetdb(self, mock_resetdb):
db_command.resetdb(self.parser.parse_args(["db", "reset", "--yes"]))
mock_resetdb.assert_called_once_with(skip_init=False)
@mock.patch("airflow.cli.commands.db_command.db.resetdb")
def test_cli_resetdb_skip_init(self, mock_resetdb):
db_command.resetdb(self.parser.parse_args(["db", "reset", "--yes", "--skip-init"]))
mock_resetdb.assert_called_once_with(skip_init=True)
def test_run_db_migrate_command_success_and_messages(self, capsys):
class Args:
to_revision = None
to_version = None
from_revision = None
from_version = None
show_sql_only = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_migrate_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing upgrade" in out
assert "Database migrating done!" in out
assert called == {"to_revision": None, "from_revision": None, "show_sql_only": False}
def test_run_db_migrate_command_offline_generation(self, capsys):
class Args:
to_revision = None
to_version = None
from_revision = None
from_version = None
show_sql_only = True
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_migrate_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Generating sql for upgrade" in out
assert called == {"to_revision": None, "from_revision": None, "show_sql_only": True}
@pytest.mark.parametrize(
("args", "match"),
[
(
{
"to_revision": "abc",
"to_version": "2.10.0",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Cannot supply both",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": "2.10.0",
"show_sql_only": True,
},
"Cannot supply both",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": None,
"show_sql_only": False,
},
"only .* with `--show-sql-only`",
),
(
{
"to_revision": None,
"to_version": "abc",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Invalid version",
),
(
{
"to_revision": None,
"to_version": "2.1.25",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Unknown version",
),
],
)
def test_run_db_migrate_command_validation_errors(self, args, match):
class Args:
to_revision = args["to_revision"]
to_version = args["to_version"]
from_revision = args["from_revision"]
from_version = args["from_version"]
show_sql_only = args["show_sql_only"]
def fake_command(**kwargs):
pass
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match=match):
db_command.run_db_migrate_command(Args(), fake_command, heads)
@mock.patch("airflow.cli.commands.db_command.db.check_migrations")
def test_cli_check_migrations(self, mock_wait_for_migrations):
db_command.check_migrations(self.parser.parse_args(["db", "check-migrations"]))
mock_wait_for_migrations.assert_called_once_with(timeout=60)
@pytest.mark.parametrize(
("args", "called_with"),
[
(
[],
dict(
to_revision=None,
from_revision=None,
show_sql_only=False,
),
),
(
["--show-sql-only"],
dict(
to_revision=None,
from_revision=None,
show_sql_only=True,
),
),
(
["--to-revision", "abc"],
dict(
to_revision="abc",
from_revision=None,
show_sql_only=False,
),
),
(
["--to-revision", "abc", "--show-sql-only"],
dict(to_revision="abc", from_revision=None, show_sql_only=True),
),
(
["--to-version", "2.10.0"],
dict(
to_revision="22ed7efa9da2",
from_revision=None,
show_sql_only=False,
),
),
(
["--to-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision=None,
show_sql_only=True,
),
),
(
["--to-revision", "abc", "--from-revision", "abc123", "--show-sql-only"],
dict(
to_revision="abc",
from_revision="abc123",
show_sql_only=True,
),
),
(
["--to-revision", "abc", "--from-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="abc",
from_revision="22ed7efa9da2",
show_sql_only=True,
),
),
(
["--to-version", "2.10.0", "--from-revision", "abc123", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision="abc123",
show_sql_only=True,
),
),
(
["--to-version", "2.10.0", "--from-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision="22ed7efa9da2",
show_sql_only=True,
),
),
],
)
@mock.patch("airflow.cli.commands.db_command.db.upgradedb")
def test_cli_upgrade_success(self, mock_upgradedb, args, called_with):
# TODO(ephraimbuddy): Revisit this when we add more migration files and use other versions/revisions other than 2.10.0/22ed7efa9da2
db_command.migratedb(self.parser.parse_args(["db", "migrate", *args]))
mock_upgradedb.assert_called_once_with(**called_with)
@pytest.mark.parametrize(
("args", "pattern"),
[
pytest.param(
["--to-revision", "abc", "--to-version", "2.10.0"],
"Cannot supply both",
id="to both version and revision",
),
pytest.param(
["--from-revision", "abc", "--from-version", "2.10.0"],
"Cannot supply both",
id="from both version and revision",
),
pytest.param(["--to-version", "2.1.25"], "Unknown version '2.1.25'", id="unknown to version"),
pytest.param(["--to-version", "abc"], "Invalid version 'abc'", id="invalid to version"),
pytest.param(
["--to-revision", "abc", "--from-revision", "abc123"],
"used with `--show-sql-only`",
id="requires offline",
),
pytest.param(
["--to-revision", "abc", "--from-version", "2.10.0"],
"used with `--show-sql-only`",
id="requires offline",
),
pytest.param(
["--to-revision", "2.10.0", "--from-version", "2.1.25", "--show-sql-only"],
"Unknown version '2.1.25'",
id="unknown from version",
),
pytest.param(
["--to-revision", "2.10.0", "--from-version", "abc", "--show-sql-only"],
"Invalid version 'abc'",
id="invalid from version",
),
],
)
@mock.patch("airflow.cli.commands.db_command.db.upgradedb")
def test_cli_sync_failure(self, mock_upgradedb, args, pattern):
with pytest.raises(SystemExit, match=pattern):
db_command.migratedb(self.parser.parse_args(["db", "migrate", *args]))
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("mysql://root@mysql:3306/airflow"),
)
def test_cli_shell_mysql(self, mock_tmp_file, mock_execute_interactive):
mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["mysql", "--defaults-extra-file=/tmp/name"])
mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
b"[client]\nhost = mysql\nuser = root\npassword = \nport = 3306\ndatabase = airflow"
)
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("mysql://root@mysql/airflow"),
)
def test_cli_shell_mysql_without_port(self, mock_tmp_file, mock_execute_interactive):
mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["mysql", "--defaults-extra-file=/tmp/name"])
mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
b"[client]\nhost = mysql\nuser = root\npassword = \nport = 3306\ndatabase = airflow"
)
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("sqlite:////root/airflow/airflow.db"),
)
def test_cli_shell_sqlite(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["sqlite3", "/root/airflow/airflow.db"])
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg2://postgres:airflow@postgres:5432/airflow"),
)
def test_cli_shell_postgres(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg://postgres:airflow@postgres:5432/airflow"),
)
def test_cli_shell_postgres_ppg3(self, mock_execute_interactive):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg2://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_postgres_without_port(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_postgres_without_port_ppg3(self, mock_execute_interactive):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("invalid+psycopg2://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_invalid(self):
with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg2"):
db_command.shell(self.parser.parse_args(["db", "shell"]))
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("invalid+psycopg://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_invalid_ppg3(self):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg"):
db_command.shell(self.parser.parse_args(["db", "shell"]))
def test_run_db_downgrade_command_success_and_messages(self, capsys):
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = True
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing downgrade" in out
assert "Downgrade complete" in out
assert called == {"to_revision": "abc", "from_revision": None, "show_sql_only": False}
def test_run_db_downgrade_command_offline_generation(self, capsys):
class Args:
to_revision = None
to_version = "2.10.0"
from_revision = None
from_version = None
show_sql_only = True
yes = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Generating sql for downgrade" in out
assert called == {"to_revision": "22ed7efa9da2", "from_revision": None, "show_sql_only": True}
@pytest.mark.parametrize(
("args", "match"),
[
(
{
"to_revision": None,
"to_version": None,
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": False,
},
"Must provide either",
),
(
{
"to_revision": "abc",
"to_version": "2.10.0",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"Cannot supply both",
),
(
{
"to_revision": "abc",
"to_version": None,
"from_revision": "abc1",
"from_version": "2.10.0",
"show_sql_only": True,
"yes": True,
},
"may not be combined",
),
(
{
"to_revision": None,
"to_version": "2.1.25",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"not supported",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"only .* with `--show-sql-only`",
),
],
)
def test_run_db_downgrade_command_validation_errors(self, args, match):
class Args:
to_revision = args["to_revision"]
to_version = args["to_version"]
from_revision = args["from_revision"]
from_version = args["from_version"]
show_sql_only = args["show_sql_only"]
yes = args["yes"]
def fake_command(**kwargs):
pass
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match=match):
db_command.run_db_downgrade_command(Args(), fake_command, heads)
@mock.patch("airflow.cli.commands.db_command.input")
def test_run_db_downgrade_command_confirmation_yes_calls_command(self, mock_input, capsys):
mock_input.return_value = "Y"
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing downgrade" in out
assert called == {"to_revision": "abc", "from_revision": None, "show_sql_only": False}
@mock.patch("airflow.cli.commands.db_command.input")
def test_run_db_downgrade_command_confirmation_no_cancels(self, mock_input):
mock_input.return_value = "n"
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = False
def fake_command(**kwargs):
raise AssertionError("Command should not be called when cancelled")
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match="Cancelled"):
db_command.run_db_downgrade_command(Args(), fake_command, heads)
@pytest.mark.parametrize(
("args", "match"),
[
(["-y", "--to-revision", "abc", "--to-version", "2.2.0"], "Cannot supply both"),
(["-y", "--to-revision", "abc1", "--from-revision", "abc2"], "only .* with `--show-sql-only`"),
(["-y", "--to-revision", "abc1", "--from-version", "2.2.2"], "only .* with `--show-sql-only`"),
(["-y", "--to-version", "2.2.2", "--from-version", "2.2.2"], "only .* with `--show-sql-only`"),
(
["-y", "--to-revision", "abc", "--from-version", "2.2.0", "--from-revision", "abc"],
"may not be combined",
),
(["-y", "--to-version", "abc"], r"Downgrading to .* not supported\."),
(["-y"], "Must provide either"),
],
)
@mock.patch("airflow.utils.db.downgrade")
def test_cli_downgrade_invalid(self, mock_dg, args, match):
"""We test some options that should produce an error"""
with pytest.raises(SystemExit, match=match):
db_command.downgrade(self.parser.parse_args(["db", "downgrade", *args]))
@pytest.mark.parametrize(
("args", "expected"),
[
(["-y", "--to-revision", "abc1"], dict(to_revision="abc1")),
(
["-y", "--to-revision", "abc1", "--from-revision", "abc2", "-s"],
dict(to_revision="abc1", from_revision="abc2", show_sql_only=True),
),
(
["-y", "--to-revision", "abc1", "--from-version", "2.10.0", "-s"],
dict(to_revision="abc1", from_revision="22ed7efa9da2", show_sql_only=True),
),
(
["-y", "--to-version", "2.10.0", "--from-version", "2.10.0", "-s"],
dict(to_revision="22ed7efa9da2", from_revision="22ed7efa9da2", show_sql_only=True),
),
(["-y", "--to-version", "2.10.0"], dict(to_revision="22ed7efa9da2")),
],
)
@mock.patch("airflow.utils.db.downgrade")
def test_cli_downgrade_good(self, mock_dg, args, expected):
defaults = dict(from_revision=None, show_sql_only=False)
db_command.downgrade(self.parser.parse_args(["db", "downgrade", *args]))
mock_dg.assert_called_with(**{**defaults, **expected})
@pytest.mark.parametrize(
("resp", "raise_"),
[
("y", False),
("Y", False),
("n", True),
("a", True), # any other value
],
)
@mock.patch("airflow.utils.db.downgrade")
@mock.patch("airflow.cli.commands.db_command.input")
def test_cli_downgrade_confirm(self, mock_input, mock_dg, resp, raise_):
mock_input.return_value = resp
if raise_:
with pytest.raises(SystemExit):
db_command.downgrade(self.parser.parse_args(["db", "downgrade", "--to-revision", "abc"]))
else:
db_command.downgrade(self.parser.parse_args(["db", "downgrade", "--to-revision", "abc"]))
mock_dg.assert_called_with(to_revision="abc", from_revision=None, show_sql_only=False)
def test_check(self):
retry, retry_delay = 6, 9 # arbitrary but distinct number
args = self.parser.parse_args(
["db", "check", "--retry", str(retry), "--retry-delay", str(retry_delay)]
)
sleep = MagicMock()
always_pass = Mock()
always_fail = Mock(side_effect=OperationalError("", None, None))
with patch("time.sleep", new=sleep), patch("airflow.utils.db.check", new=always_pass):
db_command.check(args)
always_pass.assert_called_once()
sleep.assert_not_called()
with patch("time.sleep", new=sleep), patch("airflow.utils.db.check", new=always_fail):
with pytest.raises(OperationalError):
db_command.check(args)
# With N retries there are N+1 total checks, hence N sleeps
always_fail.assert_has_calls([call()] * (retry + 1))
sleep.assert_has_calls([call(retry_delay)] * retry)
| TestCliDb |
python | RaRe-Technologies__gensim | gensim/corpora/opinosiscorpus.py | {
"start": 824,
"end": 2834
} | class ____:
"""Creates a corpus and dictionary from the Opinosis dataset.
http://kavita-ganesan.com/opinosis-opinion-dataset/
This data is organized in folders, each folder containing a few short docs.
Data can be obtained quickly using the following commands in bash:
mkdir opinosis && cd opinosis
wget https://github.com/kavgan/opinosis/raw/master/OpinosisDataset1.0_0.zip
unzip OpinosisDataset1.0_0.zip
corpus and dictionary can be accessed by using the .corpus and .id2word members
"""
def __init__(self, path):
"""Load the downloaded corpus.
Parameters
----------
path : string
Path to the extracted zip file. If 'summaries-gold' is in a folder
called 'opinosis', then the Path parameter would be 'opinosis',
either relative to you current working directory or absolute.
"""
# citation
path = os.path.join(path, "summaries-gold")
dictionary = Dictionary()
corpus = []
stemmer = PorterStemmer()
for directory, b, filenames in os.walk(path):
# each subdirectory of path is one collection of reviews to a specific product
# now get the corpus/documents
for filename in filenames:
filepath = directory + os.sep + filename
# write down the document and the topicId and split into train and testdata
with open(filepath) as file:
doc = file.read()
preprocessed_doc = [
stemmer.stem(token) for token in re.findall(r'\w+', doc.lower())
if token not in STOPWORDS
]
dictionary.add_documents([preprocessed_doc])
corpus += [dictionary.doc2bow(preprocessed_doc)]
# and return the results the same way the other corpus generating functions do
self.corpus = corpus
self.id2word = dictionary
| OpinosisCorpus |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_endpoint_service.py | {
"start": 8310,
"end": 14820
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = EndpointServiceHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_create_endpoint(self, mock_client) -> None:
self.hook.create_endpoint(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint=TEST_ENDPOINT,
endpoint_id=TEST_ENDPOINT_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.create_endpoint.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
endpoint=TEST_ENDPOINT,
endpoint_id=TEST_ENDPOINT_ID,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_delete_endpoint(self, mock_client) -> None:
self.hook.delete_endpoint(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint=TEST_ENDPOINT_NAME,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_endpoint.assert_called_once_with(
request=dict(
name=mock_client.return_value.endpoint_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.endpoint_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_ENDPOINT_NAME,
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_deploy_model(self, mock_client) -> None:
self.hook.deploy_model(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint=TEST_ENDPOINT_NAME,
deployed_model=TEST_DEPLOYED_MODEL,
traffic_split=TEST_TRAFFIC_SPLIT,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.deploy_model.assert_called_once_with(
request=dict(
endpoint=mock_client.return_value.endpoint_path.return_value,
deployed_model=TEST_DEPLOYED_MODEL,
traffic_split=TEST_TRAFFIC_SPLIT,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.endpoint_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_ENDPOINT_NAME,
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_get_endpoint(self, mock_client) -> None:
self.hook.get_endpoint(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint=TEST_ENDPOINT_NAME,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_endpoint.assert_called_once_with(
request=dict(
name=mock_client.return_value.endpoint_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.endpoint_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_ENDPOINT_NAME,
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_list_endpoints(self, mock_client) -> None:
self.hook.list_endpoints(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_endpoints.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
filter=None,
page_size=None,
page_token=None,
read_mask=None,
order_by=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_undeploy_model(self, mock_client) -> None:
self.hook.undeploy_model(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint=TEST_ENDPOINT_NAME,
deployed_model_id=TEST_DEPLOYED_MODEL_ID,
traffic_split=TEST_TRAFFIC_SPLIT,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.undeploy_model.assert_called_once_with(
request=dict(
endpoint=mock_client.return_value.endpoint_path.return_value,
deployed_model_id=TEST_DEPLOYED_MODEL_ID,
traffic_split=TEST_TRAFFIC_SPLIT,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.endpoint_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_REGION, TEST_ENDPOINT_NAME
)
@mock.patch(ENDPOINT_SERVICE_STRING.format("EndpointServiceHook.get_endpoint_service_client"))
def test_update_endpoint(self, mock_client) -> None:
self.hook.update_endpoint(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
endpoint_id=TEST_ENDPOINT_NAME,
endpoint=TEST_ENDPOINT,
update_mask=TEST_UPDATE_MASK,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.update_endpoint.assert_called_once_with(
request=dict(
endpoint=TEST_ENDPOINT,
update_mask=TEST_UPDATE_MASK,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
| TestEndpointServiceWithoutDefaultProjectIdHook |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_bigtable.py | {
"start": 1796,
"end": 6920
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook.get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.Client")
def test_bigtable_client_creation(self, mock_client, mock_get_creds):
result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=CLIENT_INFO,
admin=True,
)
assert mock_client.return_value == result
assert self.bigtable_hook_no_default_project_id._client == result
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_get_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_no_default_project_id.get_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="example-project")
assert res is not None
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_no_default_project_id.delete_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="example-project")
assert res is None
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance_overridden_project_id(self, get_client, instance_create):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_no_default_project_id.create_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
)
get_client.assert_called_once_with(project_id="example-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch("google.cloud.bigtable.instance.Instance.update")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_update_instance_overridden_project_id(self, get_client, instance_update):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_update.return_value = operation
res = self.bigtable_hook_no_default_project_id.update_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=CBT_INSTANCE,
instance_display_name=CBT_INSTANCE_DISPLAY_NAME,
instance_type=CBT_INSTANCE_TYPE,
instance_labels=CBT_INSTANCE_LABELS,
)
get_client.assert_called_once_with(project_id="example-project")
instance_update.assert_called_once_with()
assert res.instance_id == "instance"
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_table_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_no_default_project_id.delete_table(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE, table_id=CBT_TABLE
)
get_client.assert_called_once_with(project_id="example-project")
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
| TestBigtableHookNoDefaultProjectId |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/launchers/multiprocessing.py | {
"start": 12775,
"end": 14371
} | class ____:
"""Captures a hand-selected set of (global) variables in modules and provides a way to restore them.
It facilitates and encapsulates the transfer of globals like PyTorch's deterministic flags or random generator state
across process boundaries when launching processes with :func:`torch.multiprocessing.spawn`.
Example:
.. code-block:: python
# in main process
snapshot = _GlobalStateSnapshot.capture()
# in worker process
snapshot.restore()
"""
use_deterministic_algorithms: bool
use_deterministic_algorithms_warn_only: bool
cudnn_benchmark: bool
rng_states: dict[str, Any]
@classmethod
def capture(cls) -> "_GlobalStateSnapshot":
"""Capture a few global states from torch, numpy, etc., that we want to restore in a spawned worker process."""
return cls(
use_deterministic_algorithms=torch.are_deterministic_algorithms_enabled(),
use_deterministic_algorithms_warn_only=torch.is_deterministic_algorithms_warn_only_enabled(),
cudnn_benchmark=torch.backends.cudnn.benchmark,
rng_states=_collect_rng_states(),
)
def restore(self) -> None:
"""Restores all globals to the values captured in the :meth:`capture` method."""
torch.use_deterministic_algorithms(
self.use_deterministic_algorithms, warn_only=self.use_deterministic_algorithms_warn_only
)
torch.backends.cudnn.benchmark = self.cudnn_benchmark
_set_rng_states(self.rng_states)
| _GlobalStateSnapshot |
python | tensorflow__tensorflow | tensorflow/virtual_root_template_v2.__init__.py | {
"start": 1228,
"end": 3424
} | class ____(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
def __reduce__(self):
return __import__, (self.__name__,)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
parts = old_name.split(".")
parts[0] = parts[0] + "_core"
local_name = parts[-1]
existing_name = ".".join(parts)
_module = _LazyLoader(local_name, globals(), existing_name)
return _sys.modules.setdefault(old_name, _module)
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
"tensorflow._api",
"tensorflow.python",
"tensorflow.tools",
"tensorflow.core",
"tensorflow.compiler",
"tensorflow.lite",
"tensorflow.keras",
"tensorflow.compat",
"tensorflow.summary", # tensorboard
"tensorflow.examples",
]
# Lazy load all of the _top_level_modules, we don't need their names anymore
for _m in _top_level_modules:
_forward_module(_m)
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
_major_api_version = 2
# These should not be visible in the main tf module.
try:
del core
except NameError:
pass
try:
del python
except NameError:
pass
try:
del compiler
except NameError:
pass
try:
del tools
except NameError:
pass
try:
del examples
except NameError:
pass
# LINT.ThenChange(//tensorflow/virtual_root_template_v1.__init__.py.oss)
| _LazyLoader |
python | walkccc__LeetCode | solutions/1078. Occurrences After Bigram/1078.py | {
"start": 0,
"end": 211
} | class ____:
def findOcurrences(self, text: str, first: str, second: str) -> list[str]:
words = text.split()
return [c for a, b, c in zip(words, words[1:], words[2:]) if a == first and b == second]
| Solution |
python | django__django | tests/admin_views/models.py | {
"start": 8881,
"end": 9139
} | class ____(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == "_invalid":
raise ValidationError("invalid")
| Child |
python | ray-project__ray | release/nightly_tests/stress_tests/test_many_tasks.py | {
"start": 371,
"end": 9426
} | class ____(object):
def method(self, size, *xs):
return np.ones(size, dtype=np.uint8)
def ready(self):
pass
# Stage 0: Submit a bunch of small tasks with large returns.
def stage0(smoke=False):
num_tasks = 1000
size = 1000000
if smoke:
num_tasks //= 25
size //= 25
stage_0_iterations = []
start_time = time.time()
logger.info("Submitting many tasks with large returns.")
for i in range(10):
iteration_start = time.time()
logger.info("Iteration %s", i)
ray.get([f.remote(size) for _ in range(num_tasks)])
stage_0_iterations.append(time.time() - iteration_start)
return time.time() - start_time
# Stage 1: Launch a bunch of tasks.
def stage1(smoke=False):
num_tasks = 100000
if smoke:
num_tasks //= 25
stage_1_iterations = []
start_time = time.time()
logger.info("Submitting many tasks.")
for i in range(10):
iteration_start = time.time()
logger.info("Iteration %s", i)
ray.get([f.remote(0) for _ in range(num_tasks)])
stage_1_iterations.append(time.time() - iteration_start)
return time.time() - start_time, stage_1_iterations
# Launch a bunch of tasks, each with a bunch of dependencies. TODO(rkn): This
# test starts to fail if we increase the number of tasks in the inner loop from
# 500 to 1000. (approximately 615 seconds)
def stage2(smoke=False):
num_tasks_per_iteration = 500
if smoke:
num_tasks_per_iteration //= 25
stage_2_iterations = []
start_time = time.time()
logger.info("Submitting tasks with many dependencies.")
x_ids = []
for _ in range(5):
iteration_start = time.time()
for i in range(20):
logger.info(
"Iteration %s. Cumulative time %s seconds", i, time.time() - start_time
)
x_ids = [f.remote(0, *x_ids) for _ in range(num_tasks_per_iteration)]
ray.get(x_ids)
stage_2_iterations.append(time.time() - iteration_start)
logger.info("Finished after %s seconds.", time.time() - start_time)
return time.time() - start_time, stage_2_iterations
# Create a bunch of actors.
def stage3(total_num_remote_cpus, smoke=False):
start_time = time.time()
logger.info("Creating %s actors.", total_num_remote_cpus)
actors = [Actor.remote() for _ in range(total_num_remote_cpus)]
ray.get([actor.ready.remote() for actor in actors])
stage_3_creation_time = time.time() - start_time
logger.info("Finished stage 3 actor creation in %s seconds.", stage_3_creation_time)
num_tasks = 1000
if smoke:
num_tasks //= 25
# Submit a bunch of small tasks to each actor. (approximately 1070 seconds)
start_time = time.time()
logger.info("Submitting many small actor tasks.")
for N in [num_tasks, num_tasks * 100]:
x_ids = []
for i in range(N):
x_ids = [a.method.remote(0) for a in actors]
if i % 100 == 0:
logger.info("Submitted {}".format(i * len(actors)))
ray.get(x_ids)
return time.time() - start_time, stage_3_creation_time
# This tests https://github.com/ray-project/ray/issues/10150. The only way to
# integration test this is via performance. The goal is to fill up the cluster
# so that all tasks can be run, but spillback is required. Since the driver
# submits all these tasks it should easily be able to schedule each task in
# O(1) iterative spillback queries. If spillback behavior is incorrect, each
# task will require O(N) queries. Since we limit the number of inflight
# requests, we will run into head of line blocking and we should be able to
# measure this timing.
def stage4():
num_tasks = int(ray.cluster_resources()["CPU"])
logger.info("Scheduling many tasks for spillback.")
@ray.remote(num_cpus=1)
def func(t):
if t % 100 == 0:
logger.info(f"[spillback test] {t}/{num_tasks}")
start = time.perf_counter()
time.sleep(1)
end = time.perf_counter()
return start, end, ray._private.worker.global_worker.node.unique_id
results = ray.get([func.remote(i) for i in range(num_tasks)])
host_to_start_times = defaultdict(list)
for start, end, host in results:
host_to_start_times[host].append(start)
spreads = []
for host in host_to_start_times:
last = max(host_to_start_times[host])
first = min(host_to_start_times[host])
spread = last - first
spreads.append(spread)
logger.info(f"Spread: {last - first}\tLast: {last}\tFirst: {first}")
avg_spread = np.mean(spreads)
logger.info(f"Avg spread: {np.mean(spreads)}")
return avg_spread
def parse_script_args():
parser = argparse.ArgumentParser()
parser.add_argument("--num-nodes", type=int, default=100)
parser.add_argument("--smoke-test", action="store_true")
return parser.parse_known_args()
if __name__ == "__main__":
args, unknown = parse_script_args()
# These numbers need to correspond with the autoscaler config file.
# The number of remote nodes in the autoscaler should upper bound
# these because sometimes nodes fail to update.
num_remote_nodes = args.num_nodes
num_remote_cpus = 2
total_num_remote_cpus = num_remote_nodes * num_remote_cpus
is_smoke_test = args.smoke_test
result = {}
num_nodes = len(ray.nodes())
assert (
num_nodes == num_remote_nodes + 1
), f"{num_nodes}/{num_remote_nodes+1} are available"
logger.info(
"Nodes have all joined. There are %s resources.", ray.cluster_resources()
)
stage_0_time = stage0(smoke=is_smoke_test)
logger.info("Finished stage 0 after %s seconds.", stage_0_time)
result["stage_0_time"] = stage_0_time
stage_1_time, stage_1_iterations = stage1(smoke=is_smoke_test)
logger.info("Finished stage 1 after %s seconds.", stage_1_time)
result["stage_1_time"] = stage_1_time
result["stage_1_avg_iteration_time"] = sum(stage_1_iterations) / len(
stage_1_iterations
)
result["stage_1_max_iteration_time"] = max(stage_1_iterations)
result["stage_1_min_iteration_time"] = min(stage_1_iterations)
stage_2_time, stage_2_iterations = stage2(smoke=is_smoke_test)
logger.info("Finished stage 2 after %s seconds.", stage_2_time)
result["stage_2_time"] = stage_2_time
result["stage_2_avg_iteration_time"] = sum(stage_2_iterations) / len(
stage_2_iterations
)
result["stage_2_max_iteration_time"] = max(stage_2_iterations)
result["stage_2_min_iteration_time"] = min(stage_2_iterations)
stage_3_time, stage_3_creation_time = stage3(
total_num_remote_cpus, smoke=is_smoke_test
)
logger.info("Finished stage 3 in %s seconds.", stage_3_time)
result["stage_3_creation_time"] = stage_3_creation_time
result["stage_3_time"] = stage_3_time
stage_4_spread = stage4()
# avg_spread ~ 115 with Ray 1.0 scheduler. ~695 with (buggy) 0.8.7
# scheduler.
result["stage_4_spread"] = stage_4_spread
if not is_smoke_test:
result["perf_metrics"] = [
{
"perf_metric_name": "stage_0_time",
"perf_metric_value": stage_0_time,
"perf_metric_type": "LATENCY",
},
{
"perf_metric_name": "stage_1_avg_iteration_time",
"perf_metric_value": result["stage_1_avg_iteration_time"],
"perf_metric_type": "LATENCY",
},
{
"perf_metric_name": "stage_2_avg_iteration_time",
"perf_metric_value": result["stage_2_avg_iteration_time"],
"perf_metric_type": "LATENCY",
},
{
"perf_metric_name": "stage_3_creation_time",
"perf_metric_value": result["stage_3_creation_time"],
"perf_metric_type": "LATENCY",
},
{
"perf_metric_name": "stage_3_time",
"perf_metric_value": result["stage_3_time"],
"perf_metric_type": "LATENCY",
},
{
"perf_metric_name": "stage_4_spread",
"perf_metric_value": result["stage_4_spread"],
"perf_metric_type": "LATENCY",
},
]
print("PASSED.")
# TODO(rkn): The test below is commented out because it currently
# does not pass.
# # Submit a bunch of actor tasks with all-to-all communication.
# start_time = time.time()
# logger.info("Submitting actor tasks with all-to-all communication.")
# x_ids = []
# for _ in range(50):
# for size_exponent in [0, 1, 2, 3, 4, 5, 6]:
# x_ids = [a.method.remote(10**size_exponent, *x_ids) for a
# in actors]
# ray.get(x_ids)
# logger.info("Finished after %s seconds.", time.time() - start_time)
with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_put:
out_put.write(json.dumps(result))
| Actor |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 1379,
"end": 1908
} | class ____(unittest.TestCase):
def test_permute(self):
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
self.assertEqual(perms, permute("abc"))
def test_permute_iter(self):
it = permute_iter("abc")
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
for i in range(len(perms)):
self.assertEqual(perms[i], next(it))
def test_angram(self):
self.assertTrue(anagram('apple', 'pleap'))
self.assertFalse(anagram("apple", "cherry"))
| TestPermuteAndAnagram |
python | Pylons__pyramid | tests/test_config/pkgs/scannable/another.py | {
"start": 1570,
"end": 1860
} | class ____:
def __call__(self, context, request):
return 'another_grokked_instance'
grokked_instance = Foo()
grokked_instance = view_config(
name='another_grokked_instance', renderer=null_renderer
)(grokked_instance)
# ungrokkable
A = 1
B = {}
def stuff():
""" """
| Foo |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/descriptor.py | {
"start": 304,
"end": 386
} | class ____(type):
"""Descriptor metaclass docstring."""
| CustomDataDescriptorMeta |
python | kamyu104__LeetCode-Solutions | Python/thousand-separator.py | {
"start": 29,
"end": 367
} | class ____(object):
def thousandSeparator(self, n):
"""
:type n: int
:rtype: str
"""
result = []
s = str(n)
for i, c in enumerate(str(n)):
if i and (len(s)-i)%3 == 0:
result.append(".")
result.append(c)
return "".join(result)
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.