language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | faif__python-patterns | tests/test_hsm.py | {
"start": 1475,
"end": 3850
} | class ____(unittest.TestCase):
"""Exemplary 2nd level state test class (here: Standby state). Add missing
state test classes..."""
@classmethod
def setUpClass(cls):
cls.hsm = HierachicalStateMachine()
def setUp(cls):
cls.hsm._current_state = Standby(cls.hsm)
def test_given_standby_on_message_switchover_shall_set_active(cls):
cls.hsm.on_message("switchover")
cls.assertEqual(isinstance(cls.hsm._current_state, Active), True)
def test_given_standby_on_message_switchover_shall_call_hsm_methods(cls):
with (
patch.object(cls.hsm, "_perform_switchover") as mock_perform_switchover,
patch.object(cls.hsm, "_check_mate_status") as mock_check_mate_status,
patch.object(
cls.hsm, "_send_switchover_response"
) as mock_send_switchover_response,
patch.object(cls.hsm, "_next_state") as mock_next_state,
):
cls.hsm.on_message("switchover")
cls.assertEqual(mock_perform_switchover.call_count, 1)
cls.assertEqual(mock_check_mate_status.call_count, 1)
cls.assertEqual(mock_send_switchover_response.call_count, 1)
cls.assertEqual(mock_next_state.call_count, 1)
def test_given_standby_on_message_fault_trigger_shall_set_suspect(cls):
cls.hsm.on_message("fault trigger")
cls.assertEqual(isinstance(cls.hsm._current_state, Suspect), True)
def test_given_standby_on_message_diagnostics_failed_shall_raise_exception_and_keep_in_state(
cls,
):
with cls.assertRaises(UnsupportedTransition):
cls.hsm.on_message("diagnostics failed")
cls.assertEqual(isinstance(cls.hsm._current_state, Standby), True)
def test_given_standby_on_message_diagnostics_passed_shall_raise_exception_and_keep_in_state(
cls,
):
with cls.assertRaises(UnsupportedTransition):
cls.hsm.on_message("diagnostics passed")
cls.assertEqual(isinstance(cls.hsm._current_state, Standby), True)
def test_given_standby_on_message_operator_inservice_shall_raise_exception_and_keep_in_state(
cls,
):
with cls.assertRaises(UnsupportedTransition):
cls.hsm.on_message("operator inservice")
cls.assertEqual(isinstance(cls.hsm._current_state, Standby), True)
| StandbyStateTest |
python | getsentry__sentry-python | sentry_sdk/sessions.py | {
"start": 4465,
"end": 9172
} | class ____:
def __init__(
self,
capture_func, # type: Callable[[Envelope], None]
flush_interval=60, # type: int
):
# type: (...) -> None
self.capture_func = capture_func
self.flush_interval = flush_interval
self.pending_sessions = [] # type: List[Any]
self.pending_aggregates = {} # type: Dict[Any, Any]
self._thread = None # type: Optional[Thread]
self._thread_lock = Lock()
self._aggregate_lock = Lock()
self._thread_for_pid = None # type: Optional[int]
self.__shutdown_requested = Event()
def flush(self):
# type: (...) -> None
pending_sessions = self.pending_sessions
self.pending_sessions = []
with self._aggregate_lock:
pending_aggregates = self.pending_aggregates
self.pending_aggregates = {}
envelope = Envelope()
for session in pending_sessions:
if len(envelope.items) == MAX_ENVELOPE_ITEMS:
self.capture_func(envelope)
envelope = Envelope()
envelope.add_session(session)
for attrs, states in pending_aggregates.items():
if len(envelope.items) == MAX_ENVELOPE_ITEMS:
self.capture_func(envelope)
envelope = Envelope()
envelope.add_sessions(make_aggregate_envelope(states, attrs))
if len(envelope.items) > 0:
self.capture_func(envelope)
def _ensure_running(self):
# type: (...) -> None
"""
Check that we have an active thread to run in, or create one if not.
Note that this might fail (e.g. in Python 3.12 it's not possible to
spawn new threads at interpreter shutdown). In that case self._running
will be False after running this function.
"""
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
with self._thread_lock:
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
def _thread():
# type: (...) -> None
running = True
while running:
running = not self.__shutdown_requested.wait(self.flush_interval)
self.flush()
thread = Thread(target=_thread)
thread.daemon = True
try:
thread.start()
except RuntimeError:
# Unfortunately at this point the interpreter is in a state that no
# longer allows us to spawn a thread and we have to bail.
self.__shutdown_requested.set()
return None
self._thread = thread
self._thread_for_pid = os.getpid()
return None
def add_aggregate_session(
self,
session, # type: Session
):
# type: (...) -> None
# NOTE on `session.did`:
# the protocol can deal with buckets that have a distinct-id, however
# in practice we expect the python SDK to have an extremely high cardinality
# here, effectively making aggregation useless, therefore we do not
# aggregate per-did.
# For this part we can get away with using the global interpreter lock
with self._aggregate_lock:
attrs = session.get_json_attrs(with_user_info=False)
primary_key = tuple(sorted(attrs.items()))
secondary_key = session.truncated_started # (, session.did)
states = self.pending_aggregates.setdefault(primary_key, {})
state = states.setdefault(secondary_key, {})
if "started" not in state:
state["started"] = format_timestamp(session.truncated_started)
# if session.did is not None:
# state["did"] = session.did
if session.status == "crashed":
state["crashed"] = state.get("crashed", 0) + 1
elif session.status == "abnormal":
state["abnormal"] = state.get("abnormal", 0) + 1
elif session.errors > 0:
state["errored"] = state.get("errored", 0) + 1
else:
state["exited"] = state.get("exited", 0) + 1
def add_session(
self,
session, # type: Session
):
# type: (...) -> None
if session.session_mode == "request":
self.add_aggregate_session(session)
else:
self.pending_sessions.append(session.to_json())
self._ensure_running()
def kill(self):
# type: (...) -> None
self.__shutdown_requested.set()
| SessionFlusher |
python | mlflow__mlflow | .claude/hooks/lint.py | {
"start": 560,
"end": 1767
} | class ____:
start: int
end: int
def overlaps(self, start: int, end: int) -> bool:
return start <= self.end and self.start <= end
def parse_diff_ranges(diff_output: str) -> list[DiffRange]:
"""Parse unified diff output and extract added line ranges."""
ranges: list[DiffRange] = []
for line in diff_output.splitlines():
if line.startswith("@@ "):
if match := re.search(r"\+(\d+)(?:,(\d+))?", line):
start = int(match.group(1))
count = int(match.group(2)) if match.group(2) else 1
ranges.append(DiffRange(start=start, end=start + count))
return ranges
def overlaps_with_diff(node: ast.Constant, ranges: list[DiffRange]) -> bool:
return any(r.overlaps(node.lineno, node.end_lineno or node.lineno) for r in ranges)
def get_docstring_node(node: FuncNode) -> ast.Constant | None:
match node.body:
case [ast.Expr(value=ast.Constant(value=str()) as const), *_]:
return const
return None
def is_redundant_docstring(node: FuncNode) -> bool:
docstring = ast.get_docstring(node)
if not docstring:
return False
return "\n" not in docstring.strip()
| DiffRange |
python | aio-libs__aiohttp | aiohttp/web_routedef.py | {
"start": 976,
"end": 1853
} | class ____(AbstractRouteDef):
method: str
path: str
handler: _HandlerType
kwargs: dict[str, Any]
def __repr__(self) -> str:
info = []
for name, value in sorted(self.kwargs.items()):
info.append(f", {name}={value!r}")
return "<RouteDef {method} {path} -> {handler.__name__!r}{info}>".format(
method=self.method, path=self.path, handler=self.handler, info="".join(info)
)
def register(self, router: UrlDispatcher) -> list[AbstractRoute]:
if self.method in hdrs.METH_ALL:
reg = getattr(router, "add_" + self.method.lower())
return [reg(self.path, self.handler, **self.kwargs)]
else:
return [
router.add_route(self.method, self.path, self.handler, **self.kwargs)
]
@dataclasses.dataclass(frozen=True, repr=False)
| RouteDef |
python | celery__celery | celery/events/state.py | {
"start": 2446,
"end": 4524
} | class ____(defaultdict):
""":class:`~collections.defaultdict` with configurable __call__.
We use this for backwards compatibility in State.tasks_by_type
etc, which used to be a method but is now an index instead.
So you can do::
>>> add_tasks = state.tasks_by_type['proj.tasks.add']
while still supporting the method call::
>>> add_tasks = list(state.tasks_by_type(
... 'proj.tasks.add', reverse=True))
"""
def __init__(self, fun, *args, **kwargs):
self.fun = fun
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.fun(*args, **kwargs)
Callable.register(CallableDefaultdict)
@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
# we use memoize here so the warning is only logged once per hostname
warn(DRIFT_WARNING, hostname, drift,
datetime.fromtimestamp(local_received),
datetime.fromtimestamp(timestamp))
def heartbeat_expires(timestamp, freq=60,
expire_window=HEARTBEAT_EXPIRE_WINDOW,
Decimal=Decimal, float=float, isinstance=isinstance):
"""Return time when heartbeat expires."""
# some json implementations returns decimal.Decimal objects,
# which aren't compatible with float.
freq = float(freq) if isinstance(freq, Decimal) else freq
if isinstance(timestamp, Decimal):
timestamp = float(timestamp)
return timestamp + (freq * (expire_window / 1e2))
def _depickle_task(cls, fields):
return cls(**fields)
def with_unique_field(attr):
def _decorate_cls(cls):
def __eq__(this, other):
if isinstance(other, this.__class__):
return getattr(this, attr) == getattr(other, attr)
return NotImplemented
cls.__eq__ = __eq__
def __hash__(this):
return hash(getattr(this, attr))
cls.__hash__ = __hash__
return cls
return _decorate_cls
@with_unique_field('hostname')
| CallableDefaultdict |
python | getsentry__sentry | src/sentry/notifications/types.py | {
"start": 265,
"end": 1477
} | class ____(ValueEqualityEnum):
DEPLOY = "deploy"
ISSUE_ALERTS = "alerts"
WORKFLOW = "workflow"
APPROVAL = "approval"
# Notifications for when 100% reserved quota is reached
QUOTA = "quota"
# Notifications for when 80% reserved quota is reached
QUOTA_WARNINGS = "quotaWarnings"
# Notifications for when a specific threshold is reached
# If set, this overrides any notification preferences for QUOTA and QUOTA_WARNINGS.
QUOTA_THRESHOLDS = "quotaThresholds"
QUOTA_ERRORS = "quotaErrors"
QUOTA_TRANSACTIONS = "quotaTransactions"
QUOTA_ATTACHMENTS = "quotaAttachments"
QUOTA_REPLAYS = "quotaReplays"
QUOTA_MONITOR_SEATS = "quotaMonitorSeats"
QUTOA_UPTIME = "quotaUptime"
QUOTA_SPANS = "quotaSpans"
QUOTA_PROFILE_DURATION = "quotaProfileDuration"
QUOTA_PROFILE_DURATION_UI = "quotaProfileDurationUI"
QUOTA_SEER_BUDGET = "quotaSeerBudget"
QUOTA_SPEND_ALLOCATIONS = "quotaSpendAllocations"
QUOTA_LOG_BYTES = "quotaLogBytes"
QUOTA_SEER_USERS = "quotaSeerUsers"
SPIKE_PROTECTION = "spikeProtection"
MISSING_MEMBERS = "missingMembers"
REPORTS = "reports"
BROKEN_MONITORS = "brokenMonitors"
| NotificationSettingEnum |
python | getsentry__sentry | tests/sentry/data_export/test_tasks.py | {
"start": 28178,
"end": 47420
} | class ____(TestCase, SnubaTestCase, SpanTestCase, OurLogTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.org = self.create_organization()
self.project = self.create_project(organization=self.org)
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_spans_dataset_called_correctly(self, emailer: MagicMock) -> None:
spans = [
self.create_span(
{"description": "test_span"},
start_ts=before_now(minutes=10),
project=self.project,
organization=self.org,
)
]
self.store_spans(spans, is_eap=True)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["id", "description"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b"id,description"
assert b"test_span" in lines[1]
assert emailer.called
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_logs_dataset_called_correctly(self, emailer: MagicMock) -> None:
logs = [
self.create_ourlog(
{"body": "test log message", "severity_text": "INFO"},
timestamp=before_now(minutes=10),
attributes={"custom.field": "test_value"},
)
]
self.store_ourlogs(logs)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["log.body", "severity_text"],
"query": "",
"dataset": "logs",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
de = ExportedData.objects.get(id=de.id)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
with file.getfile() as f:
content = f.read().strip()
assert b"log.body,severity_text" in content
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_datasets_isolation(self, emailer: MagicMock) -> None:
spans = [
self.create_span(
{"description": "isolation_test_span"},
start_ts=before_now(minutes=10),
project=self.project,
organization=self.org,
)
]
self.store_spans(spans, is_eap=True)
logs = [
self.create_ourlog(
{"body": "isolation test log", "severity_text": "DEBUG"},
timestamp=before_now(minutes=10),
project=self.project,
organization=self.org,
)
]
self.store_ourlogs(logs)
# Test spans dataset export
de_spans = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["id", "description"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de_spans.id, batch_size=1)
de_spans = ExportedData.objects.get(id=de_spans.id)
assert de_spans.date_finished is not None
# Verify spans export contains span data but not log data
file_spans = de_spans._get_file()
assert isinstance(file_spans, File)
with file_spans.getfile() as f:
content_spans = f.read().strip()
assert b"id,description" in content_spans
assert b"isolation_test_span" in content_spans
assert b"isolation test log" not in content_spans
# Test logs dataset export
de_logs = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["log.body", "severity_text"],
"query": "",
"dataset": "logs",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de_logs.id, batch_size=1)
de_logs = ExportedData.objects.get(id=de_logs.id)
assert de_logs.date_finished is not None
# Verify logs export contains log data but not span data
file_logs = de_logs._get_file()
assert isinstance(file_logs, File)
with file_logs.getfile() as f:
content_logs = f.read().strip()
assert b"log.body,severity_text" in content_logs
assert b"isolation test log" in content_logs
assert b"isolation_test_span" not in content_logs
assert emailer.call_count == 2
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_batched(self, emailer: MagicMock) -> None:
spans = [
self.create_span(
{"description": "first_span", "sentry_tags": {"transaction": "txn1"}},
start_ts=before_now(minutes=10),
project=self.project,
organization=self.org,
),
self.create_span(
{"description": "second_span", "sentry_tags": {"transaction": "txn2"}},
start_ts=before_now(minutes=9),
project=self.project,
organization=self.org,
),
self.create_span(
{"description": "third_span", "sentry_tags": {"transaction": "txn3"}},
start_ts=before_now(minutes=8),
project=self.project,
organization=self.org,
),
]
self.store_spans(spans, is_eap=True)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["id", "description"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
# Convert raw csv to list of line-strings
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b"id,description"
# Should have data rows with our spans
assert len(lines) >= 2 # header + at least one data row
assert b"first_span" in content or b"second_span" in content or b"third_span" in content
assert emailer.called
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_respects_selected_environment(self, emailer: MagicMock) -> None:
self.create_environment(name="prod", organization=self.org)
# Create log data
logs = [
self.create_ourlog(
{"body": "production log", "severity_text": "ERROR"},
timestamp=before_now(minutes=10),
attributes={"environment": "prod"},
organization=self.org,
project=self.project,
)
]
self.store_ourlogs(logs)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"environment": "prod",
"field": ["log.body", "severity_text"],
"query": "",
"dataset": "logs",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
# Convert raw csv to list of line-strings
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b"log.body,severity_text"
# Should have data rows with our logs
assert len(lines) >= 2
assert b"production log" in content
assert b"ERROR" in content
assert emailer.called
@patch("sentry.data_export.models.ExportedData.email_failure")
def test_explore_missing_environment(self, emailer: MagicMock) -> None:
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"environment": "fake_environment",
"field": ["span_id"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
error = emailer.call_args[1]["message"]
assert error == "Requested environment does not exist"
@patch("sentry.data_export.models.ExportedData.email_failure")
def test_explore_missing_project(self, emailer: MagicMock) -> None:
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [-1],
"field": ["span_id"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id)
error = emailer.call_args[1]["message"]
assert error == "Requested project does not exist"
@patch("sentry.data_export.tasks.MAX_FILE_SIZE", 55)
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_export_file_too_large(self, emailer: MagicMock) -> None:
spans = [
self.create_span(
{"description": "test", "sentry_tags": {"transaction": "test_txn"}},
start_ts=before_now(minutes=10),
project=self.project,
organization=self.org,
)
for _ in range(5)
]
self.store_spans(spans, is_eap=True)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["id", "description"],
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=1)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
# Verify CSV content
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b""
# raising ExportDataFileTooBig returns 0
assert len(lines) >= 1
assert emailer.called
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_export_too_many_rows(self, emailer: MagicMock) -> None:
logs = [
self.create_ourlog(
{"body": f"test log {i}", "severity_text": "INFO"},
timestamp=before_now(minutes=10 - i),
organization=self.org,
project=self.project,
)
for i in range(5)
]
self.store_ourlogs(logs)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["log.body", "severity_text"],
"query": "",
"dataset": "logs",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
# Limit export to 2 rows
with self.tasks():
assemble_download(de.id, export_limit=2)
de = ExportedData.objects.get(id=de.id)
assert de.date_finished is not None
assert de.date_expired is not None
assert de.file_id is not None
file = de._get_file()
assert isinstance(file, File)
assert file.headers == {"Content-Type": "text/csv"}
assert file.size is not None
assert file.checksum is not None
# Verify CSV content respects row limit
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b"log.body,severity_text"
assert len(lines) == 3 # header + up to 2 data rows
assert emailer.called
@patch("sentry.data_export.models.ExportedData.email_success")
def test_explore_sort(self, emailer: MagicMock) -> None:
spans = [
self.create_span(
{"description": "span_alpha", "sentry_tags": {"transaction": "zeta_txn"}},
start_ts=before_now(minutes=10),
project=self.project,
organization=self.org,
),
self.create_span(
{"description": "span_beta", "sentry_tags": {"transaction": "alpha_txn"}},
start_ts=before_now(minutes=9),
project=self.project,
organization=self.org,
),
]
self.store_spans(spans, is_eap=True)
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["id", "description", "transaction"],
"sort": ["transaction"], # Sort by description descending
"query": "",
"dataset": "spans",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
with self.tasks():
assemble_download(de.id, batch_size=5)
de = ExportedData.objects.get(id=de.id)
file = de._get_file()
assert isinstance(file, File)
# Convert raw csv to list of line-strings
with file.getfile() as f:
content = f.read().strip()
lines = content.split(b"\r\n")
assert lines[0] == b"id,description,transaction"
assert b"alpha_txn" in lines[1]
assert b"zeta_txn" in lines[2]
assert emailer.called
@patch("sentry.snuba.ourlogs.OurLogs.run_table_query")
@patch("sentry.data_export.models.ExportedData.email_failure")
def test_explore_outside_retention(
self, emailer: MagicMock, mock_logs_query: MagicMock
) -> None:
de = ExportedData.objects.create(
user_id=self.user.id,
organization=self.org,
query_type=ExportQueryType.EXPLORE,
query_info={
"project": [self.project.id],
"field": ["log.body"],
"query": "",
"dataset": "logs",
"start": before_now(minutes=15).isoformat(),
"end": before_now(minutes=5).isoformat(),
},
)
mock_logs_query.side_effect = QueryOutsideRetentionError("test")
with self.tasks():
assemble_download(de.id)
error = emailer.call_args[1]["message"]
assert error == "Invalid date range. Please try a more recent date range."
# Test with unicode error
mock_logs_query.side_effect = QueryOutsideRetentionError("\xfc")
with self.tasks():
assemble_download(de.id)
error = emailer.call_args[1]["message"]
assert error == "Invalid date range. Please try a more recent date range."
| AssembleDownloadExploreTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 13204,
"end": 15650
} | class ____(NonStrictDataModel):
"""
:param preview: Description or textual data
:type preview: str
:param content_type: System defined raw data content type
:type content_type: str
:param data_hash: Hash of raw data, without any headers or descriptive parts
:type data_hash: str
"""
_schema = {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
preview: Optional[str] = None,
content_type: Optional[str] = None,
data_hash: Optional[str] = None,
**kwargs: Any
) -> None:
super(ArtifactTypeData, self).__init__(**kwargs)
self.preview = preview
self.content_type = content_type
self.data_hash = data_hash
@schema_property("preview")
def preview(self) -> Optional[str]:
return self._property_preview
@preview.setter
def preview(self, value: Optional[str]) -> None:
if value is None:
self._property_preview = None
return
self.assert_isinstance(value, "preview", six.string_types)
self._property_preview = value
@schema_property("content_type")
def content_type(self) -> Optional[str]:
return self._property_content_type
@content_type.setter
def content_type(self, value: Optional[str]) -> None:
if value is None:
self._property_content_type = None
return
self.assert_isinstance(value, "content_type", six.string_types)
self._property_content_type = value
@schema_property("data_hash")
def data_hash(self) -> Optional[str]:
return self._property_data_hash
@data_hash.setter
def data_hash(self, value: Optional[str]) -> None:
if value is None:
self._property_data_hash = None
return
self.assert_isinstance(value, "data_hash", six.string_types)
self._property_data_hash = value
| ArtifactTypeData |
python | ansible__ansible | test/units/parsing/vault/test_vault.py | {
"start": 13883,
"end": 15135
} | class ____(unittest.TestCase):
def test_randomname(self):
filename = 'randomname'
res = vault.script_is_client(filename)
self.assertFalse(res)
def test_something_dash_client(self):
filename = 'something-client'
res = vault.script_is_client(filename)
self.assertTrue(res)
def test_something_dash_client_somethingelse(self):
filename = 'something-client-somethingelse'
res = vault.script_is_client(filename)
self.assertFalse(res)
def test_something_dash_client_py(self):
filename = 'something-client.py'
res = vault.script_is_client(filename)
self.assertTrue(res)
def test_full_path_something_dash_client_py(self):
filename = '/foo/bar/something-client.py'
res = vault.script_is_client(filename)
self.assertTrue(res)
def test_full_path_something_dash_client(self):
filename = '/foo/bar/something-client'
res = vault.script_is_client(filename)
self.assertTrue(res)
def test_full_path_something_dash_client_in_dir(self):
filename = '/foo/bar/something-client/but/not/filename'
res = vault.script_is_client(filename)
self.assertFalse(res)
| TestScriptIsClient |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 3022,
"end": 3350
} | class ____(MixpanelHttpRequester):
def get_request_params(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
return {}
| AnnotationsHttpRequester |
python | TheAlgorithms__Python | data_structures/hashing/double_hash.py | {
"start": 774,
"end": 2736
} | class ____(HashTable):
"""
Hash Table example with open addressing and Double Hash
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __hash_function_2(self, value, data):
next_prime_gt = (
next_prime(value % self.size_table)
if not is_prime(value % self.size_table)
else value % self.size_table
) # gt = bigger than
return next_prime_gt - (data % next_prime_gt)
def __hash_double_function(self, key, data, increment):
return (increment * self.__hash_function_2(key, data)) % self.size_table
def _collision_resolution(self, key, data=None):
"""
Examples:
1. Try to add three data elements when the size is three
>>> dh = DoubleHash(3)
>>> dh.insert_data(10)
>>> dh.insert_data(20)
>>> dh.insert_data(30)
>>> dh.keys()
{1: 10, 2: 20, 0: 30}
2. Try to add three data elements when the size is two
>>> dh = DoubleHash(2)
>>> dh.insert_data(10)
>>> dh.insert_data(20)
>>> dh.insert_data(30)
>>> dh.keys()
{10: 10, 9: 20, 8: 30}
3. Try to add three data elements when the size is four
>>> dh = DoubleHash(4)
>>> dh.insert_data(10)
>>> dh.insert_data(20)
>>> dh.insert_data(30)
>>> dh.keys()
{9: 20, 10: 10, 8: 30}
"""
i = 1
new_key = self.hash_function(data)
while self.values[new_key] is not None and self.values[new_key] != key:
new_key = (
self.__hash_double_function(key, data, i)
if self.balanced_factor() >= self.lim_charge
else None
)
if new_key is None:
break
else:
i += 1
return new_key
if __name__ == "__main__":
import doctest
doctest.testmod()
| DoubleHash |
python | pytorch__pytorch | test/test_determination.py | {
"start": 127,
"end": 169
} | class ____:
verbose = False
| DummyOptions |
python | google__pytype | pytype/metrics_test.py | {
"start": 3934,
"end": 4976
} | class ____(unittest.TestCase):
"""Tests for MapCounter."""
def setUp(self):
super().setUp()
metrics._prepare_for_test()
def test_enabled(self):
c = metrics.MapCounter("foo")
# Check contents of an empty map.
self.assertEqual(0, c._total)
# Increment a few values and check again.
c.inc("x")
c.inc("y", 2)
c.inc("x", 5)
self.assertEqual(8, c._total)
self.assertDictEqual(dict(x=6, y=2), c._counts)
self.assertEqual("foo: 8 {x=6, y=2}", str(c))
def test_disabled(self):
metrics._prepare_for_test(enabled=False)
c = metrics.MapCounter("foo")
c.inc("x")
self.assertEqual(0, c._total)
def test_merge(self):
c = metrics.MapCounter("foo")
c.inc("x")
c.inc("y", 2)
# Cheat a little by merging a counter with a different name.
other = metrics.MapCounter("other")
other.inc("x")
other.inc("z")
c._merge(other)
# Check merged contents.
self.assertEqual(5, c._total)
self.assertDictEqual(dict(x=2, y=2, z=1), c._counts)
| MapCounterTest |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/pg_theano_random.py | {
"start": 716,
"end": 1266
} | class ____:
def __init__(self, M1, M2, f=T.nnet.relu, use_bias=True, zeros=False):
if zeros:
W = np.zeros((M1, M2))
else:
W = np.random.randn(M1, M2) * np.sqrt(2 / M1)
self.W = theano.shared(W)
self.params = [self.W]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(M2))
self.params += [self.b]
self.f = f
def forward(self, X):
if self.use_bias:
a = X.dot(self.W) + self.b
else:
a = X.dot(self.W)
return self.f(a)
# approximates pi(a | s)
| HiddenLayer |
python | wandb__wandb | wandb/automations/events.py | {
"start": 13579,
"end": 14068
} | class ____:
alias = FilterableField()
MetricThresholdFilter.model_rebuild()
RunMetricFilter.model_rebuild()
_WrappedSavedEventFilter.model_rebuild()
OnLinkArtifact.model_rebuild()
OnAddArtifactAlias.model_rebuild()
OnCreateArtifact.model_rebuild()
OnRunMetric.model_rebuild()
__all__ = [
"EventType",
*(nameof(cls) for cls in InputEventTypes),
"RunEvent",
"ArtifactEvent",
"MetricThresholdFilter",
"MetricChangeFilter",
"MetricZScoreFilter",
]
| ArtifactEvent |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_endpoints/test_user_endpoint.py | {
"start": 8669,
"end": 9417
} | class ____(TestUserEndpoint):
def test_should_response_200(self):
response = self.client.get("/fab/v1/users", environ_overrides={"REMOTE_USER": "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 2
usernames = [user["username"] for user in response.json["users"] if user]
assert usernames == ["test", "test_no_permissions"]
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/fab/v1/users")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get("/fab/v1/users", environ_overrides={"REMOTE_USER": "test_no_permissions"})
assert response.status_code == 403
| TestGetUsers |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py | {
"start": 29683,
"end": 32649
} | class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: Marketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {}
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def stream_slices(
self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"marketer_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for fetched in response.json().get("campaignResults"):
for x in fetched.get("results"):
x["marketer_id"] = stream_slice["marketer_id"]
x["campaign_id"] = fetched.get("campaignId")
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
stream_start, stream_end = self._get_time_interval(self.config.get("start_date"), self.config.get("end_date"))
stream_conversion_count = self._get_bool_conversion_count_by_click_date(
self.config.get("conversion_count", DEFAULT_REPORT_CONVERSION_COUNT_BY_CLICK_DATE)
)
return (
f"reports/marketers/{stream_slice['marketer_id']}/campaigns/publishers?from="
+ str(stream_start.date())
+ "&to="
+ str(stream_end.date())
+ "&limit=500"
+ "&includeVideoStats=true"
+ "&conversionsByClickDate="
+ str(stream_conversion_count)
)
# Retrieve performance statistics for a Marketer by platform.
# The API in this sub-section allows retrieving performance statistics by platform at different levels: marketer, budget, and campaign.
| PerformanceReportPublishersByCampaigns |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial004_py310.py | {
"start": 204,
"end": 603
} | class ____(BaseModel):
username: str
full_name: str | None = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item,
user: User,
importance: int = Body(gt=0),
q: str | None = None,
):
results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
if q:
results.update({"q": q})
return results
| User |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 1069,
"end": 1586
} | class ____(BaseEvent):
"""
LLMStructuredPredictStartEvent.
Args:
output_cls (Any): Output class to predict.
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
output_cls: Any
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictStartEvent"
| LLMStructuredPredictStartEvent |
python | spack__spack | lib/spack/spack/environment/list.py | {
"start": 10391,
"end": 10492
} | class ____(SpackError):
"""Error class for all errors related to SpecList objects."""
| SpecListError |
python | dateutil__dateutil | src/dateutil/tz/win.py | {
"start": 6640,
"end": 8730
} | class ____(tzwinbase):
"""
Time zone object created from the zone info in the Windows registry
These are similar to :py:class:`dateutil.tz.tzrange` objects in that
the time zone data is provided in the format of a single offset rule
for either 0 or 2 time zone transitions per year.
:param: name
The name of a Windows time zone key, e.g. "Eastern Standard Time".
The full list of keys can be retrieved with :func:`tzwin.list`.
"""
def __init__(self, name):
self._name = name
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
keydict = valuestodict(tzkey)
self._std_abbr = keydict["Std"]
self._dst_abbr = keydict["Dlt"]
self._display = keydict["Display"]
# See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
dstoffset = stdoffset-tup[2] # + DaylightBias * -1
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
| tzwin |
python | dask__distributed | distributed/utils_test.py | {
"start": 66813,
"end": 67520
} | class ____(Worker):
"""A Worker that sets event `in_get_data` the first time it enters the get_data
method and then does not answer the comms, thus leaving the task(s) in flight
indefinitely, until the test sets `block_get_data`
See also
--------
BarrierGetData
BlockedGatherDep
BlockedExecute
"""
def __init__(self, *args, **kwargs):
self.in_get_data = asyncio.Event()
self.block_get_data = asyncio.Event()
super().__init__(*args, **kwargs)
async def get_data(self, comm, *args, **kwargs):
self.in_get_data.set()
await self.block_get_data.wait()
return await super().get_data(comm, *args, **kwargs)
| BlockedGetData |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/trace_type_test.py | {
"start": 15940,
"end": 16784
} | class ____(test.TestCase):
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testGeneric(self):
trace_type.from_value(1)
trace_type.from_value(DummyGenericClass())
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testTensor(self):
tensor = array_ops.zeros([10])
trace_type.from_value(tensor)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testTuple(self):
trace_type.from_value((1, 2, 3))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testDict(self):
trace_type.from_value({1: 1, 2: 2, 3: 3})
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testList(self):
trace_type.from_value([1, 2, 3])
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testAttrs(self):
trace_type.from_value(TestAttrsClass(1, 2))
| TraceTypeMemoryTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 101728,
"end": 104482
} | class ____(OperatorExpression[_T]):
"""Describe a list of clauses, separated by an operator,
in a column expression context.
:class:`.ExpressionClauseList` differs from :class:`.ClauseList` in that
it represents a column-oriented DQL expression only, not an open ended
list of anything comma separated.
.. versionadded:: 2.0
"""
__visit_name__ = "expression_clauselist"
_traverse_internals: _TraverseInternalsType = [
("clauses", InternalTraversal.dp_clauseelement_tuple),
("operator", InternalTraversal.dp_operator),
]
clauses: typing_Tuple[ColumnElement[Any], ...]
group: bool
def __init__(
self,
operator: OperatorType,
*clauses: _ColumnExpressionArgument[Any],
type_: Optional[_TypeEngineArgument[_T]] = None,
):
self.operator = operator
self.clauses = tuple(
coercions.expect(
roles.ExpressionElementRole, clause, apply_propagate_attrs=self
)
for clause in clauses
)
self._is_implicitly_boolean = operators.is_boolean(self.operator)
self.type = type_api.to_instance(type_) # type: ignore
@property
def _flattened_operator_clauses(
self,
) -> typing_Tuple[ColumnElement[Any], ...]:
return self.clauses
def __iter__(self) -> Iterator[ColumnElement[Any]]:
return iter(self.clauses)
def __len__(self) -> int:
return len(self.clauses)
@property
def _select_iterable(self) -> _SelectIterable:
return (self,)
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def _append_inplace(self, clause: ColumnElement[Any]) -> None:
self.clauses += (clause,)
@classmethod
def _construct_for_list(
cls,
operator: OperatorType,
type_: TypeEngine[_T],
*clauses: ColumnElement[Any],
group: bool = True,
) -> ExpressionClauseList[_T]:
self = cls.__new__(cls)
self.group = group
if group:
self.clauses = tuple(
c.self_group(against=operator) for c in clauses
)
else:
self.clauses = clauses
self.operator = operator
self.type = type_
for c in clauses:
if c._propagate_attrs:
self._propagate_attrs = c._propagate_attrs
break
return self
def _negate(self) -> Any:
grouped = self.self_group(against=operators.inv)
assert isinstance(grouped, ColumnElement)
return UnaryExpression(grouped, operator=operators.inv)
| ExpressionClauseList |
python | pytorch__pytorch | test/distributed/test_dynamo_distributed.py | {
"start": 55176,
"end": 85702
} | class ____(DynamoDistributedSingleProcTestCase):
"""
Test harness initializes dist process group.
Test simple things here since they are simpler to debug.
Use TestMultiProc for things that really need to run on multiple nodes
"""
device_type = (
acc.type if (acc := torch.accelerator.current_accelerator()) else "cpu"
)
def get_model(
self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
):
m = ToyModel(
in_feat=in_feat,
hidden_feat=hidden_feat,
out_feat=out_feat,
ctx_manager=ctx_manager,
).to(self.device)
m.apply(init_weights)
inputs = torch.rand(bsz, in_feat).to(self.device)
outputs = m(inputs)
return m, inputs, outputs
@patch.object(config, "optimize_ddp", False)
def test_ddp_baseline_aot_eager(self):
from torch.nn.parallel import DistributedDataParallel as DDP
m, inputs, correct_outputs = self.get_model()
ddp_m = DDP(m, device_ids=self.device_ids)
ddp_m = torch.compile(ddp_m, backend="aot_eager")
outputs = ddp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", False)
def test_ddp_baseline_inductor(self):
from torch.nn.parallel import DistributedDataParallel as DDP
m, inputs, correct_outputs = self.get_model()
ddp_m = DDP(m, device_ids=self.device_ids)
ddp_m = torch.compile(ddp_m, backend="inductor")
outputs = ddp_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
@patch.object(config, "optimize_ddp", True)
def test_graph_split(self):
assert config.optimize_ddp
"""
Just ensures that the appropriate number of splits happen (based on
bucket size and model parameters) - verifies the number of times
the user-provided compiler is called by the DDPOptimizer which is
doing the graph splitting
"""
m, inputs, correct_outputs = self.get_model()
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
check_splits_compiler = CheckSplitsCompiler()
@torch.compile(backend=check_splits_compiler.compile_fn)
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)
# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
@patch.object(config, "optimize_ddp", True)
def test_graph_split_ctx_manager(self):
"""
Ensures that we get the right number of splits and that the respective
context managers' effects are applied to the computation.
"""
for get_compiler in [
lambda: CheckSplitsCompiler(),
lambda: None,
]:
for ctx_manager, output_test in [
(
lambda: torch.autocast(
torch.device(self.device).type, torch.float16
),
lambda out: self.assertEqual(out.dtype, torch.float16),
),
(torch.enable_grad, lambda out: self.assertTrue(out.requires_grad)),
(torch.no_grad, lambda out: self.assertTrue(not out.requires_grad)),
]:
m, inputs, correct_outputs = self.get_model(
out_feat=1000,
hidden_feat=1000,
in_feat=1000,
ctx_manager=ctx_manager,
)
# inp - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
# hidden - 1000 * 1000 matrix of float32 (4 bytes) = 4MB
bucket_cap_mb = 3.5 # 4MB
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=bucket_cap_mb)
compiler = get_compiler()
@torch.compile(backend=compiler.compile_fn if compiler else "aot_eager")
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
if compiler:
self.assertEqual(compiler.compiler_called, 4)
output_test(opt_outputs)
# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 4)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
@skipIfXpu # XPU device doesn't support flex_attention yet.
@patch.object(config, "optimize_ddp", True)
def test_compiled_flex_attention_full_model_ddp(self):
class Model(torch.nn.Module):
def __init__(self, S, H, D):
super().__init__()
self.S = S
self.H = H
self.D = D
alibi_bias = self.generate_alibi_bias(H)
self.register_buffer("alibi_bias", alibi_bias, persistent=True)
self.attention = flex_attention
self.project_qk = torch.nn.Linear(H * D, H * D * 2)
self.project_v = torch.nn.Linear(H * D, H * D)
def forward(self, hidden_states):
batch_size, _, _ = hidden_states.size()
query, key = self.project_qk(hidden_states).chunk(2, dim=2)
query = query.view(self.S, batch_size, self.H, self.D)
query = query.permute(1, 2, 0, 3)
key = key.view(self.S, batch_size, self.H, self.D)
key = key.permute(1, 2, 0, 3)
value = self.project_v(hidden_states)
value = value.view(self.S, batch_size, self.H, self.D)
value = value.permute(1, 2, 0, 3)
return self.attention(query, key, value, score_mod=self.alibi_score_mod)
def generate_alibi_bias(self, num_heads):
alibi_bias = [-((i + 1) * 8.0) / num_heads for i in range(num_heads)]
return torch.tensor(alibi_bias)
def alibi_score_mod(self, score, b, h, q_idx, kv_idx):
bias = (q_idx - kv_idx) * self.alibi_bias[h]
return score + bias
B = 16
H = 12
S = 512
D = 64
model = Model(S, H, D)
model.to(self.device_type)
model = torch.compile(model)
model = DDP(model, device_ids=self.device_ids)
hidden_states = torch.randn(B, S, H * D).to(self.device_type)
model(hidden_states)
torch.accelerator.synchronize()
@skipIfXpu # XPU device doesn't support flex_attention yet.
@patch.object(config, "optimize_ddp", True)
def test_compiled_flex_attention_local_ddp(self):
class Model(torch.nn.Module):
def __init__(self, S, H, D):
super().__init__()
self.S = S
self.H = H
self.D = D
alibi_bias = self.generate_alibi_bias(H)
self.register_buffer("alibi_bias", alibi_bias, persistent=True)
self.attention = torch.compile(flex_attention)
self.project_qk = torch.nn.Linear(H * D, H * D * 2)
self.project_v = torch.nn.Linear(H * D, H * D)
def forward(self, hidden_states):
batch_size, _, _ = hidden_states.size()
query, key = self.project_qk(hidden_states).chunk(2, dim=2)
query = query.view(self.S, batch_size, self.H, self.D)
query = query.permute(1, 2, 0, 3)
key = key.view(self.S, batch_size, self.H, self.D)
key = key.permute(1, 2, 0, 3)
value = self.project_v(hidden_states)
value = value.view(self.S, batch_size, self.H, self.D)
value = value.permute(1, 2, 0, 3)
return self.attention(query, key, value, score_mod=self.alibi_score_mod)
def generate_alibi_bias(self, num_heads):
alibi_bias = [-((i + 1) * 8.0) / num_heads for i in range(num_heads)]
return torch.tensor(alibi_bias)
def alibi_score_mod(self, score, b, h, q_idx, kv_idx):
bias = (q_idx - kv_idx) * self.alibi_bias[h]
return score + bias
B = 16
H = 12
S = 512
D = 64
model = Model(S, H, D)
model.to(self.device_type)
model = torch.compile(model)
model = DDP(model, device_ids=self.device_ids)
hidden_states = torch.randn(B, S, H * D).to(self.device_type)
model(hidden_states)
torch.accelerator.synchronize()
@patch.object(config, "optimize_ddp", True)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor(self):
assert config.optimize_ddp
"""
Same as above, but using inductor backend.
We observed issues with inductor/fx interface in the past.
"""
m, inputs, correct_outputs = self.get_model()
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
@torch.compile(backend="inductor")
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
@torch._inductor.config.patch(
{"layout_optimization": True, "keep_output_stride": False}
)
@patch.object(config, "optimize_ddp", True)
def _test_graph_split_inductor_layout_optimizations_impl(self, context):
assert config.optimize_ddp
channel_dim = 512
# channel dim must be > 64 for inductor to do layout optimization and use NHWC
class ToyModelConv(nn.Module):
def __init__(self) -> None:
super().__init__()
self.net = nn.Sequential(
*[
nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
nn.ReLU(),
]
+ [
nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
nn.ReLU(),
]
+ [
nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
nn.ReLU(),
]
+ [
nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False),
nn.ReLU(),
]
)
def forward(self, inputs):
return self.net(inputs)
def get_model():
m = ToyModelConv().to(self.device)
m.apply(init_weights)
inputs = torch.rand(2, channel_dim, channel_dim, 128).to(self.device)
outputs = m(inputs)
return m, inputs, outputs
with context():
m, inputs, correct_outputs = get_model()
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
@torch.compile(backend="inductor")
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_training(self):
self._test_graph_split_inductor_layout_optimizations_impl(
contextlib.nullcontext
)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_layout_optimizations_inference(self):
self._test_graph_split_inductor_layout_optimizations_impl(torch.no_grad)
@patch.object(config, "optimize_ddp", True)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_graph_split_inductor_transpose(self):
assert config.optimize_ddp
B = 100
N = 30
D = 50
K = 70
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear0 = nn.Linear(N, K)
self.linear1 = torch.nn.Linear(D * K, 2048)
def forward(self, x):
xt = x.transpose(2, 1)
xt = self.linear0(xt).flatten(1)
return self.linear1(xt)
mod = Foo().to(self.device)
compiled_mod = torch.compile(mod, backend="inductor")
ddp_compiled_mod = DDP(compiled_mod, device_ids=self.device_ids)
x = torch.randn((B, N, D), dtype=torch.float32, device=self.device)
self.assertTrue(same(mod(x), ddp_compiled_mod(x)))
x_1 = torch.randn((B * 2, N, D), dtype=torch.float32, device=self.device)
self.assertTrue(same(mod(x_1), ddp_compiled_mod(x_1)))
x_2 = torch.randn((B * 3, N, D), dtype=torch.float32, device=self.device)
self.assertTrue(same(mod(x_2), ddp_compiled_mod(x_2)))
@patch.object(config, "optimize_ddp", True)
def test_no_split(self):
"""
Ensures the DDPOptimizer returns a correct, compiled module without
introducing graph splits. (Based on model parameters fitting in the bucket)
"""
# DDP will always do a 'first bucket' with a really small size; so only a tiny model will escape this
m, inputs, correct_outputs = self.get_model(hidden_feat=5)
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=250)
check_splits_compiler = CheckSplitsCompiler()
@torch.compile(backend=check_splits_compiler.compile_fn)
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 1)
@patch.object(config, "optimize_ddp", True)
def test_aot_autograd(self):
"""
Explicitly check AotAutograd family of compilers work,
since they require example inputs propagated between graph splits.
"""
m, inputs, correct_outputs = self.get_model()
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
@torch.compile(backend="aot_eager")
def opt_fn(inputs):
return ddp_m(inputs)
opt_outputs = opt_fn(inputs)
opt_outputs.sum().backward()
self.assertTrue(same(correct_outputs, opt_outputs))
@patch.object(config, "optimize_ddp", True)
def test_custom_layer(self):
"""
Just ensures that the appropriate number of splits happen (based on
bucket size and model parameters) - verifies the number of times
the user-provided compiler is called by the DDPOptimizer which is
doing the graph splitting
"""
m, inputs, correct_outputs = get_custom_model(self.device)
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=1)
check_splits_compiler = CheckSplitsCompiler()
@torch.compile(backend=check_splits_compiler.compile_fn)
def opt_fn(inputs):
return ddp_m(*inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_empty_graph_inductor(self):
def fn():
get_world_size = torch.distributed.distributed_c10d.get_world_size()
return (get_world_size,)
opt_fn = torch.compile(fn, backend="inductor")
res = None
try:
res = opt_fn()[0]
except Exception:
pass
self.assertEqual(res, 1)
@patch.object(config, "optimize_ddp", False)
def test_ignored_parameters(self):
"""
Verifies ddp graph-split logic ignores parameters marked to ignore on DDP module.
Hooks up graph-split optimizer manually so it can peek at internal state.
"""
m, inputs, correct_outputs = get_custom_model(self.device)
parameters_to_ignore = ["seq.2.weight", "seq.4.linear.bias"]
DDP._set_params_and_buffers_to_ignore_for_model(m, parameters_to_ignore)
ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25)
parameter_ids_to_ignore = [
id(ddp_m.module.get_parameter(p)) for p in ddp_m.parameters_to_ignore
]
check_splits_compiler = CheckSplitsCompiler()
ddp_optimizer = DDPOptimizer(
bucket_bytes_cap=ddp_m.bucket_bytes_cap,
backend_compile_fn=check_splits_compiler.compile_fn,
)
@torch.compile(backend=ddp_optimizer.compile_fn)
def opt_fn(inputs):
return ddp_m(*inputs)
opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 2)
for b in ddp_optimizer.buckets:
for p_id in b.param_ids:
self.assertFalse(p_id in parameter_ids_to_ignore)
@patch.object(config, "optimize_ddp", True)
def test_higher_order_op(self):
from torch.utils.checkpoint import checkpoint
N = 1000
class InnerModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(N, N)
self.linear2 = torch.nn.Linear(N, N)
def forward(self, x):
a = self.linear1(x)
a = self.linear2(a)
return a
class MockModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.inner_mod1 = InnerModule()
self.inner_mod2 = InnerModule()
def forward(self, x):
a = checkpoint(self.inner_mod1, x, use_reentrant=False)
a = torch.cos(a)
a = checkpoint(self.inner_mod2, a, use_reentrant=False)
a = torch.cos(a)
return a
mod = MockModule().to(self.device_type)
mod = DDP(mod, bucket_cap_mb=1)
x = torch.randn(N, N, device=self.device_type, requires_grad=True)
args = (x,)
backend = "aot_eager"
cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)
torch.compile(mod, backend=cnt)(*args)
def test_fsdp_orig_params_assert(self):
# Test with basic FSDP wrapping (outer wrap around whole model)
m, inputs, _ = get_model(f"{self.device_type}:{self.rank}")
fsdp_m = FSDP(m, use_orig_params=False)
# Test is that this function call does not throw an exception.
fsdp_m = torch.compile(fsdp_m)
def test_fsdp_skip_guards(self):
"""
It's currently difficult to test dynamo guards. Most guards tests are indirect- modify something and
observe that the guard in question failed. In this case, since the FSDP guards were already deemed
useless and skipping them is expected to have no practical effect, it's pretty contrived to even try to
make those guards fail. Instead, we observe the 'guard source' printed by dynamo's comptime print_guards
function.
Note: comptime prints the guards before the time they get installed or not installed, so in both cases
(skip or no skip) the same guards get printed. The difference is that in the skip case, they show up
with a special 'guard source' which will cause them to not be installed. So all we check for is the expected
guard source 'local_fsdp_module'.
"""
global GUARDS_FILE
GUARDS_FILE = StringIO()
for skip_guards, expected_guard_source in (
(True, "local_fsdp_module"),
(False, "local_unspecialized_nn_module"),
):
torch._dynamo.reset()
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
super().__init__()
self.net = nn.Sequential(
*[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
+ [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
+ [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
+ [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
)
def forward(self, inputs):
out = self.net(inputs)
@comptime
def _(ctx):
ctx.print_guards(file=GUARDS_FILE)
return out
device = f"{self.device_type}:{self.rank}"
m = ToyModel(
in_feat=10,
hidden_feat=5000,
out_feat=5,
).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
fsdp_m = FSDP(m, use_orig_params=True)
with torch._dynamo.config.patch(skip_fsdp_guards=skip_guards):
opt_m = torch.compile(fsdp_m, backend="aot_eager")
outputs = opt_m(inputs)
# far from an exhaustive check of all the expected guards, just check a couple of them.
FileCheck().check("""local "L['self']" TYPE_MATCH""").check(
f"""{expected_guard_source} "L['self']._modules['net']" TYPE_MATCH"""
).check(
f"""{expected_guard_source} "L['self']._modules['net']._modules['0']" TYPE_MATCH"""
).run(GUARDS_FILE.getvalue())
self.assertTrue(same(correct_outputs, outputs))
def test_fsdp_skip_register_attr_or_module(self):
"""
ensure FSDP module is not registered as attributes
in the fx graph
see `not source.guard_source().is_fsdp_module()`
before calling `register_attr_or_module`
in variables/builder.py
"""
class ToyModel(nn.Module):
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
super().__init__()
self.net = nn.Sequential(
*[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
+ [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
)
def forward(self, inputs):
out = self.net(inputs)
return out
torch._dynamo.reset()
device = f"{self.device_type}:{self.rank}"
m = ToyModel(
in_feat=10,
hidden_feat=5000,
out_feat=5,
).to(device)
inputs = torch.rand(20, 10).to(device)
m.apply(init_weights)
correct_outputs = m(inputs)
fsdp_m = FSDP(m, use_orig_params=True)
def debug_compiler(gm, _):
for node in gm.graph.nodes:
if node.op == "get_attr":
for name in [
"l__self___net_0_weight",
"l__self___net_0_bias",
"l__self___net_2_weight",
"l__self___net_2_bias",
]:
self.assertFalse(
name in node.name,
f"FSDP module {name} should not be registered as attributes",
)
return gm
opt_m = torch.compile(fsdp_m, backend=debug_compiler)
outputs = opt_m(inputs)
self.assertTrue(same(correct_outputs, outputs))
def test_fsdp_dup_tensors_same_source(self):
"""
Tests that FSDP-managed modules' parameters and buffers with the same
source are de-duplicated, meaning that they are each only passed once
as a graph input.
"""
class DuplicateModule(nn.Module):
def __init__(self) -> None:
super().__init__()
device_type = (
acc.type
if (acc := torch.accelerator.current_accelerator())
else "cpu"
)
self._param = torch.randn((3,), device=device_type)
self._buf = torch.nn.Buffer(
torch.randn((3,), requires_grad=False, device=device_type)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Use `_param` and `_buf` each twice in this compiled forward
# to exercise if they are de-duplicated by TorchDynamo
z = x + self._buf + self._buf
z += self._param + self._param
return z
model = DuplicateModule()
fsdp_model = FSDP(copy.deepcopy(model), use_orig_params=True)
fsdp_model = torch.compile(fsdp_model, backend="aot_eager")
inp = torch.randn((2, 3), device=self.device_type)
local_out = model(inp)
fsdp_out = fsdp_model(inp)
self.assertEqual(local_out, fsdp_out)
@patch.object(config, "guard_nn_modules", True)
def test_fsdp_dup_tensors_diff_source(self):
"""
Tests that FSDP-managed modules' parameters and buffers with different
source do not result in incorrect AOTAutograd de-dup guards like
``a is b``, where ``a`` and ``b`` are certainly not the same. We check
this by checking for per-invocation recompiles.
"""
class BufModule(nn.Module):
def __init__(self) -> None:
super().__init__()
device_type = (
acc.type
if (acc := torch.accelerator.current_accelerator())
else "cpu"
)
self._buf = nn.Buffer(
torch.randn((3,), requires_grad=False, device=device_type)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self._buf
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
device_type = (
acc.type
if (acc := torch.accelerator.current_accelerator())
else "cpu"
)
self._param = nn.Parameter(torch.randn((1,), device=device_type))
self._buf_module = BufModule()
# Share the buffer, meaning same tensor but different source
self._buf = self._buf_module._buf
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Use the same buffer tensor twice in the compiled forward,
# including a data mutation to trigger de-dup logic
self._buf.mul_(2)
z = x + self._buf
z = self._buf_module(z)
z += self._param
return z
fsdp_model = FSDP(Model(), use_orig_params=True)
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
fsdp_model = torch.compile(fsdp_model, backend=cnt)
inp = torch.randn((2, 3), device=self.device_type)
for _ in range(15):
fsdp_model(inp)
# Check for no recompiles (if there were incorrect de-dup guards, then
# the frame count would be equal to the number of forward calls)
self.assertEqual(cnt.frame_count, 1)
def test_fsdp_staticmethod(self):
"""
Tests that Dynamo compiles staticmethods for FSDP-managed modules
correctly both when the staticmethod is invoked from the class and from
the object itself.
"""
class ModuleWithStaticMethod(nn.Module):
def __init__(self, use_self: bool):
super().__init__()
self._use_self = use_self
torch.manual_seed(42) # force `_param` to be deterministic
device_type = (
acc.type
if (acc := torch.accelerator.current_accelerator())
else "cpu"
)
self._param = nn.Parameter(torch.randn((3,), device=device_type))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self._use_self:
z = self._add(x, self._param)
else:
z = ModuleWithStaticMethod._add(x, self._param)
z *= 2
return z
@staticmethod
def _add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
model = ModuleWithStaticMethod(False)
x = torch.randn((2, 3), device=self.device_type)
ref_out = model(x)
test_outs: list[torch.Tensor] = []
for use_self in (False, True):
model = ModuleWithStaticMethod(use_self)
fsdp_model = FSDP(model, use_orig_params=True)
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
fsdp_model = torch.compile(fsdp_model, backend=cnt)
test_outs.append(fsdp_model(x))
# Check for no recompiles, which could happen if incorrectly
# passing args to the staticmethod (e.g. doubly passing `self`)
# 3 is expected here for 1 forward.
# Graph 1 should be add and imul
self.assertEqual(cnt.frame_count, 1)
for test_out in test_outs:
self.assertEqual(test_out, ref_out)
def test_async_subclass_no_specialize(self):
cnt = torch._dynamo.testing.CompileCounterWithBackend("eager")
@torch.compile(backend=cnt, fullgraph=True, dynamic=True)
def f(x):
return x + 1
f(_maybe_wrap_tensor(torch.randn(10)))
f(_maybe_wrap_tensor(torch.randn(12)))
self.assertEqual(cnt.frame_count, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestSingleProc |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py | {
"start": 11981,
"end": 12898
} | class ____:
"""Test logging functionality."""
def test_custom_logger(self):
"""Test that custom logger is properly stored."""
import logging
custom_logger = logging.getLogger("test_confluence_logger")
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="test_token",
logger=custom_logger,
)
assert reader.logger == custom_logger
def test_default_logger(self):
"""Test that default logger is used when none provided."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
# Should use internal logger
assert reader.logger is not None
assert hasattr(reader.logger, "info")
assert hasattr(reader.logger, "error")
assert hasattr(reader.logger, "warning")
| TestLogging |
python | django__django | tests/admin_custom_urls/tests.py | {
"start": 392,
"end": 5801
} | class ____(TestCase):
"""
Remember that:
* The Action model has a CharField PK.
* The ModelAdmin for Action customizes the add_view URL, it's
'<app name>/<model name>/!add/'
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
Action.objects.create(name="delete", description="Remove things.")
Action.objects.create(name="rename", description="Gives things other names.")
Action.objects.create(name="add", description="Add things.")
Action.objects.create(
name="path/to/file/", description="An action with '/' in its name."
)
Action.objects.create(
name="path/to/html/document.html",
description="An action with a name similar to a HTML doc path.",
)
Action.objects.create(
name="javascript:alert('Hello world');\">Click here</a>",
description="An action with a name suspected of being a XSS attempt",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_basic_add_GET(self):
"""
Ensure GET on the add_view works.
"""
add_url = reverse("admin_custom_urls:admin_custom_urls_action_add")
self.assertTrue(add_url.endswith("/!add/"))
response = self.client.get(add_url)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
"""
Ensure GET on the add_view plus specifying a field value in the query
string works.
"""
response = self.client.get(
reverse("admin_custom_urls:admin_custom_urls_action_add"),
{"name": "My Action"},
)
self.assertContains(response, 'value="My Action"')
def test_basic_add_POST(self):
"""
Ensure POST on add_view works.
"""
post_data = {
IS_POPUP_VAR: "1",
"name": "Action added through a popup",
"description": "Description of added action",
}
response = self.client.post(
reverse("admin_custom_urls:admin_custom_urls_action_add"), post_data
)
self.assertContains(response, "Action added through a popup")
def test_admin_URLs_no_clash(self):
# Should get the change_view for model instance with PK 'add', not show
# the add_view
url = reverse(
"admin_custom_urls:%s_action_change" % Action._meta.app_label,
args=(quote("add"),),
)
response = self.client.get(url)
self.assertContains(response, "Change action")
# Should correctly get the change_view for the model instance with the
# funny-looking PK (the one with a 'path/to/html/document.html' value)
url = reverse(
"admin_custom_urls:%s_action_change" % Action._meta.app_label,
args=(quote("path/to/html/document.html"),),
)
response = self.client.get(url)
self.assertContains(response, "Change action")
self.assertContains(response, 'value="path/to/html/document.html"')
def test_post_save_add_redirect(self):
"""
ModelAdmin.response_post_save_add() controls the redirection after
the 'Save' button has been pressed when adding a new object.
"""
post_data = {"name": "John Doe"}
self.assertEqual(Person.objects.count(), 0)
response = self.client.post(
reverse("admin_custom_urls:admin_custom_urls_person_add"), post_data
)
persons = Person.objects.all()
self.assertEqual(len(persons), 1)
redirect_url = reverse(
"admin_custom_urls:admin_custom_urls_person_history", args=[persons[0].pk]
)
self.assertRedirects(response, redirect_url)
def test_post_save_change_redirect(self):
"""
ModelAdmin.response_post_save_change() controls the redirection after
the 'Save' button has been pressed when editing an existing object.
"""
Person.objects.create(name="John Doe")
self.assertEqual(Person.objects.count(), 1)
person = Person.objects.all()[0]
post_url = reverse(
"admin_custom_urls:admin_custom_urls_person_change", args=[person.pk]
)
response = self.client.post(post_url, {"name": "Jack Doe"})
self.assertRedirects(
response,
reverse(
"admin_custom_urls:admin_custom_urls_person_delete", args=[person.pk]
),
)
def test_post_url_continue(self):
"""
The ModelAdmin.response_add()'s parameter `post_url_continue` controls
the redirection after an object has been created.
"""
post_data = {"name": "SuperFast", "_continue": "1"}
self.assertEqual(Car.objects.count(), 0)
response = self.client.post(
reverse("admin_custom_urls:admin_custom_urls_car_add"), post_data
)
cars = Car.objects.all()
self.assertEqual(len(cars), 1)
self.assertRedirects(
response,
reverse(
"admin_custom_urls:admin_custom_urls_car_history", args=[cars[0].pk]
),
)
| AdminCustomUrlsTest |
python | Textualize__textual | src/textual/messages.py | {
"start": 379,
"end": 498
} | class ____(Message, verbose=True, bubble=False):
"""Ask the node to prune (remove from DOM)."""
@rich.repr.auto
| Prune |
python | gevent__gevent | src/greentest/3.14/test_timeout.py | {
"start": 4637,
"end": 10283
} | class ____(TimeoutTestCase):
"""TCP test case for socket.socket() timeout functions"""
def setUp(self):
self.sock = self.enterContext(
socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self.addr_remote = resolve_address('www.python.org.', 80)
def testConnectTimeout(self):
# Testing connect timeout is tricky: we need to have IP connectivity
# to a host that silently drops our packets. We can't simulate this
# from Python because it's a function of the underlying TCP/IP stack.
# So, the following port on the pythontest.net host has been defined:
blackhole = resolve_address('pythontest.net', 56666)
# Blackhole has been configured to silently drop any incoming packets.
# No RSTs (for TCP) or ICMP UNREACH (for UDP/ICMP) will be sent back
# to hosts that attempt to connect to this address: which is exactly
# what we need to confidently test connect timeout.
# However, we want to prevent false positives. It's not unreasonable
# to expect certain hosts may not be able to reach the blackhole, due
# to firewalling or general network configuration. In order to improve
# our confidence in testing the blackhole, a corresponding 'whitehole'
# has also been set up using one port higher:
whitehole = resolve_address('pythontest.net', 56667)
# This address has been configured to immediately drop any incoming
# packets as well, but it does it respectfully with regards to the
# incoming protocol. RSTs are sent for TCP packets, and ICMP UNREACH
# is sent for UDP/ICMP packets. This means our attempts to connect to
# it should be met immediately with ECONNREFUSED. The test case has
# been structured around this premise: if we get an ECONNREFUSED from
# the whitehole, we proceed with testing connect timeout against the
# blackhole. If we don't, we skip the test (with a message about not
# getting the required RST from the whitehole within the required
# timeframe).
# For the records, the whitehole/blackhole configuration has been set
# up using the 'iptables' firewall, using the following rules:
#
# -A INPUT -p tcp --destination-port 56666 -j DROP
# -A INPUT -p udp --destination-port 56666 -j DROP
# -A INPUT -p tcp --destination-port 56667 -j REJECT
# -A INPUT -p udp --destination-port 56667 -j REJECT
#
# See https://github.com/python/psf-salt/blob/main/pillar/base/firewall/snakebite.sls
# for the current configuration.
skip = True
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
timeout = support.LOOPBACK_TIMEOUT
sock.settimeout(timeout)
sock.connect((whitehole))
except TimeoutError:
pass
except OSError as err:
if err.errno == errno.ECONNREFUSED:
skip = False
if skip:
self.skipTest(
"We didn't receive a connection reset (RST) packet from "
"{}:{} within {} seconds, so we're unable to test connect "
"timeout against the corresponding {}:{} (which is "
"configured to silently drop packets)."
.format(
whitehole[0],
whitehole[1],
timeout,
blackhole[0],
blackhole[1],
)
)
# All that hard work just to test if connect times out in 0.001s ;-)
self.addr_remote = blackhole
with socket_helper.transient_internet(self.addr_remote[0]):
self._sock_operation(1, 0.001, 'connect', self.addr_remote)
def testRecvTimeout(self):
# Test recv() timeout
with socket_helper.transient_internet(self.addr_remote[0]):
self.sock.connect(self.addr_remote)
self._sock_operation(1, 1.5, 'recv', 1024)
def testAcceptTimeout(self):
# Test accept() timeout
socket_helper.bind_port(self.sock, self.localhost)
self.sock.listen()
self._sock_operation(1, 1.5, 'accept')
def testSend(self):
# Test send() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
socket_helper.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'send', b"X" * 200000)
def testSendto(self):
# Test sendto() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
socket_helper.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# The address argument is ignored since we already connected.
self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
serv.getsockname())
def testSendall(self):
# Test sendall() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
socket_helper.bind_port(serv, self.localhost)
serv.listen()
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
| TCPTimeoutTestCase |
python | django__django | tests/model_forms/tests.py | {
"start": 48536,
"end": 74729
} | class ____(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(
name="Entertainment", slug="entertainment", url="entertainment"
)
self.c2 = Category.objects.create(
name="It's a test", slug="its-test", url="test"
)
self.c3 = Category.objects.create(
name="Third test", slug="third-test", url="third"
)
self.w_royko = Writer.objects.create(name="Mike Royko")
self.w_woodward = Writer.objects.create(name="Bob Woodward")
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
'<div><label for="id_name">Name:</label><input type="text" name="name" '
'maxlength="20" required id="id_name"></div><div><label for="id_slug">Slug:'
'</label><input type="text" name="slug" maxlength="20" required '
'id="id_slug"></div><div><label for="id_url">The URL:</label>'
'<input type="text" name="url" maxlength="40" required id="id_url"></div>',
)
self.assertHTMLEqual(
str(f.as_ul()),
"""
<li><label for="id_name">Name:</label>
<input id="id_name" type="text" name="name" maxlength="20" required></li>
<li><label for="id_slug">Slug:</label>
<input id="id_slug" type="text" name="slug" maxlength="20" required></li>
<li><label for="id_url">The URL:</label>
<input id="id_url" type="text" name="url" maxlength="40" required></li>
""",
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" required>""",
)
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" required></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required></li>
<li>The URL: <input type="text" name="url" maxlength="40" required></li>""",
)
def test_initial_values(self):
self.create_basic_data()
# Initial values can be provided for model forms
f = ArticleForm(
auto_id=False,
initial={
"headline": "Your headline here",
"categories": [str(self.c1.id), str(self.c2.id)],
},
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="Your headline here" maxlength="50"
required>
</li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
# When the ModelForm is passed an instance, that instance's current
# values are inserted as 'initial' data in each Field.
f = RoykoForm(auto_id=False, instance=self.w_royko)
self.assertHTMLEqual(
str(f),
'<div>Name:<div class="helptext">Use both first and last names.</div>'
'<input type="text" name="name" value="Mike Royko" maxlength="50" '
"required></div>",
)
art = Article.objects.create(
headline="Test article",
slug="test-article",
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
article="Hello.",
)
art_id_1 = art.id
f = ArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="Test article" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="test-article" maxlength="50" required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
f = ArticleForm(
{
"headline": "Test headline",
"slug": "test-headline",
"pub_date": "1984-02-06",
"writer": str(self.w_royko.pk),
"article": "Hello.",
},
instance=art,
)
self.assertEqual(f.errors, {})
self.assertTrue(f.is_valid())
test_art = f.save()
self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, "Test headline")
def test_m2m_initial_callable(self):
"""
A callable can be provided as the initial value for an m2m field.
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == "categories":
kwargs["initial"] = lambda: Category.objects.order_by("name")[:2]
return db_field.formfield(**kwargs)
# Create a ModelForm, instantiate it, and check that the output is as
# expected
ModelForm = modelform_factory(
Article,
fields=["headline", "categories"],
formfield_callback=formfield_for_dbfield,
)
form = ModelForm()
self.assertHTMLEqual(
form.as_ul(),
"""<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required></li>
<li><label for="id_categories">Categories:</label>
<select multiple name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It's a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk),
)
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm(
{
"name": "Entertainment",
"slug": "entertainment",
"url": "entertainment",
}
)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["name"], "Entertainment")
self.assertEqual(f.cleaned_data["slug"], "entertainment")
self.assertEqual(f.cleaned_data["url"], "entertainment")
c1 = f.save()
# Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object
# that hasn't yet been saved to the database. In this case, it's up to
# you to call save() on the resulting model instance.
f = BaseCategoryForm(
{"name": "Third test", "slug": "third-test", "url": "third"}
)
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({"name": "", "slug": "not a slug!", "url": "foo"})
self.assertEqual(f.errors["name"], ["This field is required."])
self.assertEqual(
f.errors["slug"],
[
"Enter a valid “slug” consisting of letters, numbers, underscores or "
"hyphens."
],
)
self.assertEqual(f.cleaned_data, {"url": "foo"})
msg = "The Category could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
f.save()
f = BaseCategoryForm({"name": "", "slug": "", "url": "foo"})
with self.assertRaisesMessage(ValueError, msg):
f.save()
def test_multi_fields(self):
self.create_basic_data()
self.maxDiff = None
# ManyToManyFields are represented by a MultipleChoiceField,
# ForeignKeys and any fields with the 'choices' attribute are
# represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
"""
<div>Headline:
<input type="text" name="headline" maxlength="50" required>
</div>
<div>Slug:
<input type="text" name="slug" maxlength="50" required>
</div>
<div>Pub date:
<input type="text" name="pub_date" required>
</div>
<div>Writer:
<select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select>
</div>
<div>Article:
<textarea name="article" cols="40" rows="10" required></textarea>
</div>
<div>Categories:
<select name="categories" multiple>
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select>
</div>
<div>Status:
<select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option><option value="2">Pending</option>
<option value="3">Live</option>
</select>
</div>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
# Add some categories and test the many-to-many form output.
new_art = Article.objects.create(
article="Hello.",
headline="New headline",
slug="new-headline",
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
)
new_art.categories.add(Category.objects.get(name="Entertainment"))
self.assertSequenceEqual(new_art.categories.all(), [self.c1])
f = ArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="New headline" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="new-headline" maxlength="50" required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article:
<textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
"""
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
def test_subset_fields(self):
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to
# have a value of None. If a field isn't specified on a form, the
# object created from the form can't provide a value for that field!
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "pub_date")
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'<div>Headline:<input type="text" name="headline" maxlength="50" required>'
'</div><div>Pub date:<input type="text" name="pub_date" required></div>',
)
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "slug", "pub_date")
w_royko = Writer.objects.create(name="Mike Royko")
art = Article.objects.create(
article="Hello.",
headline="New headline",
slug="new-headline",
pub_date=datetime.date(1988, 1, 4),
writer=w_royko,
)
f = PartialArticleFormWithSlug(
{
"headline": "New headline",
"slug": "new-headline",
"pub_date": "1988-01-04",
},
auto_id=False,
instance=art,
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li>Headline:
<input type="text" name="headline" value="New headline" maxlength="50"
required>
</li>
<li>Slug:
<input type="text" name="slug" value="new-headline" maxlength="50"
required>
</li>
<li>Pub date:
<input type="text" name="pub_date" value="1988-01-04" required></li>
""",
)
self.assertTrue(f.is_valid())
new_art = f.save()
self.assertEqual(new_art.id, art.id)
new_art = Article.objects.get(id=art.id)
self.assertEqual(new_art.headline, "New headline")
def test_m2m_editing(self):
self.create_basic_data()
form_data = {
"headline": "New headline",
"slug": "new-headline",
"pub_date": "1988-01-04",
"writer": str(self.w_royko.pk),
"article": "Hello.",
"categories": [str(self.c1.id), str(self.c2.id)],
}
# Create a new article, with categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
new_art = Article.objects.get(id=new_art.id)
art_id_1 = new_art.id
self.assertSequenceEqual(
new_art.categories.order_by("name"), [self.c1, self.c2]
)
# Now, submit form data with no categories. This deletes the existing
# categories.
form_data["categories"] = []
f = ArticleForm(form_data, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertSequenceEqual(new_art.categories.all(), [])
# Create a new article, with no categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
art_id_2 = new_art.id
self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertSequenceEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use
# commit=False. The m2m data won't be saved until save_m2m() is invoked
# on the form.
form_data["categories"] = [str(self.c1.id), str(self.c2.id)]
f = ArticleForm(form_data)
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_3 = new_art.id
self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_3)
self.assertSequenceEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertSequenceEqual(
new_art.categories.order_by("name"), [self.c1, self.c2]
)
def test_custom_form_fields(self):
# Here, we define a custom ModelForm. Because it happens to have the
# same fields as the Category model, we can just call the form's save()
# to apply its changes to an existing Category instance.
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = "__all__"
cat = Category.objects.create(name="Third test")
form = ShortCategory(
{"name": "Third", "slug": "third", "url": "3rd"}, instance=cat
)
self.assertEqual(form.save().name, "Third")
self.assertEqual(Category.objects.get(id=cat.id).name, "Third")
def test_runtime_choicefield_populated(self):
self.maxDiff = None
# Here, we demonstrate that choices for a ForeignKey ChoiceField are
# determined at runtime, based on the data in the database when the
# form is displayed, not the data in the database when the form is
# instantiated.
self.create_basic_data()
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
'<li>Headline: <input type="text" name="headline" maxlength="50" required>'
"</li>"
'<li>Slug: <input type="text" name="slug" maxlength="50" required></li>'
'<li>Pub date: <input type="text" name="pub_date" required></li>'
'<li>Writer: <select name="writer" required>'
'<option value="" selected>---------</option>'
'<option value="%s">Bob Woodward</option>'
'<option value="%s">Mike Royko</option>'
"</select></li>"
'<li>Article: <textarea rows="10" cols="40" name="article" required>'
"</textarea></li>"
'<li>Categories: <select multiple name="categories">'
'<option value="%s">Entertainment</option>'
'<option value="%s">It's a test</option>'
'<option value="%s">Third test</option>'
"</select> </li>"
'<li>Status: <select name="status">'
'<option value="" selected>---------</option>'
'<option value="1">Draft</option>'
'<option value="2">Pending</option>'
'<option value="3">Live</option>'
"</select></li>"
% (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk),
)
c4 = Category.objects.create(name="Fourth", url="4th")
w_bernstein = Writer.objects.create(name="Carl Bernstein")
self.assertHTMLEqual(
f.as_ul(),
'<li>Headline: <input type="text" name="headline" maxlength="50" required>'
"</li>"
'<li>Slug: <input type="text" name="slug" maxlength="50" required></li>'
'<li>Pub date: <input type="text" name="pub_date" required></li>'
'<li>Writer: <select name="writer" required>'
'<option value="" selected>---------</option>'
'<option value="%s">Bob Woodward</option>'
'<option value="%s">Carl Bernstein</option>'
'<option value="%s">Mike Royko</option>'
"</select></li>"
'<li>Article: <textarea rows="10" cols="40" name="article" required>'
"</textarea></li>"
'<li>Categories: <select multiple name="categories">'
'<option value="%s">Entertainment</option>'
'<option value="%s">It's a test</option>'
'<option value="%s">Third test</option>'
'<option value="%s">Fourth</option>'
"</select></li>"
'<li>Status: <select name="status">'
'<option value="" selected>---------</option>'
'<option value="1">Draft</option>'
'<option value="2">Pending</option>'
'<option value="3">Live</option>'
"</select></li>"
% (
self.w_woodward.pk,
w_bernstein.pk,
self.w_royko.pk,
self.c1.pk,
self.c2.pk,
self.c3.pk,
c4.pk,
),
)
@isolate_apps("model_forms")
def test_callable_choices_are_lazy(self):
call_count = 0
def get_animal_choices():
nonlocal call_count
call_count += 1
return [("LION", "Lion"), ("ZEBRA", "Zebra")]
class ZooKeeper(models.Model):
animal = models.CharField(
blank=True,
choices=get_animal_choices,
max_length=5,
)
class ZooKeeperForm(forms.ModelForm):
class Meta:
model = ZooKeeper
fields = ["animal"]
self.assertEqual(call_count, 0)
form = ZooKeeperForm()
self.assertEqual(call_count, 0)
self.assertIsInstance(form.fields["animal"].choices, BlankChoiceIterator)
self.assertEqual(call_count, 0)
self.assertEqual(
form.fields["animal"].choices,
models.BLANK_CHOICE_DASH + [("LION", "Lion"), ("ZEBRA", "Zebra")],
)
self.assertEqual(call_count, 1)
def test_recleaning_model_form_instance(self):
"""
Re-cleaning an instance that was added via a ModelForm shouldn't raise
a pk uniqueness error.
"""
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = "__all__"
form = AuthorForm({"full_name": "Bob"})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = "Alice"
obj.full_clean()
def test_validate_foreign_key_uses_default_manager(self):
class MyForm(forms.ModelForm):
class Meta:
model = Article
fields = "__all__"
# Archived writers are filtered out by the default manager.
w = Writer.objects.create(name="Randy", archived=True)
data = {
"headline": "My Article",
"slug": "my-article",
"pub_date": datetime.date.today(),
"writer": w.pk,
"article": "lorem ipsum",
}
form = MyForm(data)
self.assertIs(form.is_valid(), False)
self.assertEqual(
form.errors,
{
"writer": [
"Select a valid choice. That choice is not one of the available "
"choices."
]
},
)
def test_validate_foreign_key_to_model_with_overridden_manager(self):
class MyForm(forms.ModelForm):
class Meta:
model = Article
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Allow archived authors.
self.fields["writer"].queryset = Writer._base_manager.all()
w = Writer.objects.create(name="Randy", archived=True)
data = {
"headline": "My Article",
"slug": "my-article",
"pub_date": datetime.date.today(),
"writer": w.pk,
"article": "lorem ipsum",
}
form = MyForm(data)
self.assertIs(form.is_valid(), True)
article = form.save()
self.assertEqual(article.writer, w)
| ModelFormBasicTests |
python | pydata__xarray | xarray/ufuncs.py | {
"start": 2244,
"end": 8877
} | class ____(_ufunc_wrapper):
"""Wrapper for dispatching binary ufuncs."""
def __call__(self, x, y, /, **kwargs):
xp = get_array_namespace(x, y)
func = getattr(xp, self.__name__)
return xr.apply_ufunc(func, x, y, dask="allowed", **kwargs)
def _skip_signature(doc, name):
if not isinstance(doc, str):
return doc
# numpy creates some functions as aliases and copies the docstring exactly,
# so check the actual name to handle this case
np_name = getattr(np, name).__name__
if doc.startswith(np_name):
signature_end = doc.find("\n\n")
doc = doc[signature_end + 2 :]
return doc
def _remove_unused_reference_labels(doc):
if not isinstance(doc, str):
return doc
max_references = 5
for num in range(max_references):
label = f".. [{num}]"
reference = f"[{num}]_"
index = f"{num}. "
if label not in doc or reference in doc:
continue
doc = doc.replace(label, index)
return doc
def _dedent(doc):
if not isinstance(doc, str):
return doc
return textwrap.dedent(doc)
# These can be auto-generated from the public numpy ufuncs:
# {name for name in dir(np) if isinstance(getattr(np, name), np.ufunc)}
# Generalized ufuncs that use core dimensions or produce multiple output
# arrays are not currently supported, and left commented out below.
# UNARY
abs = _unary_ufunc("abs")
absolute = _unary_ufunc("absolute")
acos = _unary_ufunc("acos")
acosh = _unary_ufunc("acosh")
arccos = _unary_ufunc("arccos")
arccosh = _unary_ufunc("arccosh")
arcsin = _unary_ufunc("arcsin")
arcsinh = _unary_ufunc("arcsinh")
arctan = _unary_ufunc("arctan")
arctanh = _unary_ufunc("arctanh")
asin = _unary_ufunc("asin")
asinh = _unary_ufunc("asinh")
atan = _unary_ufunc("atan")
atanh = _unary_ufunc("atanh")
bitwise_count = _unary_ufunc("bitwise_count")
bitwise_invert = _unary_ufunc("bitwise_invert")
bitwise_not = _unary_ufunc("bitwise_not")
cbrt = _unary_ufunc("cbrt")
ceil = _unary_ufunc("ceil")
conj = _unary_ufunc("conj")
conjugate = _unary_ufunc("conjugate")
cos = _unary_ufunc("cos")
cosh = _unary_ufunc("cosh")
deg2rad = _unary_ufunc("deg2rad")
degrees = _unary_ufunc("degrees")
exp = _unary_ufunc("exp")
exp2 = _unary_ufunc("exp2")
expm1 = _unary_ufunc("expm1")
fabs = _unary_ufunc("fabs")
floor = _unary_ufunc("floor")
# frexp = _unary_ufunc("frexp")
invert = _unary_ufunc("invert")
isfinite = _unary_ufunc("isfinite")
isinf = _unary_ufunc("isinf")
isnan = _unary_ufunc("isnan")
isnat = _unary_ufunc("isnat")
log = _unary_ufunc("log")
log10 = _unary_ufunc("log10")
log1p = _unary_ufunc("log1p")
log2 = _unary_ufunc("log2")
logical_not = _unary_ufunc("logical_not")
# modf = _unary_ufunc("modf")
negative = _unary_ufunc("negative")
positive = _unary_ufunc("positive")
rad2deg = _unary_ufunc("rad2deg")
radians = _unary_ufunc("radians")
reciprocal = _unary_ufunc("reciprocal")
rint = _unary_ufunc("rint")
sign = _unary_ufunc("sign")
signbit = _unary_ufunc("signbit")
sin = _unary_ufunc("sin")
sinh = _unary_ufunc("sinh")
spacing = _unary_ufunc("spacing")
sqrt = _unary_ufunc("sqrt")
square = _unary_ufunc("square")
tan = _unary_ufunc("tan")
tanh = _unary_ufunc("tanh")
trunc = _unary_ufunc("trunc")
# BINARY
add = _binary_ufunc("add")
arctan2 = _binary_ufunc("arctan2")
atan2 = _binary_ufunc("atan2")
bitwise_and = _binary_ufunc("bitwise_and")
bitwise_left_shift = _binary_ufunc("bitwise_left_shift")
bitwise_or = _binary_ufunc("bitwise_or")
bitwise_right_shift = _binary_ufunc("bitwise_right_shift")
bitwise_xor = _binary_ufunc("bitwise_xor")
copysign = _binary_ufunc("copysign")
divide = _binary_ufunc("divide")
# divmod = _binary_ufunc("divmod")
equal = _binary_ufunc("equal")
float_power = _binary_ufunc("float_power")
floor_divide = _binary_ufunc("floor_divide")
fmax = _binary_ufunc("fmax")
fmin = _binary_ufunc("fmin")
fmod = _binary_ufunc("fmod")
gcd = _binary_ufunc("gcd")
greater = _binary_ufunc("greater")
greater_equal = _binary_ufunc("greater_equal")
heaviside = _binary_ufunc("heaviside")
hypot = _binary_ufunc("hypot")
lcm = _binary_ufunc("lcm")
ldexp = _binary_ufunc("ldexp")
left_shift = _binary_ufunc("left_shift")
less = _binary_ufunc("less")
less_equal = _binary_ufunc("less_equal")
logaddexp = _binary_ufunc("logaddexp")
logaddexp2 = _binary_ufunc("logaddexp2")
logical_and = _binary_ufunc("logical_and")
logical_or = _binary_ufunc("logical_or")
logical_xor = _binary_ufunc("logical_xor")
# matmul = _binary_ufunc("matmul")
maximum = _binary_ufunc("maximum")
minimum = _binary_ufunc("minimum")
mod = _binary_ufunc("mod")
multiply = _binary_ufunc("multiply")
nextafter = _binary_ufunc("nextafter")
not_equal = _binary_ufunc("not_equal")
pow = _binary_ufunc("pow")
power = _binary_ufunc("power")
remainder = _binary_ufunc("remainder")
right_shift = _binary_ufunc("right_shift")
subtract = _binary_ufunc("subtract")
true_divide = _binary_ufunc("true_divide")
# vecdot = _binary_ufunc("vecdot")
# elementwise non-ufunc
angle = _unary_ufunc("angle")
isreal = _unary_ufunc("isreal")
iscomplex = _unary_ufunc("iscomplex")
__all__ = [
"abs",
"absolute",
"acos",
"acosh",
"add",
"angle",
"arccos",
"arccosh",
"arcsin",
"arcsinh",
"arctan",
"arctan2",
"arctanh",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_count",
"bitwise_invert",
"bitwise_left_shift",
"bitwise_not",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"cbrt",
"ceil",
"conj",
"conjugate",
"copysign",
"cos",
"cosh",
"deg2rad",
"degrees",
"divide",
"equal",
"exp",
"exp2",
"expm1",
"fabs",
"float_power",
"floor",
"floor_divide",
"fmax",
"fmin",
"fmod",
"gcd",
"greater",
"greater_equal",
"heaviside",
"hypot",
"invert",
"iscomplex",
"isfinite",
"isinf",
"isnan",
"isnat",
"isreal",
"lcm",
"ldexp",
"left_shift",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logaddexp2",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"maximum",
"minimum",
"mod",
"multiply",
"negative",
"nextafter",
"not_equal",
"positive",
"pow",
"power",
"rad2deg",
"radians",
"reciprocal",
"remainder",
"right_shift",
"rint",
"sign",
"signbit",
"sin",
"sinh",
"spacing",
"sqrt",
"square",
"subtract",
"tan",
"tanh",
"true_divide",
"trunc",
]
| _binary_ufunc |
python | django__django | tests/many_to_one/models.py | {
"start": 2459,
"end": 2665
} | class ____(models.Model):
parent = models.ForeignKey(
Parent, models.CASCADE, to_field="name", related_name="to_field_children"
)
# Multiple paths to the same model (#7110, #7125)
| ToFieldChild |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py | {
"start": 1780,
"end": 7602
} | class ____(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
"""Verifies that the ClusterSpec generates the correct proto.
We are testing this four different ways to ensure that the ClusterSpec
returned by the TPUClusterResolver behaves identically to a normal
ClusterSpec when passed into the generic ClusterSpec libraries.
Args:
cluster_spec: ClusterSpec returned by the TPUClusterResolver
expected_proto: Expected protobuf
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(expected_proto,
server_lib.ClusterSpec(
cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(expected_proto,
server_lib.ClusterSpec(
cluster_spec.as_dict()).as_cluster_def())
def testSingleItemSuccessfulRetrievalInCluster(self):
ret = _create_pod_list(
('tensorflow-abc123', 'Running', '10.1.2.3'),
)
cluster_resolver = KubernetesClusterResolver(
override_client=_mock_kubernetes_client({'job-name=tensorflow': ret}),
executable_location=ExecutableLocation.WITHIN_CLUSTER,
)
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.3:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
def testValueErrorRaisedOnInvalidExecutableLocation(self):
_mock_kubernetes_module()
with self.assertRaisesRegexp(ValueError, '.*'):
KubernetesClusterResolver(executable_location=None)
def testSingleItemSuccessfulRetrieval(self):
ret = _create_pod_list(('tensorflow-abc123', 'Running', '10.1.2.3'),)
cluster_resolver = KubernetesClusterResolver(
override_client=_mock_kubernetes_client(
{'job-name=tensorflow': ret}))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.3:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
def testSuccessfulRetrievalWithSort(self):
ret = _create_pod_list(
('tensorflow-abc123', 'Running', '10.1.2.3'),
('tensorflow-def456', 'Running', '10.1.2.4'),
('tensorflow-999999', 'Running', '10.1.2.5'))
cluster_resolver = KubernetesClusterResolver(
override_client=_mock_kubernetes_client(
{'job-name=tensorflow': ret}))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.5:8470' }
tasks { key: 1 value: '10.1.2.3:8470' }
tasks { key: 2 value: '10.1.2.4:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
def testGetMasterWithOverrideParameters(self):
ret = _create_pod_list(
('worker-0', 'Running', '10.1.2.3'),
('worker-1', 'Running', '10.1.2.4'),
('worker-2', 'Running', '10.1.2.5'))
cluster_resolver = KubernetesClusterResolver(
override_client=_mock_kubernetes_client(
{'job-name=tensorflow': ret}))
cluster_resolver.task_type = 'worker'
cluster_resolver.task_id = 0
self.assertEqual(cluster_resolver.task_type, 'worker')
self.assertEqual(cluster_resolver.task_id, 0)
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
self.assertEqual(cluster_resolver.master('worker', 2),
'grpc://10.1.2.5:8470')
def testNonRunningPod(self):
ret = _create_pod_list(('tensorflow-abc123', 'Failed', '10.1.2.3'),)
cluster_resolver = KubernetesClusterResolver(
override_client=_mock_kubernetes_client(
{'job-name=tensorflow': ret}))
error_msg = 'Pod "tensorflow-abc123" is not running; phase: "Failed"'
with self.assertRaisesRegex(RuntimeError, error_msg):
cluster_resolver.cluster_spec()
def testMultiplePodSelectorsAndWorkers(self):
worker1 = _create_pod_list(
('tensorflow-abc123', 'Running', '10.1.2.3'),
('tensorflow-def456', 'Running', '10.1.2.4'),
('tensorflow-999999', 'Running', '10.1.2.5'))
worker2 = _create_pod_list(
('tensorflow-abc124', 'Running', '10.1.2.6'),
('tensorflow-def457', 'Running', '10.1.2.7'),
('tensorflow-999990', 'Running', '10.1.2.8'))
ps = _create_pod_list(
('tensorflow-ps-1', 'Running', '10.1.2.1'),
('tensorflow-ps-2', 'Running', '10.1.2.2'))
cluster_resolver = KubernetesClusterResolver(
job_to_label_mapping={
'worker': ['job-name=worker1', 'job-name=worker2'],
'ps': ['job-name=ps']
},
override_client=_mock_kubernetes_client({
'job-name=worker1': worker1,
'job-name=worker2': worker2,
'job-name=ps': ps
}))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'ps'
tasks { key: 0 value: '10.1.2.1:8470' }
tasks { key: 1 value: '10.1.2.2:8470' }
}
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.5:8470' }
tasks { key: 1 value: '10.1.2.3:8470' }
tasks { key: 2 value: '10.1.2.4:8470' }
tasks { key: 3 value: '10.1.2.8:8470' }
tasks { key: 4 value: '10.1.2.6:8470' }
tasks { key: 5 value: '10.1.2.7:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
if __name__ == '__main__':
test.main()
| KubernetesClusterResolverTest |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ridge.py | {
"start": 42354,
"end": 46378
} | class ____(LinearClassifierMixin):
def _prepare_data(self, X, y, sample_weight, solver):
"""Validate `X` and `y` and binarize `y`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
solver : str
The solver used in `Ridge` to know which sparse format to support.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Validated training data.
y : ndarray of shape (n_samples,)
Validated target values.
sample_weight : ndarray of shape (n_samples,)
Validated sample weights.
Y : ndarray of shape (n_samples, n_classes)
The binarized version of `y`.
"""
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
xp, _, device_ = get_namespace_and_device(X)
sample_weight = move_to(sample_weight, xp=xp, device=device_)
X, y = validate_data(
self,
X,
y,
accept_sparse=accept_sparse,
multi_output=True,
y_numeric=False,
force_writeable=True,
)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
xp_y, y_is_array_api = get_namespace(y)
Y = self._label_binarizer.fit_transform(y)
Y = move_to(Y, xp=xp, device=device_)
if y_is_array_api and xp_y.isdtype(y.dtype, "numeric"):
self.classes_ = move_to(
self._label_binarizer.classes_, xp=xp, device=device_
)
else:
self.classes_ = self._label_binarizer.classes_
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
reweighting = compute_sample_weight(self.class_weight, y)
reweighting = move_to(reweighting, xp=xp, device=device_)
sample_weight = sample_weight * reweighting
return X, y, sample_weight, Y
def predict(self, X):
"""Predict class labels for samples in `X`.
Parameters
----------
X : {array-like, spare matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector containing `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
"""
check_is_fitted(self, attributes=["_label_binarizer"])
if self._label_binarizer.y_type_.startswith("multilabel"):
# Threshold such that the negative label is -1 and positive label
# is 1 to use the inverse transform of the label binarizer fitted
# during fit.
decision = self.decision_function(X)
xp, _ = get_namespace(decision)
scores = 2.0 * xp.astype(decision > 0, decision.dtype) - 1.0
return self._label_binarizer.inverse_transform(scores)
return super().predict(X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.classifier_tags.multi_label = True
return tags
def _get_scorer_instance(self):
"""Return a scorer which corresponds to what's defined in ClassiferMixin
parent class. This is used for routing `sample_weight`.
"""
return get_scorer("accuracy")
| _RidgeClassifierMixin |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 64410,
"end": 64764
} | class ____:
@classmethod
def create(cls, **kwargs):
instance = cls(**kwargs)
instance.id = 1
return instance
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
self.id = None
@unittest.skipIf(SKIP_DJANGO, "django tests disabled.")
| BetterFakeModel |
python | getsentry__sentry | src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py | {
"start": 3206,
"end": 24460
} | class ____(TypedDict):
project_id: int
sample_rate: float
organization_allowlist: list[int]
def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]:
configs: list[SDKCrashDetectionConfig] = []
cocoa_options = _get_options(sdk_name=SdkName.Cocoa, has_organization_allowlist=False)
if cocoa_options:
# Since changing the debug image type to macho (https://github.com/getsentry/sentry-cocoa/pull/2701)
# released in sentry-cocoa 8.2.0 (https://github.com/getsentry/sentry-cocoa/blob/main/CHANGELOG.md#820),
# the frames contain the full paths required for detecting system frames in is_system_library_frame.
# Therefore, we require at least sentry-cocoa 8.2.0.
cocoa_min_sdk_version = "8.2.0"
cocoa_config = SDKCrashDetectionConfig(
sdk_name=SdkName.Cocoa,
project_id=cocoa_options["project_id"],
sample_rate=cocoa_options["sample_rate"],
organization_allowlist=cocoa_options["organization_allowlist"],
sdk_names={
"sentry.cocoa": cocoa_min_sdk_version,
"sentry.cocoa.capacitor": cocoa_min_sdk_version,
"sentry.cocoa.react-native": cocoa_min_sdk_version,
"sentry.cocoa.dotnet": cocoa_min_sdk_version,
"sentry.cocoa.flutter": cocoa_min_sdk_version,
"sentry.cocoa.kmp": cocoa_min_sdk_version,
"sentry.cocoa.unity": cocoa_min_sdk_version,
"sentry.cocoa.unreal": cocoa_min_sdk_version,
},
report_fatal_errors=False,
ignore_mechanism_type=set(),
allow_mechanism_type=set(),
system_library_path_patterns={r"/System/Library/**", r"/usr/lib/**"},
sdk_frame_config=SDKFrameConfig(
function_patterns={
r"*sentrycrash*",
r"*\[Sentry*",
r"*(Sentry*)*", # Objective-C class extension categories
r"SentryMX*", # MetricKit Swift classes
},
path_patterns={"Sentry**"},
path_replacer=FixedPathReplacer(path="Sentry.framework"),
),
sdk_crash_ignore_matchers={
# [SentrySDK crash] is a testing function causing a crash.
# Therefore, we don't want to mark it a as a SDK crash.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="**SentrySDK crash**",
),
# [SentrySDKInternal crash] is a testing function causing a crash.
# Therefore, we don't want to mark it a as a SDK crash.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="**SentrySDKInternal crash**",
),
# SentryCrashExceptionApplicationHelper._crashOnException calls abort() intentionally, which would cause false positives.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="**SentryCrashExceptionApplicationHelper _crashOnException**",
),
},
)
configs.append(cocoa_config)
react_native_options = _get_options(
sdk_name=SdkName.ReactNative, has_organization_allowlist=True
)
if react_native_options:
react_native_config = SDKCrashDetectionConfig(
sdk_name=SdkName.ReactNative,
project_id=react_native_options["project_id"],
sample_rate=react_native_options["sample_rate"],
organization_allowlist=react_native_options["organization_allowlist"],
# 4.0.0 was released in June 2022, see https://github.com/getsentry/sentry-react-native/releases/tag/4.0.0.
# We require at least sentry-react-native 4.0.0 to only detect SDK crashes for not too old versions.
sdk_names={
"sentry.javascript.react-native": "4.0.0",
},
report_fatal_errors=False,
# used by the JS/RN SDKs
# https://github.com/getsentry/sentry-javascript/blob/dafd51054d8b2ab2030fa0b16ad0fd70493b6e08/packages/core/src/integrations/captureconsole.ts#L60
ignore_mechanism_type={"console"},
allow_mechanism_type=set(),
system_library_path_patterns={
r"**/react-native/Libraries/**",
r"**/react-native-community/**",
},
sdk_frame_config=SDKFrameConfig(
function_patterns=set(),
path_patterns={
# Development path
r"**/sentry-react-native/dist/**",
# Production paths taken from https://github.com/getsentry/sentry-react-native/blob/037d5fa2f38b02eaf4ca92fda569e0acfd6c3ebe/package.json#L68-L77
r"**/@sentry/react-native/**",
r"**/@sentry/browser/**",
r"**/@sentry/cli/**",
r"**/@sentry/core/**",
r"**/@sentry/hub/**",
r"**/@sentry/integrations/**",
r"**/@sentry/react/**",
r"**/@sentry/types/**",
r"**/@sentry/utils/**",
},
path_replacer=KeepAfterPatternMatchPathReplacer(
patterns={
r"\/sentry-react-native\/.*",
# We don't add the first / here because module isn't prefixed with /.
# We don't need to specify all production paths because the path replacer only runs for SDK frames.
r"@sentry\/*",
},
fallback_path="sentry-react-native",
),
),
sdk_crash_ignore_matchers={
# sentryWrapped rethrows the original error
# https://github.com/getsentry/sentry-javascript/blob/a67ebc4f56fd20259bffbe194e8e92e968589c12/packages/browser/src/helpers.ts#L107
FunctionAndModulePattern(
module_pattern="*",
function_pattern="sentryWrapped",
),
},
)
configs.append(react_native_config)
# 0.6.0 was released in Feb 2023, see https://github.com/getsentry/sentry-native/releases/tag/0.6.0.
native_min_sdk_version = "0.6.0"
java_options = _get_options(sdk_name=SdkName.Java, has_organization_allowlist=True)
if java_options:
# The sentry-java SDK sends SDK frames for uncaught exceptions since 7.0.0, which is required for detecting SDK crashes.
# 7.0.0 was released in Nov 2023, see https://github.com/getsentry/sentry-java/releases/tag/7.0.0
java_min_sdk_version = "7.0.0"
java_config = SDKCrashDetectionConfig(
sdk_name=SdkName.Java,
project_id=java_options["project_id"],
sample_rate=java_options["sample_rate"],
organization_allowlist=java_options["organization_allowlist"],
sdk_names={
"sentry.java.android": java_min_sdk_version,
"sentry.java.android.capacitor": java_min_sdk_version,
"sentry.java.android.dotnet": java_min_sdk_version,
"sentry.java.android.flutter": java_min_sdk_version,
"sentry.java.android.kmp": java_min_sdk_version,
"sentry.java.android.react-native": java_min_sdk_version,
"sentry.java.android.timber": java_min_sdk_version,
"sentry.java.android.unity": java_min_sdk_version,
"sentry.java.android.unreal": java_min_sdk_version,
"sentry.java.jul": java_min_sdk_version,
"sentry.java.kmp": java_min_sdk_version,
"sentry.java.log4j2": java_min_sdk_version,
"sentry.java.logback": java_min_sdk_version,
"sentry.java.opentelemetry.agent": java_min_sdk_version,
"sentry.java.spring": java_min_sdk_version,
"sentry.java.spring-boot": java_min_sdk_version,
"sentry.java.spring-boot.jakarta": java_min_sdk_version,
"sentry.java.spring.jakarta": java_min_sdk_version,
# Required for getting Android Runtime Tracer crashes.
# This is the same as for the native SDK Crash Detection Config
"sentry.native.android": native_min_sdk_version,
},
report_fatal_errors=False,
ignore_mechanism_type=set(),
allow_mechanism_type={"ANR", "AppExitInfo"},
system_library_path_patterns={
r"java.**",
r"javax.**",
r"android.**",
r"androidx.**",
r"com.android.internal.**",
r"kotlin.**",
r"dalvik.**",
r"/apex/com.android.*/lib*/**",
},
sdk_frame_config=SDKFrameConfig(
function_patterns=set(),
path_patterns={
r"io.sentry.**",
},
# The Android Runtime Tracer can crash when users enable profiling in the
# Sentry Android SDK. While the Sentry Android SDK doesn't directly cause
# these crashes, we must know when they occur. As Sentry doesn't appear in
# the stacktrace, we filter for the following specific methods in the
# specified Android apex packages.
function_and_path_patterns=[
FunctionAndPathPattern(
function_pattern=r"*pthread_getcpuclockid*",
path_pattern=r"/apex/com.android.runtime/lib64/bionic/libc.so",
),
FunctionAndPathPattern(
function_pattern=r"*art::Trace::StopTracing*",
path_pattern=r"/apex/com.android.art/lib64/libart.so",
),
FunctionAndPathPattern(
function_pattern=r"*art::Thread::DumpState*",
path_pattern=r"/apex/com.android.art/lib64/libart.so",
),
],
path_replacer=KeepFieldPathReplacer(fields={"module", "filename", "package"}),
),
sdk_crash_ignore_matchers={
FunctionAndModulePattern(
module_pattern="io.sentry.graphql.SentryInstrumentation",
function_pattern="lambda$instrumentExecutionResult$0",
),
FunctionAndModulePattern(
module_pattern="io.sentry.graphql.SentryGraphqlInstrumentation",
function_pattern="instrumentExecutionResultComplete",
),
},
)
configs.append(java_config)
native_options = _get_options(sdk_name=SdkName.Native, has_organization_allowlist=True)
if native_options:
native_config = SDKCrashDetectionConfig(
sdk_name=SdkName.Native,
project_id=native_options["project_id"],
sample_rate=native_options["sample_rate"],
organization_allowlist=native_options["organization_allowlist"],
sdk_names={
"sentry.native": native_min_sdk_version,
"sentry.native.android": native_min_sdk_version,
"sentry.native.android.capacitor": native_min_sdk_version,
"sentry.native.android.flutter": native_min_sdk_version,
"sentry.native.android.react-native": native_min_sdk_version,
"sentry.native.android.unity": native_min_sdk_version,
"sentry.native.android.unreal": native_min_sdk_version,
"sentry.native.dotnet": native_min_sdk_version,
"sentry.native.unity": native_min_sdk_version,
"sentry.native.unreal": native_min_sdk_version,
},
report_fatal_errors=False,
ignore_mechanism_type=set(),
allow_mechanism_type=set(),
system_library_path_patterns={
# well known locations for unix paths
r"/lib/**",
r"/usr/lib/**",
r"/usr/local/lib/**",
r"/usr/local/Cellar/**",
r"linux-gate.so*",
# others
r"/System/Library/Frameworks/**", # macOS
r"C:/Windows/**",
r"/system/**",
r"/vendor/**",
r"**/libart.so",
r"/apex/com.android.*/lib*/**", # Android
},
sdk_frame_config=SDKFrameConfig(
function_patterns={
r"sentry_*", # public interface
r"sentry__*", # module level interface
r"Java_io_sentry_android_ndk_*", # JNI interface
},
path_patterns=set(),
path_replacer=KeepAfterPatternMatchPathReplacer(
patterns={
r"sentry_.*",
},
fallback_path="sentry",
),
),
sdk_crash_ignore_matchers=set(),
)
configs.append(native_config)
dart_options = _get_options(sdk_name=SdkName.Dart, has_organization_allowlist=True)
if dart_options:
# Since 8.2.0 the Dart SDK sends SDK frames, which is required;
# see https://github.com/getsentry/sentry-dart/releases/tag/8.2.0
dart_min_sdk_version = "8.2.1"
dart_config = SDKCrashDetectionConfig(
sdk_name=SdkName.Dart,
project_id=dart_options["project_id"],
sample_rate=dart_options["sample_rate"],
organization_allowlist=dart_options["organization_allowlist"],
sdk_names={
"sentry.dart": dart_min_sdk_version,
"sentry.dart.flutter": dart_min_sdk_version,
},
report_fatal_errors=True,
ignore_mechanism_type=set(),
allow_mechanism_type=set(),
system_library_path_patterns={
# Dart
r"org-dartlang-sdk:///**",
r"dart:**/**",
# Flutter
r"**/packages/flutter/**",
r"package:flutter/**",
},
sdk_frame_config=SDKFrameConfig(
function_patterns=set(),
path_patterns={
# non-obfuscated builds
r"package:sentry/**", # sentry-dart
r"package:sentry_flutter/**", # sentry-dart-flutter
# sentry-dart packages
r"package:sentry_logging/**",
r"package:sentry_dio/**",
r"package:sentry_file/**",
r"package:sentry_sqflite/**",
r"package:sentry_drift/**",
r"package:sentry_hive/**",
r"package:sentry_isar/**",
r"package:sentry_link/**",
r"package:sentry_firebase_remote_config/**",
# obfuscated builds
r"/**/.pub-cache/**/sentry**",
},
path_replacer=KeepFieldPathReplacer(fields={"package", "filename", "abs_path"}),
),
sdk_crash_ignore_matchers={
# getCurrentStackTrace is always part of the stacktrace when the SDK captures the stacktrace,
# and would cause false positives. Therefore, we ignore it.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="getCurrentStackTrace",
),
# Ignore handleDrawFrame and handleBeginFrame to avoid false positives.
# In the Sentry Flutter SDK, we override the handleDrawFrame and handleBeginFrame methods,
# add our custom implementation on top to instrument frame tracking and then forward the calls to Flutter.
# However every custom implementation is try/catch guarded so no exception can be thrown.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="SentryWidgetsBindingMixin.handleDrawFrame",
),
FunctionAndModulePattern(
module_pattern="*",
function_pattern="SentryWidgetsBindingMixin.handleBeginFrame",
),
# This is the integration responsible for reporting unhandled errors.
# For certain errors the frame is sometimes included in the stacktrace which leads to false positives.
FunctionAndModulePattern(
module_pattern="*",
function_pattern="FlutterErrorIntegration.call.<fn>",
),
},
)
configs.append(dart_config)
dotnet_options = _get_options(sdk_name=SdkName.Dotnet, has_organization_allowlist=True)
if dotnet_options:
# Unity SDK contains .NET SDK, so the versions must match. 0.24.0 Unity release was
# based on 3.22.0 .NET release. From that point on SDK names and frames should be consistent.
dotnet_min_sdk_version = "3.22.0"
unity_min_sdk_version = "0.24.0"
dotnet_config = SDKCrashDetectionConfig(
sdk_name=SdkName.Dotnet,
project_id=dotnet_options["project_id"],
sample_rate=dotnet_options["sample_rate"],
organization_allowlist=dotnet_options["organization_allowlist"],
sdk_names={
"sentry.dotnet": dotnet_min_sdk_version,
"sentry.dotnet.android": dotnet_min_sdk_version,
"sentry.dotnet.aspnet": dotnet_min_sdk_version,
"sentry.dotnet.aspnetcore": dotnet_min_sdk_version,
"sentry.dotnet.aspnetcore.grpc": dotnet_min_sdk_version,
"sentry.dotnet.cocoa": dotnet_min_sdk_version,
"sentry.dotnet.ef": dotnet_min_sdk_version,
"sentry.dotnet.extensions.logging": dotnet_min_sdk_version,
"sentry.dotnet.google-cloud-function": dotnet_min_sdk_version,
"sentry.dotnet.log4net": dotnet_min_sdk_version,
"sentry.dotnet.maui": dotnet_min_sdk_version,
"sentry.dotnet.nlog": dotnet_min_sdk_version,
"sentry.dotnet.serilog": dotnet_min_sdk_version,
"sentry.dotnet.xamarin": dotnet_min_sdk_version,
"sentry.dotnet.xamarin-forms": dotnet_min_sdk_version,
"sentry.dotnet.unity": unity_min_sdk_version,
"sentry.unity": unity_min_sdk_version,
"sentry.unity.lite": unity_min_sdk_version,
},
# Report fatal errors, since there are no crashes in Unity
report_fatal_errors=True,
ignore_mechanism_type=set(),
allow_mechanism_type=set(),
system_library_path_patterns={
# .NET System libraries
r"System.**",
r"Microsoft.**",
r"mscorlib**",
r"netstandard**",
# Unity engine libraries
r"UnityEngine.**",
r"UnityEditor.**",
# Common .NET Core/Framework paths
r"**.NETCoreApp**",
r"**.NETFramework**",
r"**.NETStandard**",
},
sdk_frame_config=SDKFrameConfig(
function_patterns=set(),
path_patterns={
# Main Sentry .NET SDK modules
r"Sentry.**",
# Unity-specific Sentry paths (for cases where abs_path is available)
r"**/sentry-unity/**",
r"**/sentry-dotnet/**",
},
path_replacer=KeepFieldPathReplacer(fields={"module", "package", "filename"}),
),
sdk_crash_ignore_matchers={
FunctionAndModulePattern(
module_pattern="Sentry.Samples.**",
function_pattern="*",
),
},
)
configs.append(dotnet_config)
return configs
def _get_options(
sdk_name: SdkName, has_organization_allowlist: bool
) -> SDKCrashDetectionOptions | None:
options_prefix = f"issues.sdk_crash_detection.{sdk_name.value}"
project_id = options.get(f"{options_prefix}.project_id")
if not project_id:
return None
sample_rate = options.get(f"{options_prefix}.sample_rate")
if not sample_rate:
return None
organization_allowlist: list[int] = []
if has_organization_allowlist:
organization_allowlist = options.get(f"{options_prefix}.organization_allowlist")
return SDKCrashDetectionOptions(
project_id=project_id,
sample_rate=sample_rate,
organization_allowlist=organization_allowlist,
)
| SDKCrashDetectionOptions |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/__init__.py | {
"start": 9448,
"end": 9969
} | class ____(WindowsTargetParser):
"""Composite argument parser for a Windows SSH target."""
@property
def option_name(self) -> str:
"""The option name used for this parser."""
return '--target-windows'
@property
def allow_inventory(self) -> bool:
"""True if inventory is allowed, otherwise False."""
return False
@property
def limit_one(self) -> bool:
"""True if only one target is allowed, otherwise False."""
return True
| WindowsSshTargetParser |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_write_border.py | {
"start": 332,
"end": 903
} | class ____(unittest.TestCase):
"""
Test the Styles _write_border() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_border(self):
"""Test the _write_border() method"""
xf_format = Format()
xf_format.has_border = True
self.styles._write_border(xf_format)
exp = """<border><left/><right/><top/><bottom/><diagonal/></border>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteBorder |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 11668,
"end": 12211
} | class ____(HelperFunction):
def _calculate(self, X, y, logger, feat_type):
if len(y.shape) == 2:
occurences = []
for i in range(y.shape[1]):
occurences.append(self._calculate(X, y[:, i], logger, feat_type))
return occurences
else:
occurence_dict = defaultdict(float)
for value in y:
occurence_dict[value] += 1
return occurence_dict
@metafeatures.define("ClassProbabilityMin", dependency="ClassOccurences")
| ClassOccurences |
python | pytorch__pytorch | test/test_mps.py | {
"start": 381950,
"end": 396062
} | class ____(NNTestCase):
def _create_basic_net(self):
class Layer(nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer_dummy_param = Parameter(torch.empty(3, 5))
self.layer_dummy_buf = Buffer(torch.zeros(1, 3, 3, 7))
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = Layer()
self.dummy_param = Parameter(torch.empty(3, 5))
self.dummy_buf = Buffer(torch.zeros(7, 3, 3, 1))
l = Layer()
n = Net()
s = nn.Sequential(n, n)
return l, n, s
def test_requires_grad_(self):
m = self._create_basic_net()[-1]
assert len(list(m.buffers())) > 0, 'invalid test'
assert all(not b.requires_grad for b in m.buffers()) > 0, 'invalid test'
assert len(list(m.parameters())) > 0, 'invalid test'
assert all(p.requires_grad for p in m.parameters()) > 0, 'invalid test'
for requires_grad in (False, True):
self.assertIs(m.requires_grad_(requires_grad), m)
for p in m.parameters():
self.assertEqual(p.requires_grad, requires_grad)
for b in m.buffers():
self.assertFalse(b.requires_grad)
def test_module_backcompat(self):
from torch.serialization import SourceChangeWarning
path = download_file('https://download.pytorch.org/test_data/linear.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
# weights_only=False as this is a legacy use case that loads a module
m = torch.load(path, weights_only=False)
input = torch.randn(2, 3, dtype=torch.float)
self.assertEqual(m(input).size(), (2, 5))
def test_conv_backcompat(self):
from torch.serialization import SourceChangeWarning
# This file was generated by running on PyTorch 1.0.1 on Python 2:
#
# import torch
# from torch import nn
# m = nn.Conv2d(1, 1, 1)
# torch.save(m, 'legacy_conv2d.pt')
#
# NB: This Pickle also contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
# weights_only=False as this is a legacy use case that loads a module
m = torch.load(path, encoding='utf-8', weights_only=False)
input = torch.randn((1, 1, 1, 1), dtype=torch.float)
self.assertEqual(m(input).size(), (1, 1, 1, 1))
def test_conv_expand(self):
device = 'mps'
input_ = torch.rand(2, 3, 16, 16, device=device)
kernel = torch.rand(1, 1, 3, 11, device=device)
tmp_kernel = kernel.expand(-1, 3, -1, -1)
output = F.conv2d(input_, tmp_kernel, groups=1, padding=0, stride=1)
# The test should not crash
def test_permute(self):
M_cpu = torch.randn(5, 5)
M_mps = M_cpu.to('mps')
output_cpu = M_cpu.permute(1, 0)
output_mps = M_mps.permute(1, 0)
self.assertEqual(output_cpu, output_mps)
self.assertEqual(output_cpu.size(), output_mps.size())
# Printing of non_contiguous should not crash
def test_print_non_contiguous(self):
# print(obj) is equivalent to calling `x=str(obj); print(x)`
# Use assertTrue in case to make sure non-empty string is returned
self.assertTrue(str(torch.ones(100, 100, device='mps').nonzero()))
self.assertTrue(str(torch.ones(100, 100, device='mps').nonzero().contiguous()))
def test_zero_grad(self):
i = torch.randn(2, 5, requires_grad=True)
module = nn.Linear(5, 5)
for p in module.parameters():
p.requires_grad = False
module.zero_grad()
module.weight.requires_grad = True
module.zero_grad()
self.assertIsNone(module.weight.grad) # uninitialized grad
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
module.zero_grad()
self.assertIsNone(module.weight.grad)
module.bias.requires_grad = True
module.zero_grad()
self.assertIsNone(module.weight.grad)
self.assertIsNone(module.bias.grad)
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertIsNotNone(module.bias.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
self.assertGreater(module.bias.grad.data.abs().sum(), 0)
# Force set to zeros.
module.zero_grad(set_to_none=False)
self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())
module.zero_grad()
self.assertIsNone(module.weight.grad)
self.assertIsNone(module.bias.grad)
def test_no_grad(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
input = torch.randn(1, 2, 10, 10).to(dtype)
x = input
y = input.clone()
output = module(x)
self.assertTrue(output.requires_grad)
output.backward(torch.ones(1, 5, 10, 10))
with torch.no_grad():
output2 = module(y)
self.assertFalse(output2.requires_grad)
self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
def test_invalid_conv1d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(4\). ' +
r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_conv2d_discontiguous_weight(self):
# Test for https://github.com/pytorch/pytorch/issues/55781
x = torch.ones(64, 16, 16, 16)
weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2)[:, :, :, ::2]
self.assertFalse(weight.is_contiguous())
y = torch.nn.functional.conv2d(x, weight, None)
if torch.backends.mkldnn.is_available():
# Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
with torch.backends.mkldnn.flags(enabled=False):
y_ = torch.nn.functional.conv2d(x, weight, None)
self.assertEqual(y, y_)
self.assertEqual(y.sum(), 4186112.)
def test_invalid_conv2d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
input = torch.randn(1, 3, 1, 1)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(1 x 1\). ' +
r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
# Zero stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
# Input and weights on different devices
self.assertRaisesRegex(RuntimeError,
'must be on the same device',
lambda: torch.conv2d(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 3, 3, device='mps')))
self.assertRaisesRegex(RuntimeError,
'Input type \\(MPSFloatType\\) and weight type \\(torch\\.FloatTensor\\) should be the same',
lambda: torch.conv2d(torch.rand(1, 3, 32, 32, device='mps'), torch.rand(1, 3, 3, 3)))
def test_conv2d_valid_padding(self, device='mps'):
# Test F.conv2d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 10, device=device).to(torch.float)
y = torch.rand(1, 1, 1, 4, device=device).to(torch.float)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect.to('cpu'), actual.to('cpu'))
def test_conv2d_backward_collision(self):
# Test for https://github.com/pytorch/pytorch/issues/112998
x = torch.rand(1, 1, 10, 10, device="mps", requires_grad=True)
m1 = nn.Conv2d(1, 1, 3, stride=2, padding=1).to("mps")
m2 = nn.Conv2d(1, 1, 4, stride=2, padding=1).to("mps")
y1, y2 = m1(x), m2(x)
self.assertEqual(y1.shape, y2.shape)
y1.sum().backward()
# This used to crash with MPSNDArrayConvolutionA14.mm:4352: failed assertion
y2.sum().backward()
def test_conv3d_backward_collision(self):
# Conv3D is only available from MacOS 13.2 onwards
x = torch.rand(1, 1, 10, 10, 20, device="mps", requires_grad=True)
m1 = nn.Conv3d(1, 1, 3, stride=2, padding=1).to("mps")
m2 = nn.Conv3d(1, 1, 4, stride=2, padding=1).to("mps")
y1, y2 = m1(x), m2(x)
self.assertEqual(y1.shape, y2.shape)
y1.sum().backward()
# This used to crash with MPSNDArrayConvolutionA14.mm:4352: failed assertion
y2.sum().backward()
# Regression test for https://github.com/pytorch/pytorch/issues/141471
def test_conv3d_channels_last_3d(self):
m_cpu = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0), device="cpu")
m_mps = copy.deepcopy(m_cpu).to("mps")
x_cpu = torch.randn(20, 16, 10, 50, 100, device="cpu").to(memory_format=torch.channels_last_3d)
x_mps = x_cpu.detach().clone().to("mps")
res_cpu = m_cpu(x_cpu)
res_mps = m_mps(x_mps)
self.assertEqual(res_cpu, res_mps)
def test_gemm_permute_transpose(self):
batch_size = 32
n = 20
hidden = 768
num_attention_heads = 12
attention_head_size = hidden // num_attention_heads
def transpose_for_scores(x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (num_attention_heads, attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def attention2(key, *, workaround=False, device):
key = transpose_for_scores(key)
res = key.transpose(-1, -2)
return res
A = torch.randn(batch_size, n, hidden)
A_mps = A.detach().clone().to("mps")
r1 = attention2(A, device="cpu")
r2 = attention2(A_mps, device="mps")
r2_cpu = r2.to("cpu")
self.assertEqual(r1, r2_cpu)
def test_group_norm_backward(self, device='mps'):
# See https://github.com/pytorch/pytorch/issues/88331 for more detail
shape = [1, 4, 16, 16]
x = torch.full(shape, 7.0, device=device)
target = torch.ones((1, 3, 128, 128), device=device)
conv_in = nn.Conv2d(4, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), device=device)
conv_out = nn.Conv2d(128, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), device=device)
norm = nn.GroupNorm(32, 128, eps=1e-6, affine=True, device=device)
with torch.enable_grad():
x = x.detach().requires_grad_()
out = 5.5 * x
out = conv_in(out)
out = out + norm(out)
out = out + norm(out)
out = out + norm(out)
out = F.interpolate(out, scale_factor=8.0, mode="nearest")
out = norm(out)
out = conv_out(out)
loss = (out - target).norm(dim=-1).sum()
grad = -torch.autograd.grad(loss, x)[0]
self.assertFalse(grad.detach().isnan().any().item(), 'NaN gradients returned by autograd')
# def test_conv2d_same_padding(self, device='mps'):
# x = torch.rand(1, 1, 10, 11, device=device)
# y = torch.rand(1, 1, 4, 5, device=device)
# expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
# actual = F.conv2d(x, y, padding='same')
# self.assertEqual(expect.to('cpu'), actual.to('cpu'))
# # With dilation
# y = torch.rand(1, 1, 3, 4, device=device)
# expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
# actual = F.conv2d(x, y, padding='same', dilation=2)
# self.assertEqual(expect, actual)
# # Dilation with asymmetric padding
# y = torch.rand(1, 1, 4, 4, device=device)
# expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
# actual = F.conv2d(x, y, padding='same', dilation=3)
# self.assertEqual(expect, actual)
| TestNNMPS |
python | modin-project__modin | modin/config/envvars.py | {
"start": 38742,
"end": 38962
} | class ____(EnvironmentVariable, type=str):
"""Set to AWS_SECRET_ACCESS_KEY when running mock S3 tests for Modin in GitHub CI."""
varname = "AWS_SECRET_ACCESS_KEY"
default = "foobar_secret"
| CIAWSSecretAccessKey |
python | huggingface__transformers | src/transformers/models/smolvlm/modular_smolvlm.py | {
"start": 4627,
"end": 4697
} | class ____(Idefics3VisionTransformer):
pass
| SmolVLMVisionTransformer |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/qtenum.py | {
"start": 90,
"end": 2622
} | class ____(ListParameter):
def __init__(self, enum, searchObj=QtCore.Qt, **opts):
"""
Constructs a list of allowed enum values from the enum class provided
`searchObj` is only needed for PyQt5 compatibility, where it must be the module holding the enum.
For instance, if making a QtEnumParameter out of QtWidgets.QFileDialog.Option, `searchObj` would
be QtWidgets.QFileDialog
"""
self.enum = enum
self.searchObj = searchObj
opts.setdefault('name', enum.__name__)
self.enumMap = self._getAllowedEnums(enum)
opts.update(limits=self.formattedLimits())
super().__init__(**opts)
def setValue(self, value, blockSignal=None):
if isinstance(value, str):
value = self.enumMap[value]
super().setValue(value, blockSignal)
def formattedLimits(self):
# Title-cased words without the ending substring for brevity
mapping = self.enumMap
shortestName = min(len(name) for name in mapping)
names = list(mapping)
cmpName, *names = names
substringEnd = next(
(
ii + 1
for ii in range(-1, -shortestName - 1, -1)
if any(cmpName[ii] != curName[ii] for curName in names)
),
None,
)
# Special case of 0: Set to None to avoid null string
if substringEnd == 0:
substringEnd = None
return {kk[:substringEnd]: vv for kk, vv in self.enumMap.items()}
def saveState(self, filter=None):
state = super().saveState(filter)
reverseMap = dict(zip(self.enumMap.values(), self.enumMap))
state['value'] = reverseMap[state['value']]
return state
def _getAllowedEnums(self, enum):
"""Pyside provides a dict for easy evaluation"""
if issubclass(enum, Enum):
# PyQt6 and PySide6 (opt-in in 6.3.1) use python enums
vals = {e.name: e for e in enum}
elif 'PySide' in QT_LIB:
vals = enum.values
elif 'PyQt5' in QT_LIB:
vals = {}
for key in dir(self.searchObj):
value = getattr(self.searchObj, key)
if isinstance(value, enum):
vals[key] = value
else:
raise RuntimeError(f'Cannot find associated enum values for qt lib {QT_LIB}')
# Remove "M<enum>" since it's not a real option
vals.pop(f'M{enum.__name__}', None)
return vals
| QtEnumParameter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 64407,
"end": 64710
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("app_id", "check_name")
app_id = sgqlc.types.Field(Int, graphql_name="appId")
check_name = sgqlc.types.Field(String, graphql_name="checkName")
| CheckSuiteFilter |
python | Netflix__metaflow | metaflow/plugins/azure/azure_secret_manager_secrets_provider.py | {
"start": 895,
"end": 1034
} | class ____(MetaflowException):
"""Raised when the secret version does not match expected pattern"""
| MetaflowAzureKeyVaultBadSecretVersion |
python | huggingface__transformers | src/transformers/models/esm/modeling_esmfold.py | {
"start": 41239,
"end": 46324
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
sequence_state_dim = config.sequence_state_dim
pairwise_state_dim = config.pairwise_state_dim
sequence_num_heads = sequence_state_dim // config.sequence_head_width
pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width
self.layernorm_1 = nn.LayerNorm(sequence_state_dim)
self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim)
self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads)
self.seq_attention = EsmFoldSelfAttention(
sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True
)
self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True)
self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False)
self.tri_att_start = EsmFoldTriangleAttention(
pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=True
)
self.tri_att_end = EsmFoldTriangleAttention(
pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=False
)
self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout)
self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout)
self.drop = nn.Dropout(config.dropout)
self.row_drop = EsmFoldDropout(config.dropout * 2, 2)
self.col_drop = EsmFoldDropout(config.dropout * 2, 1)
def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
"""
Inputs:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim mask: B x L boolean
tensor of valid positions
Output:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim
"""
if len(sequence_state.shape) != 3:
raise ValueError(f"`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.")
if len(pairwise_state.shape) != 4:
raise ValueError(f"`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.")
if mask is not None and len(mask.shape) != 2:
raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
pairwise_state_dim = pairwise_state.shape[3]
if sequence_state_dim != self.config.sequence_state_dim:
raise ValueError(
"`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got "
f"{sequence_state_dim} != {self.config.sequence_state_dim}."
)
if pairwise_state_dim != self.config.pairwise_state_dim:
raise ValueError(
"`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got "
f"{pairwise_state_dim} != {self.config.pairwise_state_dim}."
)
if batch_dim != pairwise_state.shape[0]:
raise ValueError(
f"`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != "
f"{pairwise_state.shape[0]}."
)
if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]:
raise ValueError(
f"`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != "
f"{pairwise_state.shape[1]} or {pairwise_state.shape[2]}."
)
# Update sequence state
bias = self.pair_to_sequence(pairwise_state)
# Self attention with bias + mlp.
y = self.layernorm_1(sequence_state)
y, _ = self.seq_attention(y, mask=mask, bias=bias)
sequence_state = sequence_state + self.drop(y)
sequence_state = self.mlp_seq(sequence_state)
# Update pairwise state
pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)
# Axial attention with triangular bias.
tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask))
pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask))
pairwise_state = pairwise_state + self.row_drop(
self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
)
pairwise_state = pairwise_state + self.col_drop(
self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
)
# MLP over pairs.
pairwise_state = self.mlp_pair(pairwise_state)
return sequence_state, pairwise_state
| EsmFoldTriangularSelfAttentionBlock |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 56377,
"end": 57288
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
key: Optional[str] = Field(
None,
description=(
"[Databricks Runtime"
" version](https://docs.databricks.com/dev-tools/api/latest/index.html#programmatic-version)"
" key, for example `7.3.x-scala2.12`. The value that must be provided as"
" the `spark_version` when creating a new cluster. The exact runtime"
" version may change over time for a “wildcard” version (that is,"
" `7.3.x-scala2.12` is a “wildcard” version) with minor bug fixes."
),
)
name: Optional[str] = Field(
None,
description=(
"A descriptive name for the runtime version, for example “Databricks"
" Runtime 7.3 LTS”."
),
)
| SparkVersion |
python | ApeWorX__ape | src/ape_pm/dependency.py | {
"start": 1195,
"end": 3026
} | class ____(DependencyAPI):
"""
A dependency located on the local machine.
"""
local: Path
"""
The root path (and API defining key) to the dependency files.
"""
version: Optional[str] = None
"""
Specified version.
"""
@model_validator(mode="before")
@classmethod
def validate_local_path(cls, model):
# Resolves the relative path so if the dependency API
# data moves, it will still work.
path = Path(model["local"]).expanduser()
model["local"] = f"{path}"
# Automatically include `"name"`.
if "name" not in model:
model["name"] = path.stem
if path.is_absolute():
return model
elif "project" in model:
# Just in case relative paths didn't get resolved.
# Note: Generally, they should be resolved at model
# construction time, if parsing a config file normally.
project = model.pop("project")
model["local"] = (project / path).resolve()
return model
def __repr__(self) -> str:
path = clean_path(self.local)
return f"<LocalDependency local={path}, version={self.version_id}>"
@property
def package_id(self) -> str:
path = self.local
if in_tempdir(path):
# Avoids never-ending tmp paths.
return self.name
else:
return self.local.as_posix()
@property
def version_id(self) -> str:
return self.version or "local"
@property
def uri(self) -> str:
return self.local.as_uri()
def fetch(self, destination: Path):
if destination.is_dir():
destination = destination / self.name
_fetch_local(self.local, destination, config_override=self.config_override)
| LocalDependency |
python | django__django | tests/mail/custombackend.py | {
"start": 99,
"end": 448
} | class ____(BaseEmailBackend):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_outbox = []
def send_messages(self, email_messages):
# Messages are stored in an instance variable for testing.
self.test_outbox.extend(email_messages)
return len(email_messages)
| EmailBackend |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/source_salesforce/streams.py | {
"start": 43150,
"end": 44062
} | class ____(Stream):
state_converter = IsoMillisConcurrentStreamStateConverter(is_sequential_state=False)
"""
Stream of sObjects' (Salesforce Objects) describe:
https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_describe.htm
"""
name = "Describe"
primary_key = "name"
def __init__(self, sf_api: Salesforce, catalog: ConfiguredAirbyteCatalog = None, **kwargs):
super().__init__(**kwargs)
self.sf_api = sf_api
if catalog:
self.sobjects_to_describe = [s.stream.name for s in catalog.streams if s.stream.name != self.name]
def read_records(self, **kwargs) -> Iterable[Mapping[str, Any]]:
"""
Yield describe response of SObjects defined in catalog as streams only.
"""
for sobject in self.sobjects_to_describe:
yield self.sf_api.describe(sobject=sobject)
| Describe |
python | scrapy__scrapy | scrapy/core/downloader/handlers/ftp.py | {
"start": 2201,
"end": 2847
} | class ____(Protocol):
def __init__(self, filename: bytes | None = None):
self.__filename: bytes | None = filename
self.body: BinaryIO = (
Path(filename.decode()).open("wb") if filename else BytesIO()
)
self.size: int = 0
def dataReceived(self, data: bytes) -> None:
self.body.write(data)
self.size += len(data)
@property
def filename(self) -> bytes | None:
return self.__filename
def close(self) -> None:
if self.filename:
self.body.close()
else:
self.body.seek(0)
_CODE_RE = re.compile(r"\d+")
| ReceivedDataProtocol |
python | pyinstaller__pyinstaller | tests/functional/modules/pyi_import_pyqt_uic_port/PyQt5/QtCore.py | {
"start": 135,
"end": 371
} | class ____:
def __init__(*args, **kw):
pass
PrefixPath = 1
BinariesPath = 2
@classmethod
def location(cls, val):
return "."
@classmethod
def isDebugBuild(cls):
return False
| QLibraryInfo |
python | walkccc__LeetCode | solutions/2357. Make Array Zero by Subtracting Equal Amounts/2357.py | {
"start": 0,
"end": 103
} | class ____:
def minimumOperations(self, nums: list[int]) -> int:
return len(set(nums) - {0})
| Solution |
python | facebook__pyre-check | client/language_server/features.py | {
"start": 973,
"end": 1409
} | class ____(enum.Enum):
DISABLED = "disabled"
FUNCTION_LEVEL = "function_level"
EXPRESSION_LEVEL = "expression_level"
# User-facing features
StatusUpdatesAvailability = _Availability
TypeErrorsAvailability = _Availability
UnsavedChangesAvailability = _Availability
# Telemetry: is the editor able to forward events somewhere?
TelemetryAvailability = _Availability
@dataclasses.dataclass(frozen=True)
| TypeCoverageAvailability |
python | django-extensions__django-extensions | tests/management/commands/test_describe_form.py | {
"start": 151,
"end": 502
} | class ____(TestCase):
"""Tests for describe_form command exceptions."""
def test_should_raise_CommandError_if_invalid_arg(self):
with self.assertRaisesRegex(
CommandError, "Need application and model name in the form: appname.model"
):
call_command("describe_form", "testapp")
| DescribeFormExceptionsTests |
python | getsentry__sentry | src/sentry/grouping/grouptype.py | {
"start": 573,
"end": 831
} | class ____(DetectorHandler):
def evaluate_impl(self, data_packet: DataPacket[T]) -> GroupedDetectorEvaluationResult:
# placeholder
return GroupedDetectorEvaluationResult(result={}, tainted=False)
@dataclass(frozen=True)
| ErrorDetectorHandler |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 13440,
"end": 18158
} | class ____(base_classes.Book):
def __init__(self, app, name_or_index):
self._app = app
self.xl = app.xl.workbooks[name_or_index]
@property
def app(self):
return self._app
@property
def api(self):
return self.xl
def json(self):
raise NotImplementedError()
@property
def name(self):
return self.xl.name.get()
@property
def sheets(self):
return Sheets(self)
def close(self):
self.xl.close(saving=kw.no)
def save(self, path, password):
saved_path = self.xl.properties().get(kw.path)
source_ext = os.path.splitext(self.name)[1] if saved_path else None
target_ext = os.path.splitext(path)[1] if path else ".xlsx"
if saved_path and source_ext == target_ext:
file_format = self.xl.properties().get(kw.file_format)
else:
ext_to_file_format = {
".xlsx": kw.Excel_XML_file_format,
".xlsm": kw.macro_enabled_XML_file_format,
".xlsb": kw.Excel_binary_file_format,
".xltm": kw.macro_enabled_template_file_format,
".xltx": kw.template_file_format,
".xlam": kw.add_in_file_format,
".xls": kw.Excel98to2004_file_format,
".xlt": kw.Excel98to2004_template_file_format,
".xla": kw.Excel98to2004_add_in_file_format,
}
file_format = ext_to_file_format[target_ext]
if (saved_path != "") and (path is None):
# Previously saved: Save under existing name
self.xl.save(timeout=-1)
elif (
(saved_path != "") and (path is not None) and (os.path.split(path)[0] == "")
):
# Save existing book under new name in cwd if no path has been provided
save_as_name = path
path = os.path.join(os.getcwd(), path)
hfs_path = posix_to_hfs_path(os.path.realpath(path))
self.xl.save_workbook_as(
filename=hfs_path,
overwrite=True,
file_format=file_format,
timeout=-1,
password=password,
)
self.xl = self.app.xl.workbooks[save_as_name]
elif (saved_path == "") and (path is None):
# Previously unsaved: Save under current name in current working directory
save_as_name = self.xl.name.get() + ".xlsx"
path = os.path.join(os.getcwd(), save_as_name)
hfs_path = posix_to_hfs_path(os.path.realpath(path))
self.xl.save_workbook_as(
filename=hfs_path,
overwrite=True,
file_format=file_format,
timeout=-1,
password=password,
)
self.xl = self.app.xl.workbooks[save_as_name]
elif path:
# Save under new name/location
hfs_path = posix_to_hfs_path(os.path.realpath(path))
self.xl.save_workbook_as(
filename=hfs_path,
overwrite=True,
file_format=file_format,
timeout=-1,
password=password,
)
self.xl = self.app.xl.workbooks[os.path.basename(path)]
@property
def fullname(self):
display_alerts = self.app.display_alerts
self.app.display_alerts = False
# This causes a pop-up if there's a pw protected sheet, see #1377
path = self.xl.properties().get(kw.full_name)
if "://" in path:
config = read_config_sheet(xlwings.Book(impl=self))
self.app.display_alerts = display_alerts
return fullname_url_to_local_path(
url=path,
sheet_onedrive_consumer_config=config.get("ONEDRIVE_CONSUMER_MAC"),
sheet_onedrive_commercial_config=config.get("ONEDRIVE_COMMERCIAL_MAC"),
sheet_sharepoint_config=config.get("SHAREPOINT_MAC"),
)
else:
self.app.display_alerts = display_alerts
return path
@property
def names(self):
return Names(parent=self, xl=self.xl.named_items)
def activate(self):
self.xl.activate_object()
def to_pdf(self, path, quality=None):
# quality parameter for compatibility
hfs_path = posix_to_hfs_path(path)
display_alerts = self.app.display_alerts
self.app.display_alerts = False
if Path(path).exists():
# Errors out with Parameter error (OSERROR: -50) otherwise
os.unlink(path)
self.xl.save(in_=hfs_path, as_=kw.PDF_file_format)
self.app.display_alerts = display_alerts
| Book |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 11837,
"end": 12214
} | class ____(LTTextGroup):
def analyze(self, laparams):
LTTextGroup.analyze(self, laparams)
# reorder the objects from top-left to bottom-right.
self._objs = csort(self._objs, key=lambda obj:
(1-laparams.boxes_flow)*(obj.x0) -
(1+laparams.boxes_flow)*(obj.y0+obj.y1))
return
| LTTextGroupLRTB |
python | jazzband__django-model-utils | model_utils/tracker.py | {
"start": 769,
"end": 872
} | class ____(Descriptor[T]):
def __delete__(self, instance: object) -> None:
...
| FullDescriptor |
python | getsentry__sentry | tests/sentry/integrations/jira/test_sentry_installation.py | {
"start": 556,
"end": 931
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.path = absolute_uri("extensions/jira/ui-hook/") + "?xdm_e=base_url"
self.user.name = "Sentry Admin"
self.user.save()
self.integration = self.create_provider_integration(provider="jira", name="Example Jira")
@control_silo_test
| JiraSentryInstallationViewTestCase |
python | jina-ai__jina | jina/jaml/helper.py | {
"start": 532,
"end": 2576
} | class ____(FullConstructor):
"""Convert List into tuple when doing hashing."""
def get_hashable_key(self, key):
"""
Get the hash value of key.
:param key: key value to be hashed.
:return: Hash value of key.
"""
try:
hash(key)
except:
if isinstance(key, list):
for i in range(len(key)):
if not isinstance(key[i], collections.abc.Hashable):
key[i] = self.get_hashable_key(key[i])
key = tuple(key)
return key
raise ValueError(f'unhashable key: {key}')
return key
def construct_mapping(self, node, deep=True):
"""
Build the mapping from node.
:param node: the node to traverse
:param deep: required param from YAML constructor
:return: Mapped data
"""
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return self._construct_mapping(node, deep=deep)
    def _construct_mapping(self, node, deep=True):
        """Construct a dict from a MappingNode, coercing unhashable keys.

        :param node: mapping node to convert.
        :param deep: construct values eagerly when True.
        :return: dict of constructed key/value pairs.
        :raises ConstructorError: if ``node`` is not a mapping, or a key
            cannot be made hashable.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None,
                None,
                'expected a mapping node, but found %s' % node.id,
                node.start_mark,
            )
        mapping = {}
        for key_node, value_node in node.value:
            # Keys are always constructed deeply so nested containers are complete.
            key = self.construct_object(key_node, deep=True)
            if not isinstance(key, collections.abc.Hashable):
                try:
                    # Lists become tuples so they can serve as dict keys.
                    key = self.get_hashable_key(key)
                except Exception as exc:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unacceptable key (%s)' % exc,
                        key_node.start_mark,
                    )
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
| JinaConstructor |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_india_zip.py | {
"start": 1724,
"end": 5176
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid India zipcodes.
See https://pypi.org/project/indiapins/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_india_zip": ["421306", "421301", "400078", "400051"],
"invalid_india_zip": ["-10000", "1234", "099999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_india_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_india_zip", "mostly": 1},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_india_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
    def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.

        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        # Delegates the real checks to the parent; the template scaffolding
        # below is intentionally left commented for future custom validation.
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"typed-entities",
"india",
"pincode",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@prachijain136",
"@jainamshahh", # Don't forget to add your github handle here!
],
"requirements": ["indiapins"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIndiaZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidIndiaZip |
python | getsentry__sentry | tests/sentry/integrations/repository/metric_alert/test_new_metric_alert_notification_message.py | {
"start": 251,
"end": 2826
} | class ____:
    @classmethod
    def _raises_error_for_obj(
        cls, obj: NewMetricAlertNotificationMessage, expected_error: type[Exception]
    ) -> None:
        """Assert that ``obj`` carries a validation error of the expected type."""
        error = obj.get_validation_error()
        assert error is not None
        # Re-raise the stored error so pytest can verify its concrete type.
        with pytest.raises(expected_error):
            raise error
def test_returns_error_when_message_identifier_has_error_code(self) -> None:
obj = NewMetricAlertNotificationMessage(
message_identifier="abc",
error_code=400,
)
self._raises_error_for_obj(obj, MessageIdentifierWithErrorValidationError)
def test_returns_error_when_message_identifier_has_error_details(self) -> None:
obj = NewMetricAlertNotificationMessage(
message_identifier="abc",
error_details={"some_key": 123},
)
self._raises_error_for_obj(obj, MessageIdentifierWithErrorValidationError)
def test_returns_error_when_message_identifier_has_error(self) -> None:
obj = NewMetricAlertNotificationMessage(
message_identifier="abc",
error_code=400,
error_details={"some_key": 123},
)
self._raises_error_for_obj(obj, MessageIdentifierWithErrorValidationError)
def test_returns_error_when_message_identifier_does_not_have_incident(self) -> None:
obj = NewMetricAlertNotificationMessage(
message_identifier="abc",
trigger_action_id=123,
)
self._raises_error_for_obj(obj, IncidentAndTriggerActionValidationError)
def test_returns_error_when_message_identifier_does_not_have_trigger_action(self) -> None:
obj = NewMetricAlertNotificationMessage(
message_identifier="abc",
incident_id=123,
)
self._raises_error_for_obj(obj, IncidentAndTriggerActionValidationError)
def test_returns_error_when_trigger_action_is_missing(self) -> None:
obj = NewMetricAlertNotificationMessage(
incident_id=123,
)
self._raises_error_for_obj(obj, IncidentAndTriggerActionValidationError)
def test_returns_error_when_incident_is_missing(self) -> None:
obj = NewMetricAlertNotificationMessage(
trigger_action_id=123,
)
self._raises_error_for_obj(obj, IncidentAndTriggerActionValidationError)
def test_simple(self) -> None:
obj = NewMetricAlertNotificationMessage(
incident_id=123,
trigger_action_id=123,
)
error = obj.get_validation_error()
assert error is None
| TestGetValidationError |
python | huggingface__transformers | src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py | {
"start": 6578,
"end": 21143
} | class ____(BaseImageProcessorFast):
downsample_factor = 2
do_image_splitting = True
min_tiles = 2
max_tiles = 10
use_thumbnail = True
min_image_tokens = 64
max_image_tokens = 256
encoder_patch_size = 16
tile_size = 512
max_pixels_tolerance = 2.0
do_resize = True
size = {"height": 512, "width": 512}
resample = PILImageResampling.BILINEAR
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_pad = True
return_row_col_info = False
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
valid_kwargs = Lfm2VlImageProcessorKwargs
model_input_names = ["pixel_values", "pixel_attention_mask", "spatial_shapes"]
    def __init__(self, **kwargs: Unpack[Lfm2VlImageProcessorKwargs]):
        """Initialize the processor and precompute the padding budget.

        ``max_num_patches`` is the per-image patch budget used for padding:
        the larger of the thumbnail budget and one full tile's patch count.
        """
        super().__init__(**kwargs)
        # Thumbnail path: tokens * downsample^2 encoder patches.
        max_thumbnail_image_patches = self.max_image_tokens * self.downsample_factor**2
        # Tiling path: patches per tile, or 0 when splitting is disabled.
        tile_size_patches = (self.tile_size // self.encoder_patch_size) ** 2 if self.do_image_splitting else 0
        self.max_num_patches = max(
            max_thumbnail_image_patches,
            tile_size_patches,
        )
    @lru_cache(maxsize=256)
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # instances alive for the cache's lifetime; result depends only on the two
    # int args, so a module-level helper would avoid that -- confirm before changing.
    def _target_ratios(self, min_tiles: int, max_tiles: int) -> list[tuple[int, int]]:
        """Return all (w, h) grid shapes with min_tiles <= w*h <= max_tiles, sorted by area."""
        ratios = [
            (w, h)
            for n in range(min_tiles, max_tiles + 1)
            for w in range(1, n + 1)
            for h in range(1, n + 1)
            if min_tiles <= w * h <= max_tiles
        ]
        return sorted(set(ratios), key=lambda x: x[0] * x[1])
    def _get_grid_layout(
        self,
        height: int,
        width: int,
        min_tiles: int,
        max_tiles: int,
        tile_size: int,
    ) -> tuple[int, int]:
        """Pick the tile grid whose aspect ratio best matches the image.

        Returns (grid_width, grid_height, target_width, target_height,
        total_patches), where target_* are the grid dimensions in pixels.
        """
        aspect_ratio = width / height
        target_ratios = self._target_ratios(min_tiles, max_tiles)
        # find best matching grid configuration
        grid_width, grid_height = find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, tile_size)
        target_width = tile_size * grid_width
        target_height = tile_size * grid_height
        total_patches = grid_width * grid_height
        return grid_width, grid_height, target_width, target_height, total_patches
def crop_image_to_patches(
self,
image: "torch.Tensor",
min_tiles: int,
max_tiles: int,
tile_size: int,
use_thumbnail: bool,
thumbnail_size: tuple[int],
interpolation: "F.InterpolationMode" = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Processes a high resolution image into patches.
This method splits a high resolution image into a grid of smaller patches while trying to maintain
the original aspect ratio. It finds the optimal grid configuration within the specified tile constraints.
"""
batch_size, num_channels, height, width = image.shape
grid_width, grid_height, target_width, target_height, total_patches = self._get_grid_layout(
height, width, min_tiles=min_tiles, max_tiles=max_tiles, tile_size=tile_size
)
resized_image = F.resize(
image, (target_height, target_width), interpolation=interpolation, antialias=antialias
)
# split the image into patches
processed_images = (
resized_image.unfold(2, size=tile_size, step=tile_size)
.unfold(3, size=tile_size, step=tile_size)
.contiguous()
.view(batch_size, num_channels, -1, tile_size, tile_size)
.permute(2, 0, 1, 3, 4)
.reshape(batch_size, -1, num_channels, tile_size, tile_size)
)
# Re-order processed images to a nested image structure, so it can be reordered back correctly
# Note that the images can't be stacked because the thumbnail image is of bigger size than patches
# Each image in sublist will be of shape (1, C, H, W)
processed_images = list(processed_images)
if use_thumbnail and grid_width * grid_height != 1:
total_patches += 1
thumbnail_image = F.resize(image, thumbnail_size, interpolation=interpolation, antialias=antialias)
for i in range(batch_size):
processed_images[i] = list(processed_images[i]) + list(thumbnail_image[i][None, ...])
return processed_images, grid_width, grid_height
# Adapted from Qwen-VL with minor differences
def smart_resize(
self,
height: int,
width: int,
downsample_factor: int,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
) -> tuple[int, int]:
"""
Rescales the image so that the following conditions are met:
1. Both dimensions (height and width) are divisible by 'encoder_patch_size' * 'downsample_factor'.
This ensures no padding is needed in the downsampling step.
2. The total number of pixels is within the range ['smart_resize_min_pixels', 'smart_resize_max_pixels'].
3. The aspect ratio of the image is maintained as closely as possible.
"""
total_factor = encoder_patch_size * downsample_factor
smart_resize_min_pixels = min_image_tokens * encoder_patch_size**2 * downsample_factor**2
smart_resize_max_pixels = max_image_tokens * encoder_patch_size**2 * downsample_factor**2
h_bar = max(total_factor, round_by_factor(height, total_factor))
w_bar = max(total_factor, round_by_factor(width, total_factor))
if h_bar * w_bar > smart_resize_max_pixels:
beta = math.sqrt((height * width) / smart_resize_max_pixels)
math.floor(height / beta / total_factor) * total_factor
h_bar = max(total_factor, math.floor(height / beta / total_factor) * total_factor)
w_bar = max(total_factor, math.floor(width / beta / total_factor) * total_factor)
elif h_bar * w_bar < smart_resize_min_pixels:
beta = math.sqrt(smart_resize_min_pixels / (height * width))
h_bar = math.ceil(height * beta / total_factor) * total_factor
w_bar = math.ceil(width * beta / total_factor) * total_factor
return w_bar, h_bar
    def _is_image_too_large(
        self,
        height: int,
        width: int,
        max_image_tokens: int,
        encoder_patch_size: int,
        downsample_factor: int,
        max_pixels_tolerance: float,
    ) -> bool:
        """Check if the image is too large to be processed as one tile.

        Compares the factor-rounded pixel count against the single-tile pixel
        budget scaled by ``max_pixels_tolerance``.
        """
        total_factor = encoder_patch_size * downsample_factor
        h_bar = max(encoder_patch_size, round_by_factor(height, total_factor))
        w_bar = max(encoder_patch_size, round_by_factor(width, total_factor))
        return h_bar * w_bar > max_image_tokens * encoder_patch_size**2 * downsample_factor**2 * max_pixels_tolerance
def resize_and_split(
self,
images: "torch.Tensor",
downsample_factor: int,
min_tiles: int,
max_tiles: int,
use_thumbnail: bool,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
tile_size: int,
max_pixels_tolerance: float,
interpolation: "F.InterpolationMode",
) -> "torch.Tensor":
batch_size, _, height, width = images.shape
do_image_splitting = not min_tiles == max_tiles == 1
is_image_large = self._is_image_too_large(
height=height,
width=width,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
downsample_factor=downsample_factor,
max_pixels_tolerance=max_pixels_tolerance,
)
new_width, new_height = self.smart_resize(
height=height,
width=width,
downsample_factor=downsample_factor,
min_image_tokens=min_image_tokens,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
)
# Big image will be cropped into patches and small images are just resized
if is_image_large and do_image_splitting:
images, num_cols, num_rows = self.crop_image_to_patches(
images,
min_tiles=min_tiles,
max_tiles=max_tiles,
tile_size=tile_size,
thumbnail_size=(new_height, new_width),
use_thumbnail=use_thumbnail,
interpolation=interpolation,
)
else:
num_rows = num_cols = 1
images = F.resize(images, (new_height, new_width), interpolation=interpolation)
# Make a list and treat it as single crop per image so it can be re-grouped back correctly
images = [[image] for image in images]
num_rows = [num_rows] * batch_size
num_cols = [num_cols] * batch_size
image_sizes = [[new_height, new_width]] * batch_size
return images, num_rows, num_cols, image_sizes
def _preprocess(
self,
images: ImageInput,
size: SizeDict,
interpolation: "F.InterpolationMode",
do_resize: bool,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
downsample_factor: int,
do_image_splitting: bool,
min_tiles: int,
max_tiles: int,
use_thumbnail: bool,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
tile_size: int,
max_pixels_tolerance: float,
return_tensors: Union[str, TensorType],
disable_grouping: bool,
do_pad: bool,
return_row_col_info: bool,
**kwargs,
) -> BatchFeature:
if not do_image_splitting:
min_tiles = 1
max_tiles = 1
logger.debug(
"Image splitting is disabled, setting min_tiles and max_tiles to 1. Set do_image_splitting=True to enable splitting."
)
if do_image_splitting and min_tiles > max_tiles:
raise ValueError("min_tiles must be less than or equal to max_tiles")
max_thumbnail_image_patches = max_image_tokens * downsample_factor**2
tile_size_patches = (tile_size // encoder_patch_size) ** 2 if do_image_splitting else 0
max_num_patches = max(
max_thumbnail_image_patches,
tile_size_patches,
)
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
resized_image_sizes = {}
rows_grouped, cols_grouped = {}, {}
for shape, stacked_images in grouped_images.items():
num_rows = [1] * stacked_images.shape[0]
num_cols = [1] * stacked_images.shape[0]
height, width = stacked_images.shape[-2:]
image_sizes = [[height, width]] * stacked_images.shape[0]
do_resize = True
if do_resize:
stacked_images, num_rows, num_cols, image_sizes = self.resize_and_split(
stacked_images,
downsample_factor=downsample_factor,
min_tiles=min_tiles,
max_tiles=max_tiles,
use_thumbnail=use_thumbnail,
min_image_tokens=min_image_tokens,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
tile_size=tile_size,
max_pixels_tolerance=max_pixels_tolerance,
interpolation=interpolation,
)
rows_grouped[shape] = num_rows
cols_grouped[shape] = num_cols
resized_image_sizes[shape] = image_sizes
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
batch_rows = reorder_images(rows_grouped, grouped_images_index)
batch_cols = reorder_images(cols_grouped, grouped_images_index)
resized_image_sizes = reorder_images(resized_image_sizes, grouped_images_index)
grouped_images, grouped_images_index = group_images_by_shape(
resized_images, disable_grouping=disable_grouping, is_nested=True
)
processed_images_grouped = {}
processed_masks, processed_spatial_shapes = {}, {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
batch_size, *_, height, width = stacked_images.shape
num_patches_height = height // encoder_patch_size
num_patches_width = width // encoder_patch_size
stacked_images = convert_image_to_patches(stacked_images, encoder_patch_size)
processed_spatial_shapes[shape] = [[num_patches_height, num_patches_width]] * batch_size
if do_pad:
stacked_images, pixel_mask = pad_along_first_dim(stacked_images, max_num_patches)
processed_masks[shape] = [pixel_mask] * batch_size
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
data = {"pixel_values": torch.cat([torch.stack(images) for images in processed_images])}
if do_pad:
processed_masks = reorder_images(processed_masks, grouped_images_index, is_nested=True)
processed_spatial_shapes = reorder_images(processed_spatial_shapes, grouped_images_index, is_nested=True)
processed_masks = torch.cat([torch.stack(masks) for masks in processed_masks])
processed_spatial_shapes = torch.cat(
[torch.tensor(spatial_shape) for spatial_shape in processed_spatial_shapes]
)
data.update({"pixel_attention_mask": processed_masks, "spatial_shapes": processed_spatial_shapes})
if return_row_col_info:
data["image_rows"] = batch_rows
data["image_cols"] = batch_cols
data["image_sizes"] = resized_image_sizes
encoding = BatchFeature(data=data, tensor_type=return_tensors)
return encoding
__all__ = ["Lfm2VlImageProcessorFast"]
| Lfm2VlImageProcessorFast |
python | celery__celery | examples/django/demoapp/migrations/0001_initial.py | {
"start": 92,
"end": 486
} | class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Widget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
],
),
]
| Migration |
python | ipython__ipython | IPython/core/completer.py | {
"start": 14676,
"end": 17104
} | class ____:
"""
Completion object used and returned by IPython completers.
.. warning::
Unstable
This function is unstable, API may change without warning.
It will also raise unless use in proper context manager.
This act as a middle ground :any:`Completion` object between the
:any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
object. While Jedi need a lot of information about evaluator and how the
code should be ran/inspected, PromptToolkit (and other frontend) mostly
need user facing information.
- Which range should be replaced replaced by what.
- Some metadata (like completion type), or meta information to displayed to
the use user.
For debugging purpose we can also store the origin of the completion (``jedi``,
``IPython.python_matches``, ``IPython.magics_matches``...).
"""
__slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
    def __init__(
        self,
        start: int,
        end: int,
        text: str,
        *,
        type: Optional[str] = None,
        _origin="",
        signature="",
    ) -> None:
        """Create a completion replacing the [start, end) range with *text*.

        :param start: start offset of the text range to replace.
        :param end: end offset of the text range to replace.
        :param text: replacement text.
        :param type: optional completion type (e.g. ``'function'``).
        :param _origin: debugging tag naming the completer that produced this.
        :param signature: optional signature string to display.
        """
        warnings.warn(
            "``Completion`` is a provisional API (as of IPython 6.0). "
            "It may change without warnings. "
            "Use in corresponding context manager.",
            category=ProvisionalCompleterWarning,
            stacklevel=2,
        )

        self.start = start
        self.end = end
        self.text = text
        self.type = type
        self.signature = signature
        self._origin = _origin
def __repr__(self):
return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
(self.start, self.end, self.text, self.type or '?', self.signature or '?')
    def __eq__(self, other) -> bool:
        """
        Equality and hash do not hash the type (as some completers may not be
        able to infer the type), but are used to (partially) de-duplicate
        completions.

        Completely de-duplicating completions is a bit trickier than just
        comparing, as it depends on surrounding text, which Completions are
        not aware of.
        """
        return self.start == other.start and \
            self.end == other.end and \
            self.text == other.text

    def __hash__(self):
        # Must stay consistent with __eq__: hash the same three fields.
        return hash((self.start, self.end, self.text))
| Completion |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 13911,
"end": 14006
} | class ____(Egg):
    def __init__(self, thing: int) -> None:
        # NOTE(review): pure delegation to the parent initializer; presumably a
        # deliberate useless-parent-delegation lint fixture -- confirm before removing.
        super().__init__(thing)
| Spam |
python | joke2k__faker | faker/providers/job/ja_JP/__init__.py | {
"start": 42,
"end": 1125
} | class ____(BaseProvider):
"""
source: https://ja.wikipedia.org/wiki/%E8%81%B7%E6%A5%AD%E4%B8%80%E8%A6%A7
"""
jobs = [
"アイドル",
"アーティスト",
"アートディレクター",
"アナウンサー",
"アニメーター",
"医師",
"イラストレーター",
"医療事務員",
"ウェディングプランナー",
"ウェブデザイナー",
"占い師",
"運転士",
"映画監督",
"営業",
"栄養士",
"エステティシャン",
"絵本作家",
"演歌歌手",
"エンジニア" "演奏家",
"お笑い芸人",
"音楽家",
"音響技術者",
"介護ヘルパー",
"気象予報士",
"脚本家",
"救急救命士",
"行政書士",
"グラフィックデザイナー",
"経営者",
"検察官",
"ゲームクリエイター",
"建築家",
"航海士",
"コピーライター",
"高等学校教員",
"公認会計士",
"公務員",
"裁判官",
"作曲家",
"歯科医師",
"司法書士",
"小説家",
"寿司職人",
"測量士",
"大学教授",
"調理師",
"電気工事士",
"農家",
"配管工",
"バスガイド",
"花火師",
"漫画家",
"モデル",
"薬剤師",
"YouTuber",
"和紙職人",
]
| Provider |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 2223,
"end": 2947
} | class ____(FilterSharing, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-filter-sharing/#api-rest-api-3-filter-id-permission-post
"""
    def generate(self):
        """Grant up to three random group share permissions to every filter."""
        filters_stream = Filters(authenticator=self._session.auth, domain=self._domain)
        for filters in filters_stream.read_records(sync_mode=SyncMode.full_refresh):
            # 0-3 permission records per filter; group names may repeat.
            for index in range(random.randrange(4)):
                group_name = random.choice(["Test group 0", "Test group 1", "Test group 2"])
                payload = json.dumps({"type": "group", "groupname": group_name})
                self.generate_record(payload, stream_slice={"filter_id": filters["id"]})
| FilterSharingGenerator |
python | getsentry__sentry | src/social_auth/backends/asana.py | {
"start": 425,
"end": 950
} | class ____(OAuthBackend):
"""Asana OAuth authentication backend"""
name = "asana"
EXTRA_DATA = [
("email", "email"),
("name", "full_name"),
("gid", "id"),
("refresh_token", "refresh_token"),
]
ID_KEY = "gid"
def get_user_details(self, response):
"""Return user details from Asana account"""
return {
"email": response.get("email"),
"id": response.get("gid"),
"full_name": response.get("name"),
}
| AsanaBackend |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-wordlift/llama_index/readers/wordlift/base.py | {
"start": 819,
"end": 10555
} | class ____(BaseReader):
"""
A reader class for fetching and transforming data from WordLift GraphQL API.
Args:
endpoint (str): The API endpoint URL.
headers (dict): The request headers.
query (str): The GraphQL query.
fields (str): The fields to extract from the API response.
configure_options (dict): Additional configuration options.
page (int): The page number.
rows (int): The number of rows per page.
Attributes:
endpoint (str): The API endpoint URL.
headers (dict): The request headers.
query (str): The GraphQL query.
fields (str): The fields to extract from the API response.
configure_options (dict): Additional configuration options.
page (int): The page number.
rows (int): The number of rows per page.
"""
def __init__(self, endpoint, headers, query, fields, configure_options) -> None:
self.endpoint = endpoint
self.headers = headers
self.query = query
self.fields = fields
self.configure_options = configure_options
def fetch_data(self) -> dict:
"""
Fetches data from the WordLift GraphQL API.
Returns:
dict: The API response data.
Raises:
APIConnectionError: If there is an error connecting to the API.
"""
try:
query = self.alter_query()
response = requests.post(
self.endpoint, json={"query": query}, headers=self.headers
)
response.raise_for_status()
data = response.json()
if ERRORS_KEY in data:
raise APICallError(data[ERRORS_KEY])
return data
except requests.exceptions.RequestException as e:
logging.error("Error connecting to the API:", exc_info=True)
raise APICallError("Error connecting to the API") from e
def transform_data(self, data: dict) -> List[Document]:
"""
Transforms the fetched data into a list of Document objects.
Args:
data (dict): The API response data.
Returns:
List[Document]: The list of transformed documents.
Raises:
DataTransformError: If there is an error transforming the data.
"""
try:
data = data[DATA_KEY][self.fields]
documents = []
text_fields = self.configure_options.get("text_fields", [])
metadata_fields = self.configure_options.get("metadata_fields", [])
for item in data:
if not all(key in item for key in text_fields):
logging.warning(
f"Skipping document due to missing text fields: {item}"
)
continue
row = {}
for key, value in item.items():
if key in text_fields or key in metadata_fields:
row[key] = value
else:
row[key] = clean_value(value)
text_parts = [
get_separated_value(row, field.split("."))
for field in text_fields
if get_separated_value(row, field.split(".")) is not None
]
text_parts = flatten_list(text_parts)
text = " ".join(text_parts)
extra_info = {}
for field in metadata_fields:
field_keys = field.split(".")
value = get_separated_value(row, field_keys)
if value is None:
logging.warning(f"Using default value for {field}")
value = "n.a"
if isinstance(value, list) and len(value) != 0:
value = value[0]
if is_url(value) and is_valid_html(value):
value = value.replace("\n", "")
extra_info[field] = value
else:
cleaned_value = clean_value(value)
cleaned_value = cleaned_value.replace("\n", "")
extra_info[field] = cleaned_value
text = text.replace("\n", "")
plain_text = re.sub("<.*?>", "", text)
document = Document(text=plain_text, extra_info=extra_info)
documents.append(document)
return documents
except Exception as e:
logging.error("Error transforming data:", exc_info=True)
raise DataTransformError("Error transforming data") from e
def load_data(self) -> List[Document]:
"""
Loads the data by fetching and transforming it.
Returns:
List[Document]: The list of loaded documents.
"""
try:
data = self.fetch_data()
return self.transform_data(data)
except (APICallError, DataTransformError):
logging.error("Error loading data:", exc_info=True)
raise
def alter_query(self):
"""
Alters the GraphQL query by adding pagination arguments.
Returns:
str: The altered GraphQL query with pagination arguments.
"""
from graphql import parse, print_ast
from graphql.language.ast import ArgumentNode, IntValueNode, NameNode
DEFAULT_PAGE = 0
DEFAULT_ROWS = 500
query = self.query
page = DEFAULT_PAGE
rows = DEFAULT_ROWS
ast = parse(query)
field_node = ast.definitions[0].selection_set.selections[0]
if not any(arg.name.value == "page" for arg in field_node.arguments):
page_argument = ArgumentNode(
name=NameNode(value="page"), value=IntValueNode(value=page)
)
rows_argument = ArgumentNode(
name=NameNode(value="rows"), value=IntValueNode(value=rows)
)
field_node.arguments = (*field_node.arguments, page_argument, rows_argument)
return print_ast(ast)
def is_url(text: str) -> bool:
"""
Checks if the given text is a URL.
Args:
text (str): The text to check.
Returns:
bool: True if the text is a URL, False otherwise.
"""
try:
result = urlparse(text)
return all([result.scheme, result.netloc])
except ValueError:
return False
def is_valid_html(content: str) -> bool:
"""
Checks if the given content is a valid HTML document.
"""
if content is None:
return False
if is_url(content):
try:
response = requests.get(content)
if response.status_code == 200:
html_content = response.text
return (
BeautifulSoup(html_content, "html.parser").find("html") is not None
)
else:
return False
except (
requests.exceptions.RequestException,
requests.exceptions.ConnectionError,
):
# If there is a connection error or the URL doesn't resolve, skip it
return False
return BeautifulSoup(content, "html.parser").find("html") is not None
@staticmethod
def clean_value(x: any) -> any:
"""
Cleans a value by checking if it's a URL and fetching its content using the WordLift Inspect API.
"""
if x is not None and not isinstance(x, list):
return clean_html(x)
return x
@staticmethod
def clean_html(text: str) -> str:
"""
Cleans HTML content by fetching its text representation using BeautifulSoup.
"""
if text is None:
return ""
if isinstance(text, dict):
return str(text)
if isinstance(text, str):
try:
if is_url(text):
response = requests.get(text)
if response.status_code == 200:
html_content = response.text
soup = BeautifulSoup(html_content, "lxml")
cleaned_text = soup.get_text()
else:
cleaned_text = ""
elif os.path.isfile(text):
with open(text) as file:
soup = BeautifulSoup(file, "lxml")
cleaned_text = soup.get_text()
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
soup = BeautifulSoup(text, "lxml")
cleaned_text = soup.get_text()
return cleaned_text
except (
requests.exceptions.RequestException,
requests.exceptions.ConnectionError,
):
# If there is a connection error or the URL doesn't resolve, skip it
return ""
return str(text)
@staticmethod
def get_separated_value(item: dict, field_keys: List[str]) -> any:
"""
Retrieves the metadata value from the nested item based on field keys.
"""
if not field_keys:
return item
key = field_keys[0]
if isinstance(item, list):
if len(item) == 0:
return None
else:
item = item[0]
if isinstance(item, dict) and key in item:
return get_separated_value(item[key], field_keys[1:])
return None
@staticmethod
def flatten_list(lst):
"""
Flattens a nested list.
"""
if lst is None:
return []
flattened = []
for item in lst:
if isinstance(item, list):
flattened.extend(flatten_list(item))
else:
flattened.append(item)
return flattened
| WordLiftLoader |
python | fluentpython__example-code-2e | 10-dp-1class-func/untyped/strategy.py | {
"start": 1302,
"end": 2600
} | class ____: # the Context
    def __init__(self, customer, cart, promotion=None):
        """Context object: customer, a defensive copy of the cart, optional promotion callable."""
        self.customer = customer
        self.cart = list(cart)  # copy so later mutation of the input doesn't affect the order
        self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self) # <1>
return self.total() - discount
    def __repr__(self):
        # e.g. <Order total: 42.00 due: 39.90>
        return f'<Order total: {self.total():.2f} due: {self.due():.2f}>'
# <2>
def fidelity_promo(order):  # <3>
    """5% discount for customers with 1000 or more fidelity points"""
    if order.customer.fidelity >= 1000:
        return order.total() * .05
    return 0
def bulk_item_promo(order):
    """10% discount for each LineItem with 20 or more units"""
    # Sum of per-item discounts; items below the 20-unit threshold contribute 0.
    return sum(item.total() * .1 for item in order.cart if item.quantity >= 20)
def large_order_promo(order):
    """7% discount for orders with 10 or more distinct items"""
    distinct_count = len({item.product for item in order.cart})
    return order.total() * .07 if distinct_count >= 10 else 0
# end::STRATEGY[]
| Order |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 13444,
"end": 17242
class ____(Metric):
  """Encapsulates metrics that perform a reduce operation on the values.

  Args:
    reduction: a `tf.keras.metrics.Reduction` enum value.
    name: string name of the metric instance.
    dtype: (Optional) data type of the metric result.
  """

  def __init__(self, reduction, name, dtype=None):
    super(Reduce, self).__init__(name=name, dtype=dtype)
    self.reduction = reduction
    # Running numerator of the reduction.
    self.total = self.add_weight(
        'total', initializer=init_ops.zeros_initializer)
    # Mean-style reductions also track a running denominator.
    if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
                     metrics_utils.Reduction.WEIGHTED_MEAN]:
      self.count = self.add_weight(
          'count', initializer=init_ops.zeros_initializer)

  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the metric.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    [values], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [values], sample_weight)
    try:
      values = math_ops.cast(values, self._dtype)
    except (ValueError, TypeError):
      msg = ('The output of a metric function can only be a single Tensor. '
             'Got: %s' % (values,))
      if isinstance(values, dict):
        msg += ('. To return a dict of values, implement a custom Metric '
                'subclass.')
      raise RuntimeError(msg)
    if sample_weight is not None:
      sample_weight = math_ops.cast(sample_weight, self._dtype)
      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
          values, sample_weight=sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = weights_broadcast_ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Broadcasting failed: collapse the trailing axes of `values` down to
        # the weights' rank before weighting, matching the reduction kind.
        # Reduce values to same ndim as weight array
        ndim = backend.ndim(values)
        weight_ndim = backend.ndim(sample_weight)
        if self.reduction == metrics_utils.Reduction.SUM:
          values = math_ops.reduce_sum(
              values, axis=list(range(weight_ndim, ndim)))
        else:
          values = math_ops.reduce_mean(
              values, axis=list(range(weight_ndim, ndim)))
      values = math_ops.multiply(values, sample_weight)
    value_sum = math_ops.reduce_sum(values)
    # Order the `total` update after the sum so graph mode evaluates them
    # consistently.
    with ops.control_dependencies([value_sum]):
      update_total_op = self.total.assign_add(value_sum)
    # Exit early if the reduction doesn't have a denominator.
    if self.reduction == metrics_utils.Reduction.SUM:
      return update_total_op
    # Update `count` for reductions that require a denominator.
    if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
      num_values = math_ops.cast(array_ops.size(values), self._dtype)
    elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
      if sample_weight is None:
        num_values = math_ops.cast(array_ops.size(values), self._dtype)
      else:
        num_values = math_ops.reduce_sum(sample_weight)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)
    # `count` is updated only after `total`, so a reader never observes a
    # denominator ahead of its numerator.
    with ops.control_dependencies([update_total_op]):
      return self.count.assign_add(num_values)

  def result(self):
    # SUM has no denominator; mean-style reductions divide total by count.
    if self.reduction == metrics_utils.Reduction.SUM:
      return array_ops.identity(self.total)
    elif self.reduction in [
        metrics_utils.Reduction.WEIGHTED_MEAN,
        metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
    ]:
      # div_no_nan yields 0 when count == 0 (i.e. no updates yet).
      return math_ops.div_no_nan(self.total, self.count)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)
| Reduce |
python | openai__openai-python | src/openai/types/completion_create_params.py | {
"start": 7023,
"end": 7650
class ____(CompletionCreateParamsBase):
    # `stream` is pinned to literal True in this variant; type checkers use it
    # to discriminate the streaming request shape from the non-streaming one.
    stream: Required[Literal[True]]
    """Whether to stream back partial progress.

    If set, tokens will be sent as data-only
    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    as they become available, with the stream terminated by a `data: [DONE]`
    message.

    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    """


# Union of the non-streaming and streaming request-parameter variants.
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
| CompletionCreateParamsStreaming |
python | kubernetes-client__python | kubernetes/client/models/v1_device_selector.py | {
"start": 383,
"end": 3328
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'cel': 'V1CELDeviceSelector'
    }

    attribute_map = {
        'cel': 'cel'
    }

    def __init__(self, cel=None, local_vars_configuration=None):  # noqa: E501
        """V1DeviceSelector - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._cel = None
        self.discriminator = None

        # Only assign through the property setter when a value was provided.
        if cel is not None:
            self.cel = cel

    @property
    def cel(self):
        """Gets the cel of this V1DeviceSelector.  # noqa: E501


        :return: The cel of this V1DeviceSelector.  # noqa: E501
        :rtype: V1CELDeviceSelector
        """
        return self._cel

    @cel.setter
    def cel(self, cel):
        """Sets the cel of this V1DeviceSelector.


        :param cel: The cel of this V1DeviceSelector.  # noqa: E501
        :type: V1CELDeviceSelector
        """

        self._cel = cel

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models and dicts of
        # models by delegating to their own to_dict() when available.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1DeviceSelector):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1DeviceSelector):
            return True

        return self.to_dict() != other.to_dict()
| V1DeviceSelector |
python | doocs__leetcode | solution/3100-3199/3184.Count Pairs That Form a Complete Day I/Solution.py | {
"start": 0,
"end": 235
} | class ____:
def countCompleteDayPairs(self, hours: List[int]) -> int:
cnt = Counter()
ans = 0
for x in hours:
ans += cnt[(24 - (x % 24)) % 24]
cnt[x % 24] += 1
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/core/property/either.py | {
"start": 1567,
"end": 4190
class ____(ParameterizedProperty[Any]):
    """ Accept values according to a sequence of other property types.

    Example:

    .. code-block:: python

        >>> class EitherModel(HasProps):
        ...     prop = Either(Bool, Int, Auto)
        ...

        >>> m = EitherModel()

        >>> m.prop = True

        >>> m.prop = 10

        >>> m.prop = "auto"

        >>> m.prop = 10.3      # ValueError !!

        >>> m.prop = "foo"     # ValueError !!

    """

    def __init__(self, type_param0: TypeOrInst[Property[Any]], *type_params: TypeOrInst[Property[Any]],
            default: Init[T] = Intrinsic, help: str | None = None) -> None:
        super().__init__(type_param0, *type_params, default=default, help=help)
        # The union accepts every alternative any of its sub-types accepts.
        for tp in self.type_params:
            self.alternatives.extend(tp.alternatives)

    def transform(self, value: Any) -> Any:
        # First sub-type that can transform the value wins, so declaration
        # order of the type parameters matters.
        for param in self.type_params:
            try:
                return param.transform(value)
            except ValueError:
                pass

        raise ValueError(f"Could not transform {value!r}")

    def validate(self, value: Any, detail: bool = True) -> None:
        super().validate(value, detail)

        # A value is valid iff at least one sub-type accepts it.
        if any(param.is_valid(value) for param in self.type_params):
            return

        from ...util.strings import nice_join
        msg = "" if not detail else f"expected an element of either {nice_join([ str(param) for param in self.type_params ])}, got {value!r}"
        raise ValueError(msg)

    def wrap(self, value):
        # Chain wrapping through every sub-type; each may pass the value
        # through unchanged.
        for tp in self.type_params:
            value = tp.wrap(value)
        return value

    def replace(self, old: type[Property[Any]], new: Property[Any]) -> Property[Any]:
        if self.__class__ == old:
            return new
        else:
            # Rebuild the union with each sub-type replaced recursively.
            params = [ type_param.replace(old, new) for type_param in self.type_params ]
            return Either(*params)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(Either)
def _sphinx_type_link(obj: Either[Any]):
    """Render the Sphinx cross-reference for an Either property, listing the
    links for each of its alternative sub-types."""
    linked_params = [type_link(tp) for tp in obj.type_params]
    return f"{property_link(obj)}({', '.join(linked_params)})"
| Either |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 78731,
"end": 79001
class ____(AddPrefixSeries):
    """Expression for ``Series.add_suffix``: append a string suffix to every
    index label, reusing the prefix implementation's machinery."""

    _parameters = ["frame", "suffix"]
    operation = M.add_suffix
    _preserves_partitioning_information = True

    def _divisions(self):
        # Divisions are index-label boundaries, so they receive the same
        # suffix as the labels themselves.
        return tuple(str(d) + self.suffix for d in self.frame.divisions)
| AddSuffixSeries |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/gather_nd_op_test.py | {
"start": 18043,
"end": 18817
class ____(test.Benchmark):
  """Micro-benchmark for `array_ops.gather_nd` on a fixed 5-D parameter
  tensor with 10000 random full-rank indices."""

  def benchmark_gather_nd_op(self):
    shape = (100, 47, 18, 170, 13)
    # Fixed seed keeps params/indices identical across benchmark runs.
    np.random.seed(127)
    params = np.random.rand(*shape)
    # One random coordinate per dimension, stacked into (10000, 5) indices.
    indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T

    with session.Session():
      t_params = variables.Variable(params)
      t_indices = variables.Variable(indices)
      gather_op = array_ops.gather_nd(t_params, t_indices)
      self.evaluate(variables.global_variables_initializer())
      # Warm-up iterations before timing.
      for _ in range(10):
        self.evaluate(gather_op)
      t1 = time.time()
      for _ in range(1000):
        self.evaluate(gather_op)
      t2 = time.time()
      self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)


if __name__ == "__main__":
  test.main()
| GatherNdOpBenchmark |
python | jina-ai__jina | tests/integration/v2_api/test_docs_matrix_tail_pea.py | {
"start": 1173,
"end": 3120
class ____(Executor):
    @requests
    def merge(self, docs_matrix, **kwargs):
        # `docs_matrix` holds one DocumentArray per upstream shard/executor.
        # Merge by document id: the first occurrence is kept, and chunks of
        # later duplicates are appended to it. OrderedDict preserves
        # first-seen order in the merged result.
        results = OrderedDict()
        for docs in docs_matrix:
            for doc in docs:
                if doc.id in results:
                    results[doc.id].chunks.extend(doc.chunks)
                else:
                    results[doc.id] = doc
        return DocumentArray(list(results.values()))
@pytest.mark.timeout(60)
@pytest.mark.parametrize('num_replicas, num_shards', [(1, 1), (2, 2)])
def test_sharding_tail_pod(num_replicas, num_shards):
    """TODO(Maximilian): Make (1, 2) and (2, 1) also workable"""
    port = random_port()
    # `uses_after` merges the per-shard results, so each shard is expected to
    # contribute exactly one match to the final document.
    f = Flow(port=port).add(
        uses=DummyExecutor,
        replicas=num_replicas,
        shards=num_shards,
        uses_after=MatchMerger,
    )
    with f:
        results = Client(port=f.port).post(
            on='/search', inputs=Document(matches=[Document()]), return_responses=True
        )
        assert len(results[0].docs[0].matches) == num_shards
def test_merging_head_pod():
    """Two parallel executors feed a third whose `uses_before` merges the
    chunks produced by each branch back into one document."""
    port = random_port()

    def multimodal_generator():
        # Each input document carries one chunk per modality.
        for i in range(0, 5):
            document = Document()
            document.chunks.append(Document(modality='1', content='1'))
            document.chunks.append(Document(modality='2', content='2'))
            yield document

    f = (
        Flow(port=port)
        .add(uses={'jtype': 'DummyExecutor', 'with': {'mode': '1'}}, name='executor1')
        .add(
            uses={'jtype': 'DummyExecutor', 'with': {'mode': '2'}},
            name='executor2',
            needs='gateway',
        )
        .add(
            uses_before=ChunkMerger, name='executor3', needs=['executor1', 'executor2']
        )
    )
    with f:
        results = Client(port=f.port).post(
            on='/search', inputs=multimodal_generator(), return_responses=True
        )
        # After merging, each of the 5 documents has both modality chunks.
        assert len(results[0].docs[0].chunks) == 2
        assert len(results[0].docs) == 5
| ChunkMerger |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc.py | {
"start": 43732,
"end": 49679
class ____(GoogleCloudBaseOperator):
    """
    Delete a cluster in a project.

    :param region: Required. The Cloud Dataproc region in which to handle the request (templated).
    :param cluster_name: Required. The cluster name (templated).
    :param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to (templated).
    :param cluster_uuid: Optional. Specifying the ``cluster_uuid`` means the RPC should fail
        if cluster with specified UUID does not exist.
    :param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and the
        first ``google.longrunning.Operation`` created and stored in the backend is returned.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
        retried.
    :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
        ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param deferrable: Run operator in the deferrable mode.
    :param polling_interval_seconds: Time (seconds) to wait between calls to check the cluster status.
    """

    template_fields: Sequence[str] = (
        "project_id",
        "region",
        "cluster_name",
        "gcp_conn_id",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        region: str,
        cluster_name: str,
        project_id: str = PROVIDE_PROJECT_ID,
        cluster_uuid: str | None = None,
        request_id: str | None = None,
        retry: AsyncRetry | _MethodDefault = DEFAULT,
        timeout: float = 1 * 60 * 60,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        polling_interval_seconds: int = 10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Deferrable mode polls the cluster via a trigger, so the polling
        # interval must be strictly positive.
        if deferrable and polling_interval_seconds <= 0:
            raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
        self.project_id = project_id
        self.region = region
        self.cluster_name = cluster_name
        self.cluster_uuid = cluster_uuid
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain
        self.deferrable = deferrable
        self.polling_interval_seconds = polling_interval_seconds

    def execute(self, context: Context) -> None:
        hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        operation = self._delete_cluster(hook)
        if not self.deferrable:
            # Synchronous path: block until the long-running delete operation
            # completes.
            hook.wait_for_operation(timeout=self.timeout, result_retry=self.retry, operation=operation)
            self.log.info("Cluster deleted.")
        else:
            # Deferrable path: NotFound here means the cluster is already gone,
            # so the task finishes without deferring; otherwise hand polling
            # off to the trigger.
            try:
                hook.get_cluster(
                    project_id=self.project_id, region=self.region, cluster_name=self.cluster_name
                )
            except NotFound:
                self.log.info("Cluster deleted.")
                return
            except Exception as e:
                raise AirflowException(str(e))

            # Absolute deadline handed to the trigger.
            end_time: float = time.time() + self.timeout

            self.defer(
                trigger=DataprocDeleteClusterTrigger(
                    gcp_conn_id=self.gcp_conn_id,
                    project_id=self.project_id,
                    region=self.region,
                    cluster_name=self.cluster_name,
                    end_time=end_time,
                    metadata=self.metadata,
                    impersonation_chain=self.impersonation_chain,
                    polling_interval_seconds=self.polling_interval_seconds,
                ),
                method_name="execute_complete",
            )

    def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> Any:
        """
        Act as a callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was successful.
        """
        if event and event["status"] == "error":
            raise AirflowException(event["message"])
        if event is None:
            raise AirflowException("No event received in trigger callback")
        self.log.info("Cluster deleted.")

    def _delete_cluster(self, hook: DataprocHook):
        # Issues the delete RPC and returns the long-running operation handle;
        # waiting/polling is handled by the caller.
        self.log.info("Deleting cluster: %s", self.cluster_name)
        return hook.delete_cluster(
            project_id=self.project_id,
            region=self.region,
            cluster_name=self.cluster_name,
            cluster_uuid=self.cluster_uuid,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
| DataprocDeleteClusterOperator |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 8168,
"end": 8453
class ____(Token):
    # Emitted once at the very start of a YAML token stream; carries the
    # detected input encoding (if any) alongside the inherited marks.
    __slots__ = ('encoding',)
    id = '<stream start>'

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.encoding = encoding
| StreamStartToken |
python | explosion__spaCy | spacy/tokens/underscore.py | {
"start": 234,
"end": 5590
class ____:
    """Proxy that exposes custom extension attributes (the ``._.`` namespace)
    on Doc, Span and Token.

    Values ultimately live in ``doc.user_data``; the ``(start, end)`` pair in
    the key distinguishes Doc, Span and Token owners (see ``_get_key``).
    """

    # Default values of these types are copied per-owner to avoid sharing
    # mutable state (see __getattr__ and issue #2581).
    mutable_types = (dict, list, set)
    doc_extensions: Dict[Any, Any] = {}
    span_extensions: Dict[Any, Any] = {}
    token_extensions: Dict[Any, Any] = {}
    _extensions: Dict[str, Any]
    _obj: Union["Doc", "Span", "Token"]
    _start: Optional[int]
    _end: Optional[int]

    def __init__(
        self,
        extensions: Dict[str, Any],
        obj: Union["Doc", "Span", "Token"],
        start: Optional[int] = None,
        end: Optional[int] = None,
    ):
        # object.__setattr__ is required because this class overrides
        # __setattr__ to route writes into doc.user_data.
        object.__setattr__(self, "_extensions", extensions)
        object.__setattr__(self, "_obj", obj)
        # Assumption is that for doc values, _start and _end will both be None
        # Span will set non-None values for _start and _end
        # Token will have _start be non-None, _end be None
        # This lets us key everything into the doc.user_data dictionary,
        # (see _get_key), and lets us use a single Underscore class.
        object.__setattr__(self, "_doc", obj.doc)
        object.__setattr__(self, "_start", start)
        object.__setattr__(self, "_end", end)

    def __dir__(self) -> List[str]:
        # Hack to enable autocomplete on custom extensions
        extensions = list(self._extensions.keys())
        return ["set", "get", "has"] + extensions

    def __getattr__(self, name: str) -> Any:
        """Resolve an extension attribute: getter, bound method, stored value,
        or the registered default (copied if mutable)."""
        if name not in self._extensions:
            raise AttributeError(Errors.E046.format(name=name))
        default, method, getter, setter = self._extensions[name]
        if getter is not None:
            return getter(self._obj)
        elif method is not None:
            method_partial = functools.partial(method, self._obj)
            # Hack to port over docstrings of the original function
            # See https://stackoverflow.com/q/27362727/6400719
            method_docstring = method.__doc__ or ""
            method_docstring_prefix = (
                "This method is a partial function and its first argument "
                "(the object it's called on) will be filled automatically. "
            )
            method_partial.__doc__ = method_docstring_prefix + method_docstring
            return method_partial
        else:
            key = self._get_key(name)
            if key in self._doc.user_data:
                return self._doc.user_data[key]
            elif isinstance(default, self.mutable_types):
                # Handle mutable default arguments (see #2581)
                new_default = copy.copy(default)
                self.__setattr__(name, new_default)
                return new_default
            return default

    def __setattr__(self, name: str, value: Any):
        """Write through a registered setter, or store in doc.user_data."""
        if name not in self._extensions:
            raise AttributeError(Errors.E047.format(name=name))
        default, method, getter, setter = self._extensions[name]
        if setter is not None:
            return setter(self._obj, value)
        else:
            self._doc.user_data[self._get_key(name)] = value

    def set(self, name: str, value: Any):
        return self.__setattr__(name, value)

    def get(self, name: str) -> Any:
        return self.__getattr__(name)

    def has(self, name: str) -> bool:
        return name in self._extensions

    def _get_key(self, name: str) -> Tuple[str, str, Optional[int], Optional[int]]:
        # The "._." marker namespaces extension data within doc.user_data.
        return ("._.", name, self._start, self._end)

    @classmethod
    def get_state(cls) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]:
        return cls.token_extensions, cls.span_extensions, cls.doc_extensions

    @classmethod
    def load_state(
        cls, state: Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]
    ) -> None:
        cls.token_extensions, cls.span_extensions, cls.doc_extensions = state
def get_ext_args(**kwargs: Any):
    """Validate and convert arguments. Reused in Doc, Token and Span."""
    default = kwargs.get("default")
    getter = kwargs.get("getter")
    setter = kwargs.get("setter")
    method = kwargs.get("method")
    # A setter without a getter is meaningless.
    if getter is None and setter is not None:
        raise ValueError(Errors.E089)
    # Exactly one of default / method / getter must be supplied. `default`
    # counts as supplied even when its value is None, hence the kwargs check.
    nr_defined = sum(
        1
        for supplied in ("default" in kwargs, method is not None, getter is not None)
        if supplied
    )
    if nr_defined != 1:
        raise ValueError(Errors.E083.format(nr_defined=nr_defined))
    # setter, getter and method must each be callable when given.
    for arg_name, arg_value in (("setter", setter), ("getter", getter), ("method", method)):
        if arg_value is not None and not hasattr(arg_value, "__call__"):
            raise ValueError(Errors.E091.format(name=arg_name, value=repr(arg_value)))
    return (default, method, getter, setter)
def is_writable_attr(ext):
    """Check if an extension attribute is writable.
    ext (tuple): The (default, getter, setter, method) tuple available via
        {Doc,Span,Token}.get_extension.
    RETURNS (bool): Whether the attribute is writable.
    """
    default, method, getter, setter = ext
    # Writable when a setter exists, when a (non-None) default exists, or when
    # the extension is entirely unset.
    has_setter = setter is not None
    has_default = default is not None
    fully_unset = all(value is None for value in ext)
    return has_setter or has_default or fully_unset
| Underscore |
python | Lightning-AI__lightning | tests/tests_pytorch/accelerators/test_xla.py | {
"start": 7817,
"end": 11987
class ____(BoringModel):
    def __init__(self):
        # NOTE(review): super(BoringModel, self) deliberately skips
        # BoringModel.__init__ and calls its parent's initializer directly —
        # presumably to avoid BoringModel's own layer setup; confirm intent.
        super(BoringModel, self).__init__()
        self.layer = nn.Linear(32, 10, bias=False)
        self.net_a = SubModule(self.layer)
        self.layer_2 = nn.Linear(10, 32, bias=False)
        # `self.layer` is intentionally shared between net_a and net_b: the
        # parameter-tying test below asserts the weights stay equal.
        self.net_b = SubModule(self.layer)

    def forward(self, x):
        x = self.net_a(x)
        x = self.layer_2(x)
        return self.net_b(x)
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_auto_parameters_tying_tpus_nested_module(tmp_path):
    """Tied weights (net_a/net_b sharing one Linear) must remain equal after
    a short training run on TPU."""
    model = NestedModule()
    trainer = Trainer(default_root_dir=tmp_path, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
    trainer.fit(model)

    assert torch.all(torch.eq(model.net_a.layer.weight, model.net_b.layer.weight))
def test_tpu_invalid_raises(tpu_available, mps_count_0):
    """Invalid accelerator/strategy/precision combinations must raise."""
    # XLAAccelerator cannot be driven by a plain DDPStrategy.
    strategy = DDPStrategy(accelerator=XLAAccelerator(), precision_plugin=XLAPrecision())
    with pytest.raises(ValueError, match="XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy`"):
        Trainer(strategy=strategy, devices=8)

    # XLAStrategy requires the XLAPrecision plugin.
    accelerator = XLAAccelerator()
    with pytest.raises(TypeError, match="can only work with the `XLAPrecision` plugin"):
        XLAStrategy(accelerator=accelerator, precision_plugin=Precision())

    accelerator = XLAAccelerator()
    strategy = DDPStrategy(accelerator=accelerator, precision_plugin=XLAPrecision())
    with pytest.raises(
        ValueError, match="The `XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy` or `XLAStrategy"
    ):
        Trainer(strategy=strategy, devices=8)
@RunIf(skip_windows=True)
@mock.patch("lightning.pytorch.strategies.xla.XLAStrategy.set_world_ranks")
def test_xla_checkpoint_plugin_being_default(_, tpu_available):
    # TPU runs must default to the XLA-aware checkpoint IO plugin.
    trainer = Trainer(accelerator="tpu", devices=8)
    assert isinstance(trainer.strategy.checkpoint_io, XLACheckpointIO)
@RunIf(tpu=True)
@patch("lightning.pytorch.strategies.xla.XLAStrategy.root_device")
def test_xla_mp_device_dataloader_attribute(_, monkeypatch):
    """process_dataloader must wrap a plain DataLoader in MpDeviceLoader but
    leave an already-wrapped loader untouched."""
    dataset = RandomDataset(32, 64)
    dataloader = DataLoader(dataset)
    strategy = XLAStrategy()
    isinstance_return = True

    import torch_xla.distributed.parallel_loader as parallel_loader

    class MpDeviceLoaderMock(MagicMock):
        def __instancecheck__(self, instance):
            # to make `isinstance(dataloader, MpDeviceLoader)` pass with a mock as class
            return isinstance_return

    mp_loader_mock = MpDeviceLoaderMock()
    monkeypatch.setattr(parallel_loader, "MpDeviceLoader", mp_loader_mock)

    # Already "an MpDeviceLoader" (per the mocked isinstance): returned as-is.
    processed_dataloader = strategy.process_dataloader(dataloader)
    assert processed_dataloader is dataloader
    mp_loader_mock.assert_not_called()  # no-op

    # A plain DataLoader: wrapped, and key attributes forwarded to the wrapper.
    isinstance_return = False
    processed_dataloader = strategy.process_dataloader(dataloader)
    mp_loader_mock.assert_called_with(dataloader, strategy.root_device)
    assert processed_dataloader.dataset == processed_dataloader._loader.dataset
    assert processed_dataloader.batch_sampler == processed_dataloader._loader.batch_sampler
def test_warning_if_tpus_not_used(tpu_available):
    # Selecting CPU while a TPU is available should warn the user.
    with pytest.warns(UserWarning, match="TPU available but not used"):
        Trainer(accelerator="cpu")
@pytest.mark.parametrize(
    ("devices", "expected_device_ids"),
    [
        (1, [0]),
        (8, list(range(8))),
        ("8", list(range(8))),
        ([2], [2]),
        ("2,", [2]),
    ],
)
@RunIf(min_python="3.9")  # mocking issue
def test_trainer_config_device_ids(devices, expected_device_ids, tpu_available, monkeypatch):
    """The various `devices` spellings (int, str, list) must all resolve to
    the expected list of XLA device ids."""
    monkeypatch.setattr(lightning.fabric.accelerators.xla, "_using_pjrt", lambda: True)
    # Record every torch.device("xla", i) construction.
    mock = DeviceMock()
    monkeypatch.setattr(torch, "device", mock)
    if _IS_WINDOWS:
        # simulate fork support on windows
        monkeypatch.setattr(torch.multiprocessing, "get_all_start_methods", lambda: ["fork", "spawn"])

    trainer = Trainer(accelerator="tpu", devices=devices)
    assert mock.mock_calls == [call("xla", i) for i in expected_device_ids]
    assert len(trainer.device_ids) == len(expected_device_ids)
    assert trainer.num_devices == len(expected_device_ids)
| NestedModule |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 6167,
"end": 6290
class ____(ParentI):
    def f(self):
        # Lint-rule fixture (pyupgrade UP008): the bare `super` reference and
        # the explicit `builtins.super(ChildI8, self)` call are intentional
        # inputs for the rule — do not "modernize" them.
        __class__: "Any"
        super
        builtins.super(ChildI8, self).f()
| ChildI8 |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 142593,
"end": 156093
} | class ____(Patch):
"""
A fancy arrow patch.
It draws an arrow using the `ArrowStyle`. It is primarily used by the
`~.axes.Axes.annotate` method. For most purposes, use the annotate method for
drawing arrows.
The head and tail positions are fixed at the specified start and end points
of the arrow, but the size and shape (in display coordinates) of the arrow
does not change when the axis is moved or zoomed.
"""
_edge_default = True
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return f"{type(self).__name__}(({x1:g}, {y1:g})->({x2:g}, {y2:g}))"
else:
return f"{type(self).__name__}({self._path_original})"
@_docstring.interpd
def __init__(self, posA=None, posB=None, *,
path=None, arrowstyle="simple", connectionstyle="arc3",
patchA=None, patchB=None, shrinkA=2, shrinkB=2,
mutation_scale=1, mutation_aspect=1, **kwargs):
"""
**Defining the arrow position and path**
There are two ways to define the arrow position and path:
- **Start, end and connection**:
The typical approach is to define the start and end points of the
arrow using *posA* and *posB*. The curve between these two can
further be configured using *connectionstyle*.
If given, the arrow curve is clipped by *patchA* and *patchB*,
allowing it to start/end at the border of these patches.
Additionally, the arrow curve can be shortened by *shrinkA* and *shrinkB*
to create a margin between start/end (after possible clipping) and the
drawn arrow.
- **path**: Alternatively if *path* is provided, an arrow is drawn along
this Path. In this case, *connectionstyle*, *patchA*, *patchB*,
*shrinkA*, and *shrinkB* are ignored.
**Styling**
The *arrowstyle* defines the styling of the arrow head, tail and shaft.
The resulting arrows can be styled further by setting the `.Patch`
properties such as *linewidth*, *color*, *facecolor*, *edgecolor*
etc. via keyword arguments.
Parameters
----------
posA, posB : (float, float), optional
(x, y) coordinates of start and end point of the arrow.
The actually drawn start and end positions may be modified
through *patchA*, *patchB*, *shrinkA*, and *shrinkB*.
*posA*, *posB* are exclusive of *path*.
path : `~matplotlib.path.Path`, optional
If provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
*path* is exclusive of *posA*, *posB*.
arrowstyle : str or `.ArrowStyle`, default: 'simple'
The styling of arrow head, tail and shaft. This can be
- `.ArrowStyle` or one of its subclasses
- The shorthand string name (e.g. "->") as given in the table below,
optionally containing a comma-separated list of style parameters,
e.g. "->, head_length=10, head_width=5".
The style parameters are scaled by *mutation_scale*.
The following arrow styles are available. See also
:doc:`/gallery/text_labels_and_annotations/fancyarrow_demo`.
%(ArrowStyle:table)s
Only the styles ``<|-``, ``-|>``, ``<|-|>`` ``simple``, ``fancy``
and ``wedge`` contain closed paths and can be filled.
connectionstyle : str or `.ConnectionStyle` or None, optional, \
default: 'arc3'
`.ConnectionStyle` with which *posA* and *posB* are connected.
This can be
- `.ConnectionStyle` or one of its subclasses
- The shorthand string name as given in the table below, e.g. "arc3".
%(ConnectionStyle:table)s
Ignored if *path* is provided.
patchA, patchB : `~matplotlib.patches.Patch`, default: None
Optional Patches at *posA* and *posB*, respectively. If given,
the arrow path is clipped by these patches such that head and tail
are at the border of the patches.
Ignored if *path* is provided.
shrinkA, shrinkB : float, default: 2
Shorten the arrow path at *posA* and *posB* by this amount in points.
This allows to add a margin between the intended start/end points and
the arrow.
Ignored if *path* is provided.
mutation_scale : float, default: 1
Value with which attributes of *arrowstyle* (e.g., *head_length*)
will be scaled.
mutation_aspect : None or float, default: None
The height of the rectangle will be squeezed by this value before
the mutation and the mutated box will be stretched by the inverse
of it.
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Patch` properties, optional
Here is a list of available `.Patch` properties:
%(Patch:kwdoc)s
In contrast to other patches, the default ``capstyle`` and
``joinstyle`` for `FancyArrowPatch` are set to ``"round"``.
"""
# Traditionally, the cap- and joinstyle for FancyArrowPatch are round
kwargs.setdefault("joinstyle", JoinStyle.round)
kwargs.setdefault("capstyle", CapStyle.round)
super().__init__(**kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
else:
raise ValueError("Either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self._dpi_cor = 1.0
def set_positions(self, posA, posB):
"""
Set the start and end positions of the connecting path.
Parameters
----------
posA, posB : None, tuple
(x, y) coordinates of arrow tail and arrow head respectively. If
`None` use current value.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
"""
Set the tail patch.
Parameters
----------
patchA : `.patches.Patch`
"""
self.patchA = patchA
self.stale = True
def set_patchB(self, patchB):
"""
Set the head patch.
Parameters
----------
patchB : `.patches.Patch`
"""
self.patchB = patchB
self.stale = True
@_docstring.interpd
def set_connectionstyle(self, connectionstyle=None, **kwargs):
"""
Set the connection style, possibly with further attributes.
Attributes from the previous connection style are not reused.
Without argument (or with ``connectionstyle=None``), the available box
styles are returned as a human-readable string.
Parameters
----------
connectionstyle : str or `~matplotlib.patches.ConnectionStyle`
The style of the connection: either a `.ConnectionStyle` instance,
or a string, which is the style name and optionally comma separated
attributes (e.g. "Arc,armA=30,rad=10"). Such a string is used to
construct a `.ConnectionStyle` object, as documented in that class.
The following connection styles are available:
%(ConnectionStyle:table_and_accepts)s
**kwargs
Additional attributes for the connection style. See the table above
for supported parameters.
Examples
--------
::
set_connectionstyle("Arc,armA=30,rad=10")
set_connectionstyle("arc", armA=30, rad=10)
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
self._connector = (
ConnectionStyle(connectionstyle, **kwargs)
if isinstance(connectionstyle, str) else connectionstyle)
self.stale = True
def get_connectionstyle(self):
"""Return the `ConnectionStyle` used."""
return self._connector
@_docstring.interpd
def set_arrowstyle(self, arrowstyle=None, **kwargs):
"""
Set the arrow style, possibly with further attributes.
Attributes from the previous arrow style are not reused.
Without argument (or with ``arrowstyle=None``), the available box
styles are returned as a human-readable string.
Parameters
----------
arrowstyle : str or `~matplotlib.patches.ArrowStyle`
The style of the arrow: either a `.ArrowStyle` instance, or a
string, which is the style name and optionally comma separated
attributes (e.g. "Fancy,head_length=0.2"). Such a string is used to
construct a `.ArrowStyle` object, as documented in that class.
The following arrow styles are available:
%(ArrowStyle:table_and_accepts)s
**kwargs
Additional attributes for the arrow style. See the table above for
supported parameters.
Examples
--------
::
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
self._arrow_transmuter = (
ArrowStyle(arrowstyle, **kwargs)
if isinstance(arrowstyle, str) else arrowstyle)
self.stale = True
def get_arrowstyle(self):
"""Return the arrowstyle object."""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
Parameters
----------
scale : float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
Returns
-------
scalar
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
Parameters
----------
aspect : float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""Return the aspect ratio of the bbox mutation."""
return (self._mutation_aspect if self._mutation_aspect is not None
else 1) # backcompat.
def get_path(self):
"""Return the path of the arrow in the data coordinates."""
# The path is generated in display coordinates, then converted back to
# data coordinates.
_path, fillable = self._get_path_in_displaycoord()
if np.iterable(fillable):
_path = Path.make_compound_path(*_path)
return self.get_transform().inverted().transform_path(_path)
def _get_path_in_displaycoord(self):
"""Return the mutated path of the arrow in display coordinates."""
dpi_cor = self._dpi_cor
if self._posA_posB is not None:
posA = self._convert_xy_units(self._posA_posB[0])
posB = self._convert_xy_units(self._posA_posB[1])
(posA, posB) = self.get_transform().transform((posA, posB))
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect())
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
# FIXME: dpi_cor is for the dpi-dependency of the linewidth. There
# could be room for improvement. Maybe _get_path_in_displaycoord could
# take a renderer argument, but get_path should be adapted too.
self._dpi_cor = renderer.points_to_pixels(1.)
path, fillable = self._get_path_in_displaycoord()
if not np.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
self._draw_paths_with_artist_properties(
renderer,
[(p, affine, self._facecolor if f and self._facecolor[3] else None)
for p, f in zip(path, fillable)])
| FancyArrowPatch |
python | walkccc__LeetCode | solutions/1467. Probability of a Two Boxes Having The Same Number of Distinct Balls/1467.py | {
"start": 91,
"end": 1266
} | class ____:
def getProbability(self, balls: list[int]) -> float:
n = sum(balls) // 2
fact = [1, 1, 2, 6, 24, 120, 720]
def cases(
i: int,
ballsCountA: int,
ballsCountB: int,
colorsCountA: int,
colorsCountB,
boxCase: BoxCase) -> float:
if ballsCountA > n or ballsCountB > n:
return 0
if i == len(balls):
return (1 if boxCase == BoxCase.EQUAL_BALLS
else colorsCountA == colorsCountB)
ans = 0.0
# balls taken from A for `balls[i]`
for ballsTakenA in range(balls[i] + 1):
ballsTakenB = balls[i] - ballsTakenA
newcolorsCountA = colorsCountA + (ballsTakenA > 0)
newcolorsCountB = colorsCountB + (ballsTakenB > 0)
ans += (cases(i + 1,
ballsCountA + ballsTakenA,
ballsCountB + ballsTakenB,
newcolorsCountA, newcolorsCountB, boxCase) /
(fact[ballsTakenA] * fact[ballsTakenB]))
return ans
return (cases(0, 0, 0, 0, 0, BoxCase.EQUAL_DISTANT_BALLS) /
cases(0, 0, 0, 0, 0, BoxCase.EQUAL_BALLS))
| Solution |
python | huggingface__transformers | src/transformers/models/bamba/modeling_bamba.py | {
"start": 46141,
"end": 46882
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
@use_kernel_forward_from_hub("RMSNorm")
| BambaMLP |
python | pypa__hatch | backend/src/hatchling/version/source/code.py | {
"start": 126,
"end": 2348
} | class ____(VersionSourceInterface):
PLUGIN_NAME = "code"
def get_version_data(self) -> dict:
import sys
from importlib.util import module_from_spec, spec_from_file_location
relative_path = self.config.get("path")
if not relative_path:
message = "option `path` must be specified"
raise ValueError(message)
if not isinstance(relative_path, str):
message = "option `path` must be a string"
raise TypeError(message)
path = os.path.normpath(os.path.join(self.root, relative_path))
if not os.path.isfile(path):
message = f"file does not exist: {relative_path}"
raise OSError(message)
expression = self.config.get("expression") or "__version__"
if not isinstance(expression, str):
message = "option `expression` must be a string"
raise TypeError(message)
search_paths = self.config.get("search-paths", [])
if not isinstance(search_paths, list):
message = "option `search-paths` must be an array"
raise TypeError(message)
absolute_search_paths = []
for i, search_path in enumerate(search_paths, 1):
if not isinstance(search_path, str):
message = f"entry #{i} of option `search-paths` must be a string"
raise TypeError(message)
absolute_search_paths.append(os.path.normpath(os.path.join(self.root, search_path)))
spec = spec_from_file_location(os.path.splitext(path)[0], path)
module = module_from_spec(spec) # type: ignore[arg-type]
old_search_paths = list(sys.path)
try:
sys.path[:] = [*absolute_search_paths, *old_search_paths]
spec.loader.exec_module(module) # type: ignore[union-attr]
finally:
sys.path[:] = old_search_paths
# Execute the expression to determine the version
version = eval(expression, vars(module)) # noqa: S307
return {"version": version}
def set_version(self, version: str, version_data: dict) -> None:
message = "Cannot rewrite loaded code"
raise NotImplementedError(message)
| CodeSource |
python | sqlalchemy__sqlalchemy | test/orm/test_lazy_relations.py | {
"start": 37576,
"end": 40149
} | class ____(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_m2o_noload(self):
"""test that a NULL foreign key doesn't trigger a lazy load"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
sess = fixture_session()
ad1 = Address(email_address="somenewaddress", id=12)
sess.add(ad1)
sess.flush()
sess.expunge_all()
ad2 = sess.get(Address, 1)
ad3 = sess.get(Address, ad1.id)
def go():
# one lazy load
assert ad2.user.name == "jack"
# no lazy load
assert ad3.user is None
self.assert_sql_count(testing.db, go, 1)
@testing.fixture()
def composite_overlapping_fixture(self, decl_base, connection):
def go(allow_partial_pks):
class Section(decl_base):
__tablename__ = "sections"
year = Column(Integer, primary_key=True)
idx = Column(Integer, primary_key=True)
parent_idx = Column(Integer)
if not allow_partial_pks:
__mapper_args__ = {"allow_partial_pks": False}
ForeignKeyConstraint((year, parent_idx), (year, idx))
parent = relationship(
"Section",
primaryjoin=and_(
year == remote(year),
foreign(parent_idx) == remote(idx),
),
)
decl_base.metadata.create_all(connection)
connection.commit()
with Session(connection) as sess:
sess.add(Section(year=5, idx=1, parent_idx=None))
sess.commit()
return Section
return go
@testing.variation("allow_partial_pks", [True, False])
def test_composite_m2o_load_partial_pks(
self, allow_partial_pks, composite_overlapping_fixture
):
Section = composite_overlapping_fixture(allow_partial_pks)
session = fixture_session()
section = session.get(Section, (5, 1))
with self.assert_statement_count(
testing.db, 1 if allow_partial_pks else 0
):
testing.is_none(section.parent)
| M2OGetTest |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 70396,
"end": 71349
} | class ____:
@xfail_xp_backends(
'dask.array', reason='https://github.com/dask/dask/issues/11883'
)
def test_basic(self, xp):
z = xp.asarray([])
p = xp.asarray([(-1+1j) / math.sqrt(2), (-1-1j) / math.sqrt(2)])
k = 1
z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5)
xp_assert_equal(z_hp, xp.asarray([0.0, 0.0], dtype=z_hp.dtype))
xp_assert_close(_sort_cmplx(p_hp, xp=xp), _sort_cmplx(p, xp=xp) * 5)
assert math.isclose(k_hp, 1.0, rel_tol=4e-7)
z = xp.asarray([-2j, +2j])
p = xp.asarray([-0.75, -0.5-0.5j, -0.5+0.5j])
k = 3
z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6)
xp_assert_close(
_sort_cmplx(z_hp, xp=xp), _sort_cmplx([-3j, 0, +3j], xp=xp)
)
xp_assert_close(
_sort_cmplx(p_hp, xp=xp), _sort_cmplx([-8, -6-6j, -6+6j], xp=xp)
)
assert k_hp == 32.0
@make_xp_test_case(lp2bp_zpk)
| TestLp2hp_zpk |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_responses.py | {
"start": 372,
"end": 491
} | class ____(BaseModel):
"""Custom model with a custom docstring."""
value: float
description: str
| CustomModel |
python | celery__celery | t/integration/test_backend.py | {
"start": 280,
"end": 1120
} | class ____:
def test_crud(self, manager):
backend = AzureBlockBlobBackend(
app=manager.app,
url=os.environ["AZUREBLOCKBLOB_URL"])
key_values = {("akey%d" % i).encode(): "avalue%d" % i
for i in range(5)}
for key, value in key_values.items():
backend._set_with_state(key, value, states.SUCCESS)
actual_values = backend.mget(key_values.keys())
expected_values = list(key_values.values())
assert expected_values == actual_values
for key in key_values:
backend.delete(key)
def test_get_missing(self, manager):
backend = AzureBlockBlobBackend(
app=manager.app,
url=os.environ["AZUREBLOCKBLOB_URL"])
assert backend.get(b"doesNotExist") is None
| test_AzureBlockBlobBackend |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/functional_saver_test.py | {
"start": 1712,
"end": 10996
} | class ____(test.TestCase):
def setUp(self):
super(SaverTest, self).setUp()
cpus = config.list_physical_devices("CPU")
# Set 3 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
self.local_options = checkpoint_options.CheckpointOptions(
experimental_io_device=LOCALHOST)
def _get_tensors_by_task(self, root):
serialized_tensors, _, _, _ = (
checkpoint.TrackableSaver(graph_view.ObjectGraphView(root))
._gather_serialized_tensors(None))
tensors_by_task = {}
for tensor_dict in serialized_tensors.values():
for checkpoint_key, maybe_tensor in tensor_dict.items():
if not isinstance(maybe_tensor, dict):
maybe_tensor = {"": maybe_tensor}
for slice_spec, tensor in maybe_tensor.items():
tensor_task = saveable_object_util.set_cpu0(tensor.device)
(tensors_by_task
.setdefault(tensor_task, {})
.setdefault(checkpoint_key, {})[slice_spec]) = tensor
return tensors_by_task
@test_util.run_in_graph_and_eager_modes
def test_resource_variable(self):
v1 = resource_variable_ops.ResourceVariable(2.)
self.evaluate(v1.initializer)
saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(2, len(gfile.Glob(prefix + "*")))
self.evaluate(v1.assign(1.))
self.evaluate(saver.restore(prefix))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
self.evaluate(v2.initializer)
second_saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v2, "x"))
self.evaluate(second_saver.restore(prefix))
self.assertEqual(2., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def test_resource_variable_use_localhost(self):
v1 = resource_variable_ops.ResourceVariable(2.)
self.evaluate(v1.initializer)
saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix), self.local_options))
self.assertEqual(2, len(gfile.Glob(prefix + "*")))
self.evaluate(v1.assign(1.))
self.evaluate(saver.restore(prefix, self.local_options))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
self.evaluate(v2.initializer)
second_saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v2, "x"))
self.evaluate(second_saver.restore(prefix, self.local_options))
self.assertEqual(2., self.evaluate(v2))
# In graph mode, verify that the save and restore ops were set to run on
# localhost.
if not context.executing_eagerly():
for op in ops.get_default_graph().get_operations():
if op.type in ("SaveV2", "RestoreV2"):
self.assertEqual(LOCALHOST, op.device)
def test_to_proto(self):
v1 = resource_variable_ops.ResourceVariable(2.)
saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
proto_accumulator = []
wrapped = wrap_function.wrap_function(
lambda: proto_accumulator.append(saver.to_proto()), signature=())
self.assertEqual(1, len(proto_accumulator))
proto = proto_accumulator[0]
save = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
fetches=wrapped.graph.get_tensor_by_name(proto.save_tensor_name))
restore = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
fetches=wrapped.graph.get_operation_by_name(proto.restore_op_name))
save_path = save(constant_op.constant(prefix))
v1.assign(1.)
restore(constant_op.constant(save_path))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
second_saver = functional_saver.MultiDeviceSaver.from_saveables(
saveable_object_util.saveable_objects_for_op(v2, "x"))
second_saver.restore(save_path)
self.assertEqual(2., self.evaluate(v2))
@test_util.disable_tfrt("b/171765113: server is not supported in TFRT yet.")
def test_checkpoint_is_sharded_by_task(self):
servers = [server_lib.Server.create_local_server() for _ in range(3)]
cluster_spec = server_lib.ClusterSpec({
"worker": [s.target[len("grpc://"):] for s in servers]})
remote.connect_to_cluster(cluster_spec)
with ops.device("/job:worker/task:0/cpu:0"):
v0 = resource_variable_ops.ResourceVariable(0.)
with ops.device("/job:worker/task:1/cpu:0"):
v1 = resource_variable_ops.ResourceVariable(1.)
with ops.device("/job:worker/task:2/cpu:0"):
v2 = resource_variable_ops.ResourceVariable(2.)
self.evaluate([v0.initializer, v1.initializer, v2.initializer])
saver = functional_saver.MultiDeviceSaver.from_saveables(
list(saveable_object_util.saveable_objects_for_op(v0, "v0")) +
list(saveable_object_util.saveable_objects_for_op(v1, "v1")) +
list(saveable_object_util.saveable_objects_for_op(v2, "v2")))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(4, len(gfile.Glob(prefix + "*")))
self.evaluate(v0.assign(-1.))
self.evaluate(v1.assign(-1.))
self.evaluate(v2.assign(-1.))
self.evaluate(saver.restore(constant_op.constant(prefix)))
self.assertEqual(0., self.evaluate(v0))
self.assertEqual(1., self.evaluate(v1))
self.assertEqual(2., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def test_checkpoint_multi_device_using_localhost(self):
with ops.device("cpu:0"):
v0 = resource_variable_ops.ResourceVariable(0.)
with ops.device("cpu:1"):
v1 = resource_variable_ops.ResourceVariable(1.)
with ops.device("cpu:2"):
v2 = resource_variable_ops.ResourceVariable(2.)
self.evaluate([v0.initializer, v1.initializer, v2.initializer])
saver = functional_saver.MultiDeviceSaver.from_saveables(
list(saveable_object_util.saveable_objects_for_op(v0, "v0")) +
list(saveable_object_util.saveable_objects_for_op(v1, "v1")) +
list(saveable_object_util.saveable_objects_for_op(v2, "v2")))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix), self.local_options))
self.assertEqual(2, len(gfile.Glob(prefix + "*")))
self.evaluate(v0.assign(-1.))
self.evaluate(v1.assign(-1.))
self.evaluate(v2.assign(-1.))
self.evaluate(
saver.restore(constant_op.constant(prefix), self.local_options))
self.assertEqual(0., self.evaluate(v0))
self.assertEqual(1., self.evaluate(v1))
self.assertEqual(2., self.evaluate(v2))
# In graph mode, verify that the save and restore ops were set to run on
# localhost.
if not context.executing_eagerly():
for op in ops.get_default_graph().get_operations():
if op.type in ("SaveV2", "RestoreV2", "MergeV2Checkpoints"):
self.assertEqual(LOCALHOST, op.device)
def test_single_task_save_singlehost_multidevice(self):
root = module.Module()
with ops.device("cpu:0"):
v0 = resource_variable_ops.ResourceVariable(0.)
with ops.device("cpu:1"):
v1 = resource_variable_ops.ResourceVariable(1.)
with ops.device("cpu:2"):
v2 = resource_variable_ops.ResourceVariable(2.)
root.v0 = v0
root.v1 = v1
root.v2 = v2
tensors_by_task = self._get_tensors_by_task(root)
var_names = [
"v0/.ATTRIBUTES/VARIABLE_VALUE",
"v1/.ATTRIBUTES/VARIABLE_VALUE",
"v2/.ATTRIBUTES/VARIABLE_VALUE"
]
vars_numpy = [v0.numpy(), v1.numpy(), v2.numpy()]
tmp_dir = self.get_temp_dir()
for device in ["cpu:0", "cpu:1", "cpu:2"]:
for shard, (_, tensor_slice_dict) in enumerate(
sorted(tensors_by_task.items())[1:]):
with ops.device(device):
shard_prefix = gen_io_ops.sharded_filename(
os.path.join(tmp_dir, str(shard)), shard, 3)
functional_saver._single_task_save(
shard_prefix, tensor_slice_dict)
start_time = time.time()
max_save_time = start_time + 5 # seconds
while not (gfile.ListDirectory(tmp_dir) or time.time() > max_save_time):
pass # eager execution is lovely
self.assertNotEmpty(gfile.ListDirectory(tmp_dir))
with ops.device(device):
restored_dict = functional_saver._single_task_restore(
shard_prefix, tensor_slice_dict)
self.evaluate(restored_dict)
self.assertEqual(
restored_dict[var_names[shard]][""].numpy(),
vars_numpy[shard])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| SaverTest |
python | davidhalter__parso | parso/python/tokenize.py | {
"start": 8651,
"end": 8830
} | class ____(Token):
def __repr__(self):
return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %
self._replace(type=self.type.name))
| PythonToken |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 7296,
"end": 8401
} | class ____(ASTBaseBase):
def __init__(self, attrs: list[ASTAttribute]) -> None:
self.attrs = attrs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTAttributeList):
return NotImplemented
return self.attrs == other.attrs
def __hash__(self) -> int:
return hash(self.attrs)
def __len__(self) -> int:
return len(self.attrs)
def __add__(self, other: ASTAttributeList) -> ASTAttributeList:
return ASTAttributeList(self.attrs + other.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
return ' '.join(map(transform, self.attrs))
def describe_signature(self, signode: TextElement) -> None:
if len(self.attrs) == 0:
return
self.attrs[0].describe_signature(signode)
if len(self.attrs) == 1:
return
for attr in self.attrs[1:]:
signode.append(addnodes.desc_sig_space())
attr.describe_signature(signode)
################################################################################
| ASTAttributeList |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/aggregate.py | {
"start": 7255,
"end": 7927
} | class ____(GenericBase):
@staticmethod
def visit_eq(expression: Expression, value: UUID) -> Condition:
return contains(UUIDScalar.visit_eq(expression, value))
@staticmethod
def visit_neq(expression: Expression, value: UUID) -> Condition:
return does_not_contain(UUIDScalar.visit_eq(expression, value))
@staticmethod
def visit_in(expression: Expression, value: list[UUID]) -> Condition:
return contains(UUIDScalar.visit_in(expression, value))
@staticmethod
def visit_not_in(expression: Expression, value: list[UUID]) -> Condition:
return does_not_contain(UUIDScalar.visit_in(expression, value))
| SumOfUUIDScalar |
python | davidhalter__jedi | test/completion/arrays.py | {
"start": 4476,
"end": 5620
} | class ____(list):
def __getitem__(self, index):
return super()[index]
#?
SuperYeah([1])[0]
#?
SuperYeah()[0]
# -----------------
# conversions
# -----------------
a = [1, ""]
#? int() str()
list(a)[1]
#? int() str()
list(a)[0]
#?
set(a)[0]
#? int() str()
list(set(a))[1]
#? int() str()
next(iter(set(a)))
#? int() str()
list(list(set(a)))[1]
# does not yet work, because the recursion catching is not good enough (catches # to much)
#? int() str()
list(set(list(set(a))))[1]
#? int() str()
list(set(set(a)))[1]
# frozenset
#? int() str()
list(frozenset(a))[1]
#? int() str()
list(set(frozenset(a)))[1]
# iter
#? int() str()
list(iter(a))[1]
#? int() str()
list(iter(list(set(a))))[1]
# tuple
#? int() str()
tuple(a)[1]
#? int() str()
tuple(list(set(a)))[1]
#? int()
tuple((1,))[0]
# implementation detail for lists, should not be visible
#? []
list().__iterable
# With a list comprehension.
for i in set(a for a in [1]):
#? int()
i
# -----------------
# Merged Arrays
# -----------------
for x in [1] + ['']:
#? int() str()
x
# -----------------
# Potential Recursion Issues
# -----------------
| SuperYeah |
python | redis__redis-py | tests/test_asyncio/test_cluster.py | {
"start": 98176,
"end": 111034
} | class ____:
"""
Tests for the NodesManager class
"""
async def test_load_balancer(self, r: RedisCluster) -> None:
n_manager = r.nodes_manager
lb = n_manager.read_load_balancer
slot_1 = 1257
slot_2 = 8975
node_1 = ClusterNode(default_host, 6379, PRIMARY)
node_2 = ClusterNode(default_host, 6378, REPLICA)
node_3 = ClusterNode(default_host, 6377, REPLICA)
node_4 = ClusterNode(default_host, 6376, PRIMARY)
node_5 = ClusterNode(default_host, 6375, REPLICA)
n_manager.slots_cache = {
slot_1: [node_1, node_2, node_3],
slot_2: [node_4, node_5],
}
primary1_name = n_manager.slots_cache[slot_1][0].name
primary2_name = n_manager.slots_cache[slot_2][0].name
list1_size = len(n_manager.slots_cache[slot_1])
list2_size = len(n_manager.slots_cache[slot_2])
# default load balancer strategy: LoadBalancerStrategy.ROUND_ROBIN
# slot 1
assert lb.get_server_index(primary1_name, list1_size) == 0
assert lb.get_server_index(primary1_name, list1_size) == 1
assert lb.get_server_index(primary1_name, list1_size) == 2
assert lb.get_server_index(primary1_name, list1_size) == 0
# slot 2
assert lb.get_server_index(primary2_name, list2_size) == 0
assert lb.get_server_index(primary2_name, list2_size) == 1
assert lb.get_server_index(primary2_name, list2_size) == 0
lb.reset()
assert lb.get_server_index(primary1_name, list1_size) == 0
assert lb.get_server_index(primary2_name, list2_size) == 0
# reset the indexes before load balancing strategy test
lb.reset()
# load balancer strategy: LoadBalancerStrategy.ROUND_ROBIN_REPLICAS
for i in [1, 2, 1]:
srv_index = lb.get_server_index(
primary1_name,
list1_size,
load_balancing_strategy=LoadBalancingStrategy.ROUND_ROBIN_REPLICAS,
)
assert srv_index == i
# reset the indexes before load balancing strategy test
lb.reset()
# load balancer strategy: LoadBalancerStrategy.RANDOM_REPLICA
for i in range(5):
srv_index = lb.get_server_index(
primary1_name,
list1_size,
load_balancing_strategy=LoadBalancingStrategy.RANDOM_REPLICA,
)
assert srv_index > 0 and srv_index <= 2
async def test_init_slots_cache_not_all_slots_covered(self) -> None:
"""
Test that if not all slots are covered it should raise an exception
"""
# Missing slot 5460
cluster_slots = [
[0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
[5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
[10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
with pytest.raises(RedisClusterException) as ex:
rc = await get_mocked_redis_client(
host=default_host,
port=default_port,
cluster_slots=cluster_slots,
require_full_coverage=True,
)
await rc.aclose()
assert str(ex.value).startswith(
"All slots are not covered after query all startup_nodes."
)
async def test_init_slots_cache_not_require_full_coverage_success(self) -> None:
"""
When require_full_coverage is set to False and not all slots are
covered the cluster client initialization should succeed
"""
# Missing slot 5460
cluster_slots = [
[0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
[5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
[10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
rc = await get_mocked_redis_client(
host=default_host,
port=default_port,
cluster_slots=cluster_slots,
require_full_coverage=False,
)
assert 5460 not in rc.nodes_manager.slots_cache
await rc.aclose()
async def test_init_slots_cache(self) -> None:
"""
Test that slots cache can in initialized and all slots are covered
"""
good_slots_resp = [
[0, 5460, ["127.0.0.1", 7000], ["127.0.0.2", 7003]],
[5461, 10922, ["127.0.0.1", 7001], ["127.0.0.2", 7004]],
[10923, 16383, ["127.0.0.1", 7002], ["127.0.0.2", 7005]],
]
rc = await get_mocked_redis_client(
host=default_host, port=default_port, cluster_slots=good_slots_resp
)
n_manager = rc.nodes_manager
assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
for slot_info in good_slots_resp:
all_hosts = ["127.0.0.1", "127.0.0.2"]
all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
slot_start = slot_info[0]
slot_end = slot_info[1]
for i in range(slot_start, slot_end + 1):
assert len(n_manager.slots_cache[i]) == len(slot_info[2:])
assert n_manager.slots_cache[i][0].host in all_hosts
assert n_manager.slots_cache[i][1].host in all_hosts
assert n_manager.slots_cache[i][0].port in all_ports
assert n_manager.slots_cache[i][1].port in all_ports
assert len(n_manager.nodes_cache) == 6
await rc.aclose()
async def test_init_slots_cache_cluster_mode_disabled(self) -> None:
"""
Test that creating a RedisCluster failes if one of the startup nodes
has cluster mode disabled
"""
with pytest.raises(RedisClusterException) as e:
rc = await get_mocked_redis_client(
cluster_slots_raise_error=True,
host=default_host,
port=default_port,
cluster_enabled=False,
)
await rc.aclose()
assert "Cluster mode is not enabled on this node" in str(e.value)
async def test_empty_startup_nodes(self) -> None:
"""
It should not be possible to create a node manager with no nodes
specified
"""
with pytest.raises(RedisClusterException):
await NodesManager([], False, {}).initialize()
async def test_wrong_startup_nodes_type(self) -> None:
"""
If something other then a list type itteratable is provided it should
fail
"""
with pytest.raises(RedisClusterException):
await NodesManager({}, False, {}).initialize()
async def test_init_slots_cache_slots_collision(self) -> None:
"""
Test that if 2 nodes do not agree on the same slots setup it should
raise an error. In this test both nodes will say that the first
slots block should be bound to different servers.
"""
with mock.patch.object(
ClusterNode, "execute_command", autospec=True
) as execute_command:
async def mocked_execute_command(self, *args, **kwargs):
"""
Helper function to return custom slots cache data from
different redis nodes
"""
if self.port == 7000:
result = [
[0, 5460, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
[5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
]
elif self.port == 7001:
result = [
[0, 5460, ["127.0.0.1", 7001], ["127.0.0.1", 7003]],
[5461, 10922, ["127.0.0.1", 7000], ["127.0.0.1", 7004]],
]
else:
result = []
if args[0] == "CLUSTER SLOTS":
return result
elif args[0] == "INFO":
return {"cluster_enabled": True}
elif args[1] == "cluster-require-full-coverage":
return {"cluster-require-full-coverage": "yes"}
execute_command.side_effect = mocked_execute_command
with pytest.raises(RedisClusterException) as ex:
node_1 = ClusterNode("127.0.0.1", 7000)
node_2 = ClusterNode("127.0.0.1", 7001)
async with RedisCluster(startup_nodes=[node_1, node_2]):
...
assert str(ex.value).startswith(
"startup_nodes could not agree on a valid slots cache"
), str(ex.value)
async def test_cluster_one_instance(self) -> None:
"""
If the cluster exists of only 1 node then there is some hacks that must
be validated they work.
"""
node = ClusterNode(default_host, default_port)
cluster_slots = [[0, 16383, ["", default_port]]]
rc = await get_mocked_redis_client(
startup_nodes=[node], cluster_slots=cluster_slots
)
n = rc.nodes_manager
assert len(n.nodes_cache) == 1
n_node = rc.get_node(node_name=node.name)
assert n_node is not None
assert n_node == node
assert n_node.server_type == PRIMARY
assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
assert n.slots_cache[i] == [n_node]
await rc.aclose()
async def test_init_with_down_node(self) -> None:
"""
If I can't connect to one of the nodes, everything should still work.
But if I can't connect to any of the nodes, exception should be thrown.
"""
with mock.patch.object(
ClusterNode, "execute_command", autospec=True
) as execute_command:
async def mocked_execute_command(self, *args, **kwargs):
if self.port == 7000:
raise ConnectionError("mock connection error for 7000")
if args[0] == "CLUSTER SLOTS":
return [
[0, 8191, ["127.0.0.1", 7001, "node_1"]],
[8192, 16383, ["127.0.0.1", 7002, "node_2"]],
]
elif args[0] == "INFO":
return {"cluster_enabled": True}
elif args[1] == "cluster-require-full-coverage":
return {"cluster-require-full-coverage": "yes"}
execute_command.side_effect = mocked_execute_command
node_1 = ClusterNode("127.0.0.1", 7000)
node_2 = ClusterNode("127.0.0.1", 7001)
# If all startup nodes fail to connect, connection error should be
# thrown
with pytest.raises(RedisClusterException) as e:
async with RedisCluster(startup_nodes=[node_1]):
...
assert "Redis Cluster cannot be connected" in str(e.value)
with mock.patch.object(
AsyncCommandsParser, "initialize", autospec=True
) as cmd_parser_initialize:
def cmd_init_mock(self, r: ClusterNode) -> None:
self.commands = {
"GET": {
"name": "get",
"arity": 2,
"flags": ["readonly", "fast"],
"first_key_pos": 1,
"last_key_pos": 1,
"step_count": 1,
}
}
cmd_parser_initialize.side_effect = cmd_init_mock
# When at least one startup node is reachable, the cluster
# initialization should succeeds
async with RedisCluster(startup_nodes=[node_1, node_2]) as rc:
assert rc.get_node(host=default_host, port=7001) is not None
assert rc.get_node(host=default_host, port=7002) is not None
@pytest.mark.parametrize("dynamic_startup_nodes", [True, False])
async def test_init_slots_dynamic_startup_nodes(self, dynamic_startup_nodes):
rc = await get_mocked_redis_client(
host="my@DNS.com",
port=7000,
cluster_slots=default_cluster_slots,
dynamic_startup_nodes=dynamic_startup_nodes,
)
# Nodes are taken from default_cluster_slots
discovered_nodes = [
"127.0.0.1:7000",
"127.0.0.1:7001",
"127.0.0.1:7002",
"127.0.0.1:7003",
]
startup_nodes = list(rc.nodes_manager.startup_nodes.keys())
if dynamic_startup_nodes is True:
assert sorted(startup_nodes) == sorted(discovered_nodes)
else:
assert startup_nodes == ["my@DNS.com:7000"]
| TestNodesManager |
python | getsentry__sentry | tests/apidocs/endpoints/releases/test_organization_releases.py | {
"start": 269,
"end": 2069
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization(owner=user, name="blah")
org2 = self.create_organization(owner=user, name="bloop")
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
self.create_team_membership(team1, user=user)
self.create_team_membership(team2, user=user)
self.project1 = self.create_project(teams=[team1], organization=org)
self.project2 = self.create_project(teams=[team2], organization=org2)
self.project3 = self.create_project(teams=[team1], organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(self.project1)
release2 = Release.objects.create(
organization_id=org2.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(self.project2)
self.url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_post(self) -> None:
data = {"version": "1.2.1", "projects": [self.project3.slug]}
response = self.client.post(self.url, data)
request = RequestFactory().post(self.url, data)
self.validate_schema(request, response)
| OrganizationReleasesDocsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.