language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydata__xarray | xarray/backends/common.py | {
"start": 22092,
"end": 22573
} | class ____(AbstractWritableDataStore):
__slots__ = ()
def encode(self, variables, attributes):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
variables, attributes = cf_encoder(variables, attributes)
variables = {
k: ensure_dtype_not_object(v, name=k) for k, v in variables.items()
}
return super().encode(variables, attributes)
| WritableCFDataStore |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/hooks/test_search_ads.py | {
"start": 1251,
"end": 6964
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = GoogleSearchAdsReportingHook(gcp_conn_id=GCP_CONN_ID)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.get_credentials"
)
@mock.patch("airflow.providers.google.marketing_platform.hooks.search_ads.build")
def test_gen_conn(self, mock_build, mock_get_credentials):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
"searchads360",
API_VERSION,
credentials=mock_get_credentials.return_value,
cache_discovery=False,
)
assert mock_build.return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.customer_service"
)
@pytest.mark.parametrize(
("given_args", "expected_args_extras"),
[
({"page_token": None}, {}),
({"page_token": "next_page_token"}, {"pageToken": "next_page_token"}),
({"summary_row_setting": "summary line content"}, {"summaryRowSetting": "summary line content"}),
({"page_size": 10, "validate_only": True}, {"pageSize": 10, "validateOnly": True}),
],
)
def test_search(self, customer_service_mock, given_args, expected_args_extras):
return_value = {"results": [{"x": 1}]}
(
customer_service_mock.searchAds360.return_value.search.return_value.execute
).return_value = return_value
result = self.hook.search(customer_id=CUSTOMER_ID, query=QUERY, **given_args)
expected_args = {
"customerId": CUSTOMER_ID,
"body": {
"query": QUERY,
"pageSize": 10000,
"returnTotalResultsCount": False,
"validateOnly": False,
**expected_args_extras,
},
}
customer_service_mock.searchAds360.return_value.search.assert_called_once_with(**expected_args)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.customer_service"
)
def test_get_custom_column(self, customer_service_mock):
custom_column_id = "custom_column_id"
return_value = {"resourceName": 1}
(
customer_service_mock.customColumns.return_value.get.return_value.execute
).return_value = return_value
result = self.hook.get_custom_column(customer_id=CUSTOMER_ID, custom_column_id=custom_column_id)
customer_service_mock.customColumns.return_value.get.assert_called_once_with(
resourceName=f"customers/{CUSTOMER_ID}/customColumns/{custom_column_id}"
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.customer_service"
)
def test_list_custom_columns(self, customer_service_mock):
return_value = {
"results": [
{"resourceName": f"customers/{CUSTOMER_ID}/customColumns/col1"},
{"resourceName": f"customers/{CUSTOMER_ID}/customColumns/col2"},
]
}
(
customer_service_mock.customColumns.return_value.list.return_value.execute
).return_value = return_value
result = self.hook.list_custom_columns(customer_id=CUSTOMER_ID)
customer_service_mock.customColumns.return_value.list.assert_called_once_with(customerId=CUSTOMER_ID)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.fields_service"
)
def test_get_field(self, fields_service_mock):
field_name = "field_name"
return_value = {
"name": "Field 1",
"resourceName": f"customers/{CUSTOMER_ID}/searchAds360Fields/field1",
}
fields_service_mock.get.return_value.execute.return_value = return_value
result = self.hook.get_field(field_name=field_name)
fields_service_mock.get.assert_called_once_with(resourceName=f"searchAds360Fields/{field_name}")
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.search_ads.GoogleSearchAdsReportingHook.fields_service"
)
@pytest.mark.parametrize(
("given_args", "expected_args_extras"),
[
({"page_token": None}, {}),
({"page_token": "next_page_token"}, {"pageToken": "next_page_token"}),
({"page_size": 10}, {"pageSize": 10}),
],
)
def test_search_fields(self, fields_service_mock, given_args, expected_args_extras):
query = "SELECT field1, field2 FROM campaigns;"
return_value = {
"results": [
{"name": "Field 1", "resourceName": f"customers/{CUSTOMER_ID}/searchAds360Fields/field1"},
{"name": "Field 2", "resourceName": f"customers/{CUSTOMER_ID}/searchAds360Fields/field2"},
]
}
fields_service_mock.search.return_value.execute.return_value = return_value
result = self.hook.search_fields(query=query, **given_args)
expected_args = {"query": query, "pageSize": 10000, **expected_args_extras}
fields_service_mock.search.assert_called_once_with(body=expected_args)
assert return_value == result
| TestGoogleSearchAdsReportingHook |
python | Textualize__textual | tests/option_list/test_option_list_disabled.py | {
"start": 268,
"end": 4555
} | class ____(App[None]):
"""Test option list application."""
def __init__(self, disabled: bool) -> None:
super().__init__()
self.initial_disabled = disabled
def compose(self) -> ComposeResult:
"""Compose the child widgets."""
yield OptionList(
*[
Option(str(n), id=str(n), disabled=self.initial_disabled)
for n in range(100)
]
)
async def test_default_enabled() -> None:
"""Options created enabled should remain enabled."""
async with OptionListApp(False).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for option in range(option_list.option_count):
assert option_list.get_option_at_index(option).disabled is False
async def test_default_disabled() -> None:
"""Options created disabled should remain disabled."""
async with OptionListApp(True).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for option in range(option_list.option_count):
assert option_list.get_option_at_index(option).disabled is True
async def test_enabled_to_disabled_via_index() -> None:
"""It should be possible to change enabled to disabled via index."""
async with OptionListApp(False).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for n in range(option_list.option_count):
assert option_list.get_option_at_index(n).disabled is False
option_list.disable_option_at_index(n)
assert option_list.get_option_at_index(n).disabled is True
async def test_disabled_to_enabled_via_index() -> None:
"""It should be possible to change disabled to enabled via index."""
async with OptionListApp(True).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for n in range(option_list.option_count):
assert option_list.get_option_at_index(n).disabled is True
option_list.enable_option_at_index(n)
assert option_list.get_option_at_index(n).disabled is False
async def test_enabled_to_disabled_via_id() -> None:
"""It should be possible to change enabled to disabled via id."""
async with OptionListApp(False).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for n in range(option_list.option_count):
assert option_list.get_option(str(n)).disabled is False
option_list.disable_option(str(n))
assert option_list.get_option(str(n)).disabled is True
async def test_disabled_to_enabled_via_id() -> None:
"""It should be possible to change disabled to enabled via id."""
async with OptionListApp(True).run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
for n in range(option_list.option_count):
assert option_list.get_option(str(n)).disabled is True
option_list.enable_option(str(n))
assert option_list.get_option(str(n)).disabled is False
async def test_disable_invalid_id() -> None:
"""Disabling an option via an ID that does not exist should throw an error."""
async with OptionListApp(True).run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).disable_option("does-not-exist")
async def test_disable_invalid_index() -> None:
"""Disabling an option via an index that does not exist should throw an error."""
async with OptionListApp(True).run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).disable_option_at_index(4242)
async def test_enable_invalid_id() -> None:
"""Disabling an option via an ID that does not exist should throw an error."""
async with OptionListApp(False).run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).enable_option("does-not-exist")
async def test_enable_invalid_index() -> None:
"""Disabling an option via an index that does not exist should throw an error."""
async with OptionListApp(False).run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).enable_option_at_index(4242)
| OptionListApp |
python | walkccc__LeetCode | solutions/1451. Rearrange Words in a Sentence/1451.py | {
"start": 0,
"end": 382
} | class ____:
def arrangeWords(self, text: str) -> str:
words = text.split()
count = collections.defaultdict(list)
for word in words:
count[len(word)].append(word.lower())
c2 = OrderedDict(sorted(count.items()))
ans = []
for l in c2:
for word in c2[l]:
ans.append(word)
ans[0] = ans[0].capitalize()
return ' '.join(ans)
| Solution |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 50156,
"end": 51497
} | class ____(enum.Enum):
"""Generates identifiers that can be pased into the solver attached
to constraints, and then later retrieved to determine the origin of
those constraints when ``SpecBuilder`` creates Specs from the solve
result.
"""
CONDITIONAL_SPEC = 0
DEPENDS_ON = 1
REQUIRE = 2
@staticmethod
def _SUFFIXES() -> Dict["ConstraintOrigin", str]:
return {
ConstraintOrigin.CONDITIONAL_SPEC: "_cond",
ConstraintOrigin.DEPENDS_ON: "_dep",
ConstraintOrigin.REQUIRE: "_req",
}
@staticmethod
def append_type_suffix(pkg_id: str, kind: "ConstraintOrigin") -> str:
"""Given a package identifier and a constraint kind, generate a string ID."""
suffix = ConstraintOrigin._SUFFIXES()[kind]
return f"{pkg_id}{suffix}"
@staticmethod
def strip_type_suffix(source: str) -> Tuple[int, Optional[str]]:
"""Take a combined package/type ID generated by
``append_type_suffix``, and extract the package ID and
an associated weight.
"""
if not source:
return -1, None
for kind, suffix in ConstraintOrigin._SUFFIXES().items():
if source.endswith(suffix):
return kind.value, source[: -len(suffix)]
return -1, source
| ConstraintOrigin |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/routing_producer.py | {
"start": 1010,
"end": 1192
} | class ____(NamedTuple):
"""
A MessageRoute is a tuple of (producer, topic) that a message should be
routed to.
"""
producer: Producer
topic: Topic
| MessageRoute |
python | getsentry__sentry | src/sentry/spans/consumers/process_segments/enrichment.py | {
"start": 2071,
"end": 11207
} | class ____:
"""Enriches spans with information from their parent, child and sibling spans."""
def __init__(self, spans: list[SpanEvent]) -> None:
self._segment_span = _find_segment_span(spans)
self._ttid_ts = _timestamp_by_op(spans, "ui.load.initial_display")
self._ttfd_ts = _timestamp_by_op(spans, "ui.load.full_display")
self._span_intervals: dict[str, list[tuple[int, int]]] = {}
self._spans_by_id: dict[str, SpanEvent] = {}
for span in spans:
if "span_id" in span:
self._spans_by_id[span["span_id"]] = span
if parent_span_id := span.get("parent_span_id"):
interval = _span_interval(span)
self._span_intervals.setdefault(parent_span_id, []).append(interval)
def _attributes(self, span: SpanEvent) -> dict[str, Any]:
attributes: dict[str, Any] = {**(span.get("attributes") or {})}
def get_value(key: str) -> Any:
attr: dict[str, Any] = attributes.get(key) or {}
return attr.get("value")
if self._segment_span is not None:
# Assume that Relay has extracted the shared tags into `data` on the
# root span. Once `sentry_tags` is removed, the logic from
# `extract_shared_tags` should be moved here.
segment_attrs = self._segment_span.get("attributes") or {}
shared_attrs = {k: v for k, v in segment_attrs.items() if k in SHARED_SENTRY_ATTRIBUTES}
is_mobile = attribute_value(self._segment_span, "sentry.mobile") == "true"
mobile_start_type = _get_mobile_start_type(self._segment_span)
if is_mobile:
# NOTE: Like in Relay's implementation, shared tags are added at the
# very end. This does not have access to the shared tag value. We
# keep behavior consistent, although this should be revisited.
if get_value("sentry.thread.name") == MOBILE_MAIN_THREAD_NAME:
attributes["sentry.main_thread"] = {"type": "string", "value": "true"}
if not get_value("sentry.app_start_type") and mobile_start_type:
attributes["sentry.app_start_type"] = {
"type": "string",
"value": mobile_start_type,
}
if self._ttid_ts is not None and span["end_timestamp"] <= self._ttid_ts:
attributes["sentry.ttid"] = {"type": "string", "value": "ttid"}
if self._ttfd_ts is not None and span["end_timestamp"] <= self._ttfd_ts:
attributes["sentry.ttfd"] = {"type": "string", "value": "ttfd"}
for key, value in shared_attrs.items():
if attributes.get(key) is None:
attributes[key] = value
if get_span_op(span).startswith("gen_ai.") and "gen_ai.agent.name" not in attributes:
if (parent_span_id := span.get("parent_span_id")) is not None:
parent_span = self._spans_by_id.get(parent_span_id)
if (
parent_span is not None
and get_span_op(parent_span) == "gen_ai.invoke_agent"
and (agent_name := attribute_value(parent_span, "gen_ai.agent.name"))
is not None
):
attributes["gen_ai.agent.name"] = {
"type": "string",
"value": agent_name,
}
attributes["sentry.exclusive_time_ms"] = {
"type": "double",
"value": self._exclusive_time(span),
}
return attributes
def _iter_ancestors(self, span: SpanEvent) -> Iterator[SpanEvent]:
"""
Iterates over the ancestors of a span in order towards the root using the "parent_span_id" attribute.
"""
current: SpanEvent | None = span
parent_span_id: str | None = None
while current is not None:
parent_span_id = current.get("parent_span_id")
if parent_span_id is not None:
current = self._spans_by_id.get(parent_span_id)
else:
current = None
if current is not None:
yield current
else:
break
def _exclusive_time(self, span: SpanEvent) -> float:
"""
Sets the exclusive time on all spans in the list.
The exclusive time is the time spent in a span's own code. This is the sum
of all time intervals where no child span was active.
"""
intervals = self._span_intervals.get(span["span_id"], [])
# Sort by start ASC, end DESC to skip over nested intervals efficiently
intervals.sort(key=lambda x: (x[0], -x[1]))
exclusive_time_us: int = 0 # microseconds to prevent rounding issues
start, end = _span_interval(span)
# Progressively add time gaps before the next span and then skip to its end.
for child_start, child_end in intervals:
if child_start >= end:
break
if child_start > start:
exclusive_time_us += child_start - start
start = max(start, child_end)
# Add any remaining time not covered by children
exclusive_time_us += max(end - start, 0)
return exclusive_time_us / 1_000
def enrich_span(self, span: SpanEvent) -> SpanEvent:
attributes = self._attributes(span)
return {
**span,
"attributes": attributes,
}
@classmethod
def enrich_spans(cls, spans: list[SpanEvent]) -> tuple[int | None, list[SpanEvent]]:
inst = cls(spans)
ret = []
segment_idx = None
for i, span in enumerate(spans):
enriched = inst.enrich_span(span)
if span is inst._segment_span:
segment_idx = i
ret.append(enriched)
return segment_idx, ret
def _get_mobile_start_type(segment: SpanEvent) -> str | None:
"""
Check the measurements on the span to determine what kind of start type the
event is.
"""
attributes = segment.get("attributes") or {}
if "app_start_cold" in attributes:
return "cold"
if "app_start_warm" in attributes:
return "warm"
return None
def _timestamp_by_op(spans: list[SpanEvent], op: str) -> float | None:
for span in spans:
if get_span_op(span) == op:
return span["end_timestamp"]
return None
def _span_interval(span: SpanEvent) -> tuple[int, int]:
"""Get the start and end timestamps of a span in microseconds."""
return _us(span["start_timestamp"]), _us(span["end_timestamp"])
def _us(timestamp: float) -> int:
"""Convert the floating point duration or timestamp to integer microsecond
precision."""
return int(timestamp * 1_000_000)
def compute_breakdowns(
spans: Sequence[SpanEvent],
breakdowns_config: dict[str, dict[str, Any]],
) -> dict[str, Attribute]:
"""
Computes breakdowns from all spans and writes them to the segment span.
Breakdowns are measurements that are derived from the spans in the segment.
By convention, their unit is in milliseconds. In the end, these measurements
are converted into attributes on the span trace item.
"""
ret: dict[str, Attribute] = {}
for breakdown_name, breakdown_config in breakdowns_config.items():
ty = breakdown_config.get("type")
if ty == "spanOperations":
breakdowns = _compute_span_ops(spans, breakdown_config)
else:
continue
for key, value in breakdowns.items():
ret[f"{breakdown_name}.{key}"] = {"value": value, "type": "double"}
return ret
def _compute_span_ops(spans: Sequence[SpanEvent], config: Any) -> dict[str, float]:
matches = config.get("matches")
if not matches:
return {}
intervals_by_op = defaultdict(list)
for span in spans:
op = get_span_op(span)
if operation_name := next(filter(lambda m: op.startswith(m), matches), None):
intervals_by_op[operation_name].append(_span_interval(span))
ret: dict[str, float] = {}
for operation_name, intervals in intervals_by_op.items():
duration = _get_duration_us(intervals)
ret[f"ops.{operation_name}"] = duration / 1000 # unit: millisecond
return ret
def _get_duration_us(intervals: list[tuple[int, int]]) -> int:
"""
Get the wall clock time duration covered by the intervals in microseconds.
Overlapping intervals are merged so that they are not counted twice. For
example, the intervals [(1, 3), (2, 4)] would yield a duration of 3, not 4.
"""
duration = 0
last_end = 0
intervals.sort(key=lambda x: (x[0], -x[1]))
for start, end in intervals:
# Ensure the current interval doesn't overlap with the last one
start = max(start, last_end)
duration += max(end - start, 0)
last_end = end
return duration
| TreeEnricher |
python | dask__distributed | distributed/metrics.py | {
"start": 4792,
"end": 11163
} | class ____:
"""Context-based general purpose meter.
Usage
-----
1. In high level code, call :meth:`add_callback` to install a hook that defines an
activity
2. In low level code, typically many stack levels below, log quantitative events
(e.g. elapsed time, transferred bytes, etc.) so that they will be attributed to
the high-level code calling it, either with :meth:`meter`,
:meth:`meter_function`, or :meth:`digest_metric`.
Examples
--------
In the code that e.g. sends a Python object from A to B over the network:
>>> from distributed.metrics import context_meter
>>> with context_meter.add_callback(partial(print, "A->B comms:")):
... await send_over_the_network(obj)
In the serialization utilities, called many stack levels below:
>>> with context_meter.meter("dumps"):
... pik = pickle.dumps(obj)
>>> with context_meter.meter("compress"):
... pik = lz4.compress(pik)
And finally, elsewhere, deep into the TCP stack:
>>> with context_meter.meter("network-write"):
... await comm.write(frames)
When you call the top-level code, you'll get::
A->B comms: dumps 0.012 seconds
A->B comms: compress 0.034 seconds
A->B comms: network-write 0.567 seconds
"""
_callbacks: ContextVar[dict[Hashable, Callable[[Hashable, float, str], None]]]
def __init__(self):
self._callbacks = ContextVar(
f"MetricHook<{id(self)}>._callbacks", default={} # noqa: B039
)
def __reduce__(self):
assert self is context_meter, "Found copy of singleton"
return self._unpickle_singleton, ()
@staticmethod
def _unpickle_singleton():
return context_meter
@contextmanager
def add_callback(
self,
callback: Callable[[Hashable, float, str], None],
*,
key: Hashable | None = None,
allow_offload: bool = False,
) -> Iterator[None]:
"""Add a callback when entering the context and remove it when exiting it.
The callback must accept the same parameters as :meth:`digest_metric`.
Parameters
----------
callback: Callable
``f(label, value, unit)`` to be executed
key: Hashable, optional
Unique key for the callback. If two nested calls to ``add_callback`` use the
same key, suppress the outermost callback.
allow_offload: bool, optional
If set to True, this context must be executed inside a running asyncio
event loop. If a call to :meth:`digest_metric` is performed from a different
thread, e.g. from inside :func:`distributed.utils.offload`, ensure that
the callback is executed in the event loop's thread instead.
"""
if allow_offload:
loop = asyncio.get_running_loop()
tid = threading.get_ident()
def safe_cb(label: Hashable, value: float, unit: str, /) -> None:
if threading.get_ident() == tid:
callback(label, value, unit)
else: # We're inside offload()
loop.call_soon_threadsafe(callback, label, value, unit)
else:
safe_cb = callback
if key is None:
key = object()
cbs = self._callbacks.get()
cbs = cbs.copy()
cbs[key] = safe_cb
tok = self._callbacks.set(cbs)
try:
yield
finally:
tok.var.reset(tok)
@contextmanager
def clear_callbacks(self) -> Iterator[None]:
"""Do not trigger any callbacks set outside of this context"""
tok = self._callbacks.set({})
try:
yield
finally:
tok.var.reset(tok)
def digest_metric(self, label: Hashable, value: float, unit: str) -> None:
"""Invoke the currently set context callbacks for an arbitrary quantitative
metric.
"""
cbs = self._callbacks.get()
for cb in cbs.values():
cb(label, value, unit)
@contextmanager
def meter(
self,
label: Hashable,
unit: str = "seconds",
func: Callable[[], float] = timemod.perf_counter,
floor: float | Literal[False] = 0.0,
) -> Iterator[MeterOutput]:
"""Convenience context manager or decorator which calls func() before and after
the wrapped code, calculates the delta, and finally calls :meth:`digest_metric`.
If unit=='seconds', it also subtracts any other calls to :meth:`meter` or
:meth:`digest_metric` with the same unit performed within the context, so that
the total is strictly additive.
Parameters
----------
label: Hashable
label to pass to the callback
unit: str, optional
unit to pass to the callback. Default: seconds
func: callable
see :func:`meter`
floor: bool, optional
see :func:`meter`
Yields
------
:class:`MeterOutput` where the ``start`` attribute is populated straight away,
while ``stop`` and ``delta`` are nan until context exit. In case of multiple
nested calls to :meth:`meter`, then delta (for seconds only) is reduced by the
inner metrics, to a minimum of ``floor``.
"""
if unit != "seconds":
try:
with meter(func, floor=floor) as m:
yield m
finally:
self.digest_metric(label, m.delta, unit)
return
# If unit=="seconds", subtract time metered from the sub-contexts
offsets = []
def callback(label2: Hashable, value2: float, unit2: str) -> None:
if unit2 == unit:
# This must be threadsafe to support callbacks invoked from
# distributed.utils.offload; '+=' on a float would not be threadsafe!
offsets.append(value2)
try:
with self.add_callback(callback), meter(func, floor=False) as m:
yield m
finally:
delta = m.delta - sum(offsets)
if floor is not False:
delta = max(floor, delta)
m.delta = delta
self.digest_metric(label, delta, unit)
context_meter = ContextMeter()
| ContextMeter |
python | PrefectHQ__prefect | tests/runtime/test_flow_run.py | {
"start": 18842,
"end": 20808
} | class ____:
async def test_root_flow_run_id_is_attribute(self):
assert "root_flow_run_id" in dir(flow_run)
async def test_root_flow_run_id_is_empty_when_not_set(self):
assert flow_run.root_flow_run_id is None
async def test_root_flow_run_id_pulls_from_api_when_needed(
self, monkeypatch: pytest.MonkeyPatch, prefect_client: PrefectClient
):
assert flow_run.root_flow_run_id is None
root_flow_run = await prefect_client.create_flow_run(
flow=Flow(fn=lambda: None, name="root"),
parameters={"x": "foo", "y": "bar"},
parent_task_run_id=None,
)
@task
def root_task():
return 1
root_task_run = await prefect_client.create_task_run(
task=root_task,
dynamic_key="1",
flow_run_id=root_flow_run.id,
)
child_flow_run = await prefect_client.create_flow_run(
flow=Flow(fn=lambda: None, name="child"),
parameters={"x": "foo", "y": "bar"},
parent_task_run_id=root_task_run.id,
)
@task
def child_task():
return 1
child_task_run = await prefect_client.create_task_run(
task=child_task,
dynamic_key="1",
flow_run_id=child_flow_run.id,
)
deep_flow_run = await prefect_client.create_flow_run(
flow=Flow(fn=lambda: None, name="deep"),
parameters={"x": "foo", "y": "bar"},
parent_task_run_id=child_task_run.id,
)
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(deep_flow_run.id))
assert (
flow_run.root_flow_run_id
== str(root_flow_run.id)
== str(root_task_run.flow_run_id)
)
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(root_flow_run.id))
assert flow_run.root_flow_run_id == str(root_flow_run.id)
| TestRootFlowRunId |
python | urllib3__urllib3 | dummyserver/testcase.py | {
"start": 9250,
"end": 9532
} | class ____(HypercornDummyProxyTestCase):
http_host = "localhost"
http_host_alt = "127.0.0.1"
https_host = "localhost"
https_host_alt = "127.0.0.1"
https_certs = DEFAULT_CERTS
proxy_host = "::1"
proxy_host_alt = "127.0.0.1"
| IPv6HypercornDummyProxyTestCase |
python | mlflow__mlflow | dev/clint/src/clint/rules/use_walrus_operator.py | {
"start": 3450,
"end": 5656
} | class ____(ast.NodeVisitor):
"""Visits all statement blocks to check for walrus operator opportunities."""
def __init__(self) -> None:
self.violations: list[ast.stmt] = []
def _check_stmts(self, stmts: list[ast.stmt]) -> None:
for idx, stmt in enumerate(stmts[1:], start=1):
if isinstance(stmt, ast.If):
prev_stmt = stmts[idx - 1]
following_stmts = stmts[idx + 1 :]
if UseWalrusOperator.check(stmt, prev_stmt, following_stmts):
self.violations.append(prev_stmt)
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
self._check_stmts(node.body)
self.generic_visit(node)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
self._check_stmts(node.body)
self.generic_visit(node)
def visit_If(self, node: ast.If) -> None:
self._check_stmts(node.body)
self._check_stmts(node.orelse)
self.generic_visit(node)
def visit_For(self, node: ast.For) -> None:
self._check_stmts(node.body)
self._check_stmts(node.orelse)
self.generic_visit(node)
def visit_AsyncFor(self, node: ast.AsyncFor) -> None:
self._check_stmts(node.body)
self._check_stmts(node.orelse)
self.generic_visit(node)
def visit_While(self, node: ast.While) -> None:
self._check_stmts(node.body)
self._check_stmts(node.orelse)
self.generic_visit(node)
def visit_With(self, node: ast.With) -> None:
self._check_stmts(node.body)
self.generic_visit(node)
def visit_AsyncWith(self, node: ast.AsyncWith) -> None:
self._check_stmts(node.body)
self.generic_visit(node)
def visit_Try(self, node: ast.Try) -> None:
self._check_stmts(node.body)
for handler in node.handlers:
self._check_stmts(handler.body)
self._check_stmts(node.orelse)
self._check_stmts(node.finalbody)
self.generic_visit(node)
def visit_Match(self, node: ast.Match) -> None:
for case in node.cases:
self._check_stmts(case.body)
self.generic_visit(node)
| WalrusOperatorVisitor |
python | graphql-python__graphene | graphene/relay/connection.py | {
"start": 2429,
"end": 4191
} | class ____(ObjectType):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls, node=None, name=None, strict_types=False, _meta=None, **options
):
if not _meta:
_meta = ConnectionOptions(cls)
assert node, f"You have to provide a node in {cls.__name__}.Meta"
assert isinstance(node, NonNull) or issubclass(
node, (Scalar, Enum, ObjectType, Interface, Union, NonNull)
), f'Received incompatible node "{node}" for Connection {cls.__name__}.'
base_name = re.sub("Connection$", "", name or cls.__name__) or node._meta.name
if not name:
name = f"{base_name}Connection"
options["name"] = name
_meta.node = node
if not _meta.fields:
_meta.fields = {}
if "page_info" not in _meta.fields:
_meta.fields["page_info"] = Field(
PageInfo,
name="pageInfo",
required=True,
description="Pagination data for this connection.",
)
if "edges" not in _meta.fields:
edge_class = get_edge_class(cls, node, base_name, strict_types) # type: ignore
cls.Edge = edge_class
_meta.fields["edges"] = Field(
NonNull(List(NonNull(edge_class) if strict_types else edge_class)),
description="Contains the nodes in this connection.",
)
return super(Connection, cls).__init_subclass_with_meta__(
_meta=_meta, **options
)
# noinspection PyPep8Naming
def connection_adapter(cls, edges, pageInfo):
"""Adapter for creating Connection instances"""
return cls(edges=edges, page_info=pageInfo)
| Connection |
python | django__django | tests/gis_tests/rasterapp/migrations/0002_rastermodels.py | {
"start": 114,
"end": 2159
} | class ____(migrations.Migration):
dependencies = [
("rasterapp", "0001_setup_extensions"),
]
operations = [
migrations.CreateModel(
name="RasterModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"rast",
models.fields.RasterField(
blank=True,
null=True,
srid=4326,
verbose_name="A Verbose Raster Name",
),
),
(
"rastprojected",
models.fields.RasterField(
null=True,
srid=3086,
verbose_name="A Projected Raster Table",
),
),
("geom", models.fields.PointField(null=True, srid=4326)),
],
options={
"required_db_features": ["supports_raster"],
},
),
migrations.CreateModel(
name="RasterRelatedModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"rastermodel",
models.ForeignKey(
on_delete=deletion.CASCADE,
to="rasterapp.rastermodel",
),
),
],
options={
"required_db_features": ["supports_raster"],
},
),
]
| Migration |
python | qdrant__qdrant-client | qdrant_client/http/api_client.py | {
"start": 6274,
"end": 9649
} | class ____:
def __init__(self, host: str = None, **kwargs: Any) -> None:
self.host = host
self.middleware: AsyncMiddlewareT = BaseAsyncMiddleware()
self._async_client = AsyncClient(**kwargs)
@overload
async def request(
self, *, type_: Type[T], method: str, url: str, path_params: Dict[str, Any] = None, **kwargs: Any
) -> T:
...
@overload # noqa F811
async def request(
self, *, type_: None, method: str, url: str, path_params: Dict[str, Any] = None, **kwargs: Any
) -> None:
...
async def request( # noqa F811
self, *, type_: Any, method: str, url: str, path_params: Dict[str, Any] = None, **kwargs: Any
) -> Any:
if path_params is None:
path_params = {}
host = self.host if self.host.endswith("/") else self.host + "/"
url = url[1:] if url.startswith("/") else url
# in order to do a correct join, url join requires base_url to end with /, and url to not start with /,
# since url is treated as an absolute path and might truncate prefix in base_url
url = urljoin(host, url.format(**path_params))
request = self._async_client.build_request(method, url, **kwargs)
return await self.send(request, type_)
@overload
def request_sync(self, *, type_: Type[T], **kwargs: Any) -> T:
...
@overload # noqa F811
def request_sync(self, *, type_: None, **kwargs: Any) -> None:
...
def request_sync(self, *, type_: Any, **kwargs: Any) -> Any: # noqa F811
"""
This method is not used by the generated apis, but is included for convenience
"""
return get_event_loop().run_until_complete(self.request(type_=type_, **kwargs))
async def send(self, request: Request, type_: Type[T]) -> T:
response = await self.middleware(request, self.send_inner)
if response.status_code == 429:
retry_after_s = response.headers.get("Retry-After", None)
try:
resp = response.json()
message = resp["status"]["error"] if resp["status"] and resp["status"]["error"] else ""
except Exception:
message = ""
if retry_after_s:
raise ResourceExhaustedResponse(message, retry_after_s)
if response.status_code in [200, 201, 202]:
try:
return parse_as_type(response.json(), type_)
except ValidationError as e:
raise ResponseHandlingException(e)
raise UnexpectedResponse.for_response(response)
async def send_inner(self, request: Request) -> Response:
try:
response = await self._async_client.send(request)
except Exception as e:
raise ResponseHandlingException(e)
return response
async def aclose(self) -> None:
await self._async_client.aclose()
def add_middleware(self, middleware: AsyncMiddlewareT) -> None:
current_middleware = self.middleware
async def new_middleware(request: Request, call_next: SendAsync) -> Response:
async def inner_send(request: Request) -> Response:
return await current_middleware(request, call_next)
return await middleware(request, inner_send)
self.middleware = new_middleware
| AsyncApiClient |
python | fastapi__sqlmodel | docs_src/tutorial/connect/insert/tutorial001.py | {
"start": 246,
"end": 1911
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | spyder-ide__spyder | spyder/utils/qthelpers.py | {
"start": 19427,
"end": 19988
} | class ____(QAction):
"""Spyder QAction class wrapper to handle cross platform patches."""
def __init__(self, *args, action_id=None, **kwargs):
"""Spyder QAction class wrapper to handle cross platform patches."""
super().__init__(*args, **kwargs)
self.action_id = action_id
if sys.platform == "darwin":
self.setIconVisibleInMenu(False)
def __str__(self):
return "SpyderAction('{0}')".format(self.text())
def __repr__(self):
return "SpyderAction('{0}')".format(self.text())
| SpyderAction |
python | run-llama__llama_index | llama-index-core/tests/tools/test_types.py | {
"start": 238,
"end": 1220
} | class ____(BaseModel):
inner: Inner
def test_toolmetadata_openai_tool_description_max_length() -> None:
openai_tool_description_limit = 1024
valid_description = "a" * openai_tool_description_limit
invalid_description = "a" * (1 + openai_tool_description_limit)
ToolMetadata(valid_description).to_openai_tool()
ToolMetadata(invalid_description).to_openai_tool(skip_length_check=True)
with pytest.raises(ValueError):
ToolMetadata(invalid_description).to_openai_tool()
def test_nested_tool_schema() -> None:
tool = get_function_tool(Outer)
schema = tool.metadata.get_parameters_dict()
assert "$defs" in schema
defs = schema["$defs"]
assert "Inner" in defs
inner = defs["Inner"]
assert inner["required"][0] == "name"
assert inner["properties"] == {"name": {"title": "Name", "type": "string"}}
assert schema["required"][0] == "inner"
assert schema["properties"] == {"inner": {"$ref": "#/$defs/Inner"}}
| Outer |
python | gwtw__py-sorting | test/counting_sort_test.py | {
"start": 209,
"end": 404
} | class ____(unittest.TestCase,
BasePositiveIntegerSortTest):
def setUp(self):
self.sort = counting_sort.sort
if __name__ == '__main__':
unittest.main()
| CountingSortTest |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 2149,
"end": 6140
} | class ____(Instruction):
def __init__(self, parent, func, args, name='', cconv=None, tail=None,
fastmath=(), attrs=(), arg_attrs=None):
self.cconv = (func.calling_convention
if cconv is None and isinstance(func, Function)
else cconv)
# For backwards compatibility with previous API of accepting a "truthy"
# value for a hint to the optimizer to potentially tail optimize.
if isinstance(tail, str) and tail in TailMarkerOptions:
pass
elif tail:
tail = "tail"
else:
tail = ""
self.tail = tail
self.fastmath = FastMathFlags(fastmath)
self.attributes = CallInstrAttributes(attrs)
self.arg_attributes = {}
if arg_attrs:
for idx, attrs in arg_attrs.items():
if not (0 <= idx < len(args)):
raise ValueError("Invalid argument index {}"
.format(idx))
self.arg_attributes[idx] = ArgumentAttributes(attrs)
# Fix and validate arguments
args = list(args)
for i in range(len(func.function_type.args)):
arg = args[i]
expected_type = func.function_type.args[i]
if (isinstance(expected_type, types.MetaDataType) and
arg.type != expected_type):
arg = MetaDataArgument(arg)
if arg.type != expected_type:
msg = ("Type of #{0} arg mismatch: {1} != {2}"
.format(1 + i, expected_type, arg.type))
raise TypeError(msg)
args[i] = arg
super(CallInstr, self).__init__(parent, func.function_type.return_type,
"call", [func] + list(args), name=name)
@property
def callee(self):
return self.operands[0]
@callee.setter
def callee(self, newcallee):
self.operands[0] = newcallee
@property
def args(self):
return self.operands[1:]
def replace_callee(self, newfunc):
if newfunc.function_type != self.callee.function_type:
raise TypeError("New function has incompatible type")
self.callee = newfunc
@property
def called_function(self):
"""The callee function"""
return self.callee
def _descr(self, buf, add_metadata):
def descr_arg(i, a):
if i in self.arg_attributes:
attrs = ' '.join(self.arg_attributes[i]._to_list(a.type)) + ' '
else:
attrs = ''
return '{0} {1}{2}'.format(a.type, attrs, a.get_reference())
args = ', '.join([descr_arg(i, a) for i, a in enumerate(self.args)])
fnty = self.callee.function_type
# Only print function type if variable-argument
if fnty.var_arg:
ty = fnty
# Otherwise, just print the return type.
else:
# Fastmath flag work only in this case
ty = fnty.return_type
callee_ref = "{0} {1}".format(ty, self.callee.get_reference())
if self.cconv:
callee_ref = "{0} {1}".format(self.cconv, callee_ref)
tail_marker = ""
if self.tail:
tail_marker = "{0} ".format(self.tail)
fn_attrs = ' ' + ' '.join(self.attributes._to_list(fnty.return_type))\
if self.attributes else ''
fm_attrs = ' ' + ' '.join(self.fastmath._to_list(fnty.return_type))\
if self.fastmath else ''
buf.append("{tail}{op}{fastmath} {callee}({args}){attr}{meta}\n".format(
tail=tail_marker,
op=self.opname,
callee=callee_ref,
fastmath=fm_attrs,
args=args,
attr=fn_attrs,
meta=(self._stringify_metadata(leading_comma=True)
if add_metadata else ""),
))
def descr(self, buf):
self._descr(buf, add_metadata=True)
| CallInstr |
python | walkccc__LeetCode | solutions/2742. Painting the Walls/2742.py | {
"start": 0,
"end": 456
} | class ____:
def paintWalls(self, cost: list[int], time: list[int]) -> int:
n = len(cost)
@functools.lru_cache(None)
def dp(i: int, walls: int) -> int:
"""Returns the minimum cost to paint j walls by painters[i..n)."""
if walls <= 0:
return 0
if i == n:
return math.inf
pick = cost[i] + dp(i + 1, walls - time[i] - 1)
skip = dp(i + 1, walls)
return min(pick, skip)
return dp(0, n)
| Solution |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 42839,
"end": 44151
} | class ____(BaseAPIIntegrationTest):
@pytest.mark.xfail(reason='Output of docker top depends on host distro, '
'and is not formalized.')
def test_top(self):
container = self.client.create_container(
TEST_IMG, ['sleep', '60']
)
self.tmp_containers.append(container)
self.client.start(container)
res = self.client.top(container)
if not IS_WINDOWS_PLATFORM:
assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
assert len(res['Processes']) == 1
assert res['Processes'][0][-1] == 'sleep 60'
self.client.kill(container)
@pytest.mark.skipif(
IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
)
@pytest.mark.xfail(reason='Output of docker top depends on host distro, '
'and is not formalized.')
def test_top_with_psargs(self):
container = self.client.create_container(
TEST_IMG, ['sleep', '60'])
self.tmp_containers.append(container)
self.client.start(container)
res = self.client.top(container, '-eopid,user')
assert res['Titles'] == ['PID', 'USER']
assert len(res['Processes']) == 1
assert res['Processes'][0][10] == 'sleep 60'
| ContainerTopTest |
python | spyder-ide__spyder | spyder/api/widgets/mixins.py | {
"start": 24880,
"end": 26049
} | class ____:
"""
Mixin with additional functionality for the QMainWindow's used in Spyder.
"""
def _is_on_visible_screen(self: QMainWindow):
"""Detect if the window is placed on a visible screen."""
x, y = self.geometry().x(), self.geometry().y()
qapp = QApplication.instance()
current_screen = qapp.screenAt(QPoint(x, y))
if current_screen is None:
return False
else:
return True
def move_to_primary_screen(self: QMainWindow):
"""Move the window to the primary screen if necessary."""
if self._is_on_visible_screen():
return
qapp = QApplication.instance()
primary_screen_geometry = qapp.primaryScreen().availableGeometry()
x, y = primary_screen_geometry.x(), primary_screen_geometry.y()
if self.isMaximized():
self.showNormal()
self.move(QPoint(x, y))
# With this we want to maximize only the Spyder main window and not the
# plugin ones, which usually are not maximized.
if not hasattr(self, 'is_window_widget'):
self.showMaximized()
| SpyderMainWindowMixin |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 326810,
"end": 331425
} | class ____(torch.nn.Module):
def forward(self, t):
t: "f32[2, 3]";
t, = fx_pytree.tree_flatten_spec(([t], {}), self._in_spec)
_guards_fn = self._guards_fn(t); _guards_fn = None
sum_1: "f32[]" = torch.ops.aten.sum.default(t)
_assert_tensor_metadata_default = torch.ops.aten._assert_tensor_metadata.default(sum_1, dtype = torch.float32, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default = None
to: "i64[]" = torch.ops.aten.to.dtype(sum_1, torch.int64); sum_1 = None
item: "Sym(u0)" = torch.ops.aten.item.default(to); to = None
sin: "f32[2, 3]" = torch.ops.aten.sin.default(t)
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (2, 3, 1, 1, 1, 3, item, sin), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = item = sin = None
getitem_8: "Sym(u8)" = while_loop[0]
getitem_9: "Sym(u9)" = while_loop[1]
getitem_10: "Sym(u10)" = while_loop[2]
getitem_11: "Sym(u11)" = while_loop[3]
getitem_12: "Sym(u12)" = while_loop[4]
getitem_13: "Sym(u13)" = while_loop[5]
getitem_14: "Sym(u14)" = while_loop[6]
getitem_7: "f32[2, 3]" = while_loop[7]; while_loop = None
add: "Sym(u8 + 1)" = getitem_8 + 1
add_1: "Sym(u9 + 1)" = getitem_9 + 1
add_2: "Sym(u10 + 1)" = getitem_10 + 1
add_3: "Sym(u11 + 1)" = getitem_11 + 1
add_4: "Sym(u12 + 1)" = getitem_12 + 1
add_5: "Sym(u13 + 1)" = getitem_13 + 1
add_6: "Sym(u14 + 1)" = getitem_14 + 1
add_7: "f32[2, 3]" = torch.ops.aten.add.Tensor(getitem_7, 1)
add_8: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_8); getitem_8 = None
add_9: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_9); getitem_9 = None
add_10: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_10); getitem_10 = None
add_11: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_11); getitem_11 = None
add_12: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_12); getitem_12 = None
add_13: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_13); getitem_13 = None
add_14: "f32[2, 3]" = torch.ops.aten.add.Tensor(t, getitem_14); getitem_14 = None
add_15: "f32[2, 3]" = torch.ops.aten.add.Tensor(getitem_7, t); getitem_7 = t = None
return pytree.tree_unflatten((add, add_1, add_2, add_3, add_4, add_5, add_6, add_7, add_8, add_9, add_10, add_11, add_12, add_13, add_14, add_15), self._out_spec)
class while_loop_cond_graph_0(torch.nn.Module):
def forward(self, a_1: "Sym(u1)", b_1: "Sym(u2)", c1_1: "Sym(u3)", c2_1: "Sym(u4)", c3_1: "Sym(u5)", c0_1: "Sym(u6)", u0_1: "Sym(u7)", x_1: "f32[2, 3]"):
mul: "Sym(u3*u4)" = c1_1 * c2_1; c1_1 = c2_1 = None
mul_1: "Sym(u3*u4*u5)" = mul * c3_1; mul = c3_1 = None
mul_2: "Sym(u1*u2)" = a_1 * b_1; a_1 = b_1 = None
lt: "Sym(u3*u4*u5 < u1*u2)" = mul_1 < mul_2; mul_1 = mul_2 = None
return lt
class while_loop_body_graph_0(torch.nn.Module):
def forward(self, a_1: "Sym(u1)", b_1: "Sym(u2)", c1_1: "Sym(u3)", c2_1: "Sym(u4)", c3_1: "Sym(u5)", c0_1: "Sym(u6)", u0_1: "Sym(u7)", x_1: "f32[2, 3]"):
add: "Sym(u7 + 1)" = u0_1 + 1; u0_1 = None
add_1: "f32[2, 3]" = torch.ops.aten.add.Tensor(x_1, 1); x_1 = None
return (b_1, c1_1, c2_1, c3_1, a_1, 0, add, add_1)
""", # noqa: B950
)
@skipIfTorchDynamo("Graph is not captured correctly when test with dynamo")
@parametrize("dynamic", [True, False])
@parametrize("backend", ["eager", "aot_eager"])
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_while_loop_op_constant_and_symint_output_compile(self, dynamic, backend):
m, args = WHILE_LOOP_TESTS["const_and_symint_output"]
if backend == "eager":
backend = EagerAndRecordGraphs()
self._check_compile(m, args, dynamic=dynamic, backend=backend)
if (
isinstance(backend, EagerAndRecordGraphs)
# cross ref or dynamic gives a slightly different graph
and not dynamic
and not TEST_WITH_CROSSREF
):
self.assertEqual(len(backend.graphs), 1)
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | django__django | tests/model_formsets/models.py | {
"start": 5379,
"end": 5777
} | class ____(models.Model):
title = models.CharField(max_length=50, unique_for_date="posted", blank=True)
slug = models.CharField(max_length=50, unique_for_year="posted", blank=True)
subtitle = models.CharField(max_length=50, unique_for_month="posted", blank=True)
posted = models.DateField()
def __str__(self):
return self.title
# Models for testing UUID primary keys
| Post |
python | google__jax | tests/lax_scipy_sparse_test.py | {
"start": 1896,
"end": 2040
} | class ____:
def __init__(self, A):
self.A = A
self.shape = self.A.shape
def __matmul__(self, x):
return self.A @ x
| CustomOperator |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 77784,
"end": 78109
} | class ____(TypedDict):
type: Literal['no-info']
function: NoInfoWrapValidatorFunction
# (input_value: Any, validator: ValidatorFunctionWrapHandler, info: ValidationInfo, /) -> Any
WithInfoWrapValidatorFunction = Callable[[Any, ValidatorFunctionWrapHandler, ValidationInfo[Any]], Any]
| NoInfoWrapValidatorFunctionSchema |
python | pydantic__pydantic | tests/typechecking/with_config_decorator.py | {
"start": 164,
"end": 234
} | class ____(TypedDict):
a: str
@with_config(str_to_lower=True)
| Model1 |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 15793,
"end": 16198
} | class ____(Expr):
"""Represent a literal (known) value as an `Expr`"""
_parameters = ["value"]
def _divisions(self):
return (None, None)
@functools.cached_property
def _meta(self):
return make_meta(self.value)
def _task(self, name: Key, index: int) -> Task:
assert index == 0
return DataNode(name, self.value) # type: ignore[return-value]
| Literal |
python | django__django | tests/select_related_regress/models.py | {
"start": 2527,
"end": 2600
} | class ____(Fowl):
mother = models.ForeignKey(Hen, models.CASCADE)
| Chick |
python | allegroai__clearml | clearml/backend_api/services/v2_20/workers.py | {
"start": 61691,
"end": 68065
} | class ____(Request):
"""
Returns statistics for the selected workers and time range aggregated by date intervals.
:param worker_ids: List of worker ids to collect metrics for. If not provided
or empty then all the company workers metrics are analyzed.
:type worker_ids: Sequence[str]
:param from_date: Starting time (in seconds from epoch) for collecting statistics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting statistics
:type to_date: float
:param interval: Time interval in seconds for a single statistics point. The minimal value is 1
:type interval: int
:param items: List of metric keys and requested statistics
:type items: Sequence[StatItem]
:param split_by_variant: If true then break statistics by hardware sub types
:type split_by_variant: bool
"""
_service = "workers"
_action = "get_stats"
_version = "2.20"
_schema = {
"definitions": {
"aggregation_type": {
"description": "Metric aggregation type",
"enum": ["avg", "min", "max"],
"type": "string",
},
"stat_item": {
"properties": {
"category": {
"oneOf": [
{"$ref": "#/definitions/aggregation_type"},
{"type": "null"},
]
},
"key": {
"description": "Name of a metric",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single statistics point. The minimal value is 1",
"type": "integer",
},
"items": {
"description": "List of metric keys and requested statistics",
"items": {"$ref": "#/definitions/stat_item"},
"type": "array",
},
"split_by_variant": {
"default": False,
"description": "If true then break statistics by hardware sub types",
"type": "boolean",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"worker_ids": {
"description": "List of worker ids to collect metrics for. If not provided or empty then all the company workers metrics are analyzed.",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"required": ["from_date", "to_date", "interval", "items"],
"type": "object",
}
def __init__(
self,
from_date: float,
to_date: float,
interval: int,
items: List[Any],
worker_ids: Optional[List[str]] = None,
split_by_variant: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetStatsRequest, self).__init__(**kwargs)
self.worker_ids = worker_ids
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.items = items
self.split_by_variant = split_by_variant
@schema_property("worker_ids")
def worker_ids(self) -> Optional[List[str]]:
return self._property_worker_ids
@worker_ids.setter
def worker_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_worker_ids = None
return
self.assert_isinstance(value, "worker_ids", (list, tuple))
self.assert_isinstance(value, "worker_ids", six.string_types, is_array=True)
self._property_worker_ids = value
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property("items")
def items(self) -> List[Any]:
return self._property_items
@items.setter
def items(self, value: List[Any]) -> None:
if value is None:
self._property_items = None
return
self.assert_isinstance(value, "items", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [StatItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "items", StatItem, is_array=True)
self._property_items = value
@schema_property("split_by_variant")
def split_by_variant(self) -> Optional[bool]:
return self._property_split_by_variant
@split_by_variant.setter
def split_by_variant(self, value: Optional[bool]) -> None:
if value is None:
self._property_split_by_variant = None
return
self.assert_isinstance(value, "split_by_variant", (bool,))
self._property_split_by_variant = value
| GetStatsRequest |
python | PyCQA__isort | tests/unit/test_exceptions.py | {
"start": 937,
"end": 1232
} | class ____(TestISortError):
def setup_class(self):
self.instance: exceptions.FileSkipped = exceptions.FileSkipped("message", "file_path")
def test_variables(self):
assert self.instance.file_path == "file_path"
assert str(self.instance) == "message"
| TestFileSkipped |
python | patrick-kidger__equinox | equinox/internal/_noinline.py | {
"start": 5006,
"end": 8985
} | class ____(Module):
batch_axes: PyTree[int | None]
def __call__(self, static_fn):
return filter_vmap(static_fn, in_axes=(self.batch_axes,))
@filter_primitive_def
def _noinline_impl(dynamic_index, abstract_fn, transforms, args):
del abstract_fn
static_fn = _index_to_fn[dynamic_index.item()]
for transform in transforms:
static_fn = transform(static_fn)
return static_fn(args)
@_only_shapedarrays
@filter_primitive_def
def _noinline_abstract(dynamic_index, abstract_fn, transforms, args):
del dynamic_index
dynamic, static = hashable_partition(
(abstract_fn, transforms, args), _is_shapedarray
)
flat_dynamic, treedef_dynamic = jtu.tree_flatten(dynamic)
key = tuple((x.shape, x.dtype) for x in flat_dynamic), treedef_dynamic, static
out_struct = _cache_filter_eval_shape(key)
return jtu.tree_map(_to_shapedarray, out_struct)
@filter_primitive_jvp
def _noinline_jvp(primals, tangents):
# TODO: add custom partial-eval rule to avoid the double-noinline?
dynamic_index, abstract_fn, transforms, args = primals
t_dynamic_index, t_abstract_fn, t_transforms, t_args = tangents
assert (
len(jtu.tree_leaves((t_dynamic_index, t_abstract_fn, t_transforms))) == 0
) # all none
del t_dynamic_index, t_abstract_fn, t_transforms
tangents = jtu.tree_map(materialise_zeros, args, t_args, is_leaf=_is_none)
primal_outs = filter_primitive_bind(noinline_p, *primals)
tangent_outs = filter_primitive_bind(
noinline_p,
dynamic_index,
abstract_fn,
transforms + [_jvp_transform],
(args, t_args),
)
return primal_outs, tangent_outs
@filter_primitive_transpose(materialise_zeros=True) # pyright: ignore
def _noinline_transpose(inputs, cts_out):
dynamic_index, abstract_fn, transforms, args = inputs
assert all(
not _is_undefined(x)
for x in jtu.tree_leaves(
(dynamic_index, abstract_fn, transforms), is_leaf=_is_undefined
)
)
# Note that `defined` may also include non-JAX-arrays
undefined, defined = partition(args, _is_undefined, is_leaf=_is_undefined)
undefined = jtu.tree_map(lambda x: x.aval, undefined, is_leaf=_is_undefined)
cts_args = filter_primitive_bind(
noinline_p,
dynamic_index,
abstract_fn,
transforms + [_MetaTransposeTransform(undefined)],
(defined, cts_out),
)
cts_rest = jtu.tree_map(lambda _: None, (dynamic_index, abstract_fn, transforms))
return cts_rest + (cts_args,)
@filter_primitive_batching
def _noinline_batch(inputs, batch_axes):
dynamic_index, abstract_fn, transforms, args = inputs
dynamic_index_bdim, abstract_fn_bdim, transforms_bdim, args_bdim = batch_axes
assert len(jtu.tree_leaves((abstract_fn_bdim, transforms_bdim))) == 0 # all none
if dynamic_index_bdim is not None:
# The batch rule for `lax.cond` with vmap'd predicate simply
# broadcasts all constants in the branches. In particular it may broadcast
# this. We simply need to ignore this and return to having a single dynamic
# index.
# This is actually a silent error if you do something exceptionally silly, and
# try to manually combine two different `noinline`d functions. There's no real
# way to catch this that wouldn't slow things down at runtime though, I don't
# think.
assert jnp.ndim(dynamic_index) == 1
dynamic_index = dynamic_index[0]
del dynamic_index_bdim, abstract_fn_bdim, transforms_bdim
args = jtu.tree_map(_move_to_front, args, args_bdim, is_leaf=_is_unmapped)
args_bdim = jtu.tree_map(_int_to_zero, args_bdim, is_leaf=_is_unmapped)
out = filter_primitive_bind(
noinline_p,
dynamic_index,
abstract_fn,
transforms + [_MetaBatchTransform(args_bdim)],
args,
)
return out, jtu.tree_map(lambda _: 0, out)
# Not a PyTree
| _MetaBatchTransform |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 145571,
"end": 146276
} | class ____(fixtures.TestBase):
@testing.provide_metadata
def test_callable_as_arg(self, connection):
ucode = util.partial(Unicode)
thing_table = Table("thing", self.metadata, Column("name", ucode(20)))
assert isinstance(thing_table.c.name.type, Unicode)
thing_table.create(connection)
@testing.provide_metadata
def test_callable_as_kwarg(self, connection):
ucode = util.partial(Unicode)
thang_table = Table(
"thang",
self.metadata,
Column("name", type_=ucode(20), primary_key=True),
)
assert isinstance(thang_table.c.name.type, Unicode)
thang_table.create(connection)
| CallableTest |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 50329,
"end": 55245
} | class ____(testing.TestCase):
@parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
@pytest.mark.skipif(
backend.backend() != "jax", reason="Testing Jax errors only"
)
def test_segment_reduce_no_num_segments(self, segment_reduce_op):
data = jnp.array([1, 2, 3, 4])
segment_ids = jnp.array([0, 0, 1, 1])
with self.assertRaisesRegex(
ValueError,
"Argument `num_segments` must be set when using the JAX backend.",
):
segment_reduce_op(data, segment_ids)
@parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Tensorflow error only"
)
def test_segment_reduce_sort_and_num_segments(self, segment_reduce_op):
data = np.array([1, 2, 3, 4])
segment_ids = np.array([0, 0, 1, 1])
with self.assertRaisesRegex(
ValueError,
"Argument `num_segments` cannot be set when sorted is True when "
"using the tensorflow backend.",
):
segment_reduce_op(data, segment_ids, num_segments=2, sorted=True)
@parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
def test_segment_reduce_multi_dim_segment_ids(self, segment_reduce_op):
data = np.array([1, 2, 3, 4])
segment_ids = np.array([0, 0, 1, 1]).reshape((2, 2))
with self.assertRaisesRegex(
ValueError,
"Argument `segment_ids` should be an 1-D vector,",
):
segment_reduce_op(data, segment_ids)
@parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
def test_segment_reduce_leading_not_match(self, segment_reduce_op):
data = np.array([])
segment_ids = np.array([0, 0, 1, 1])
with self.assertRaisesRegex(
ValueError,
"Argument `segment_ids` and `data` should have same leading "
"dimension.",
):
segment_reduce_op(data, segment_ids)
output_tensor = segment_reduce_op(
KerasTensor(shape=(None, 4)), KerasTensor(shape=(5,))
)
self.assertEqual(output_tensor.shape, (None, 4))
output_tensor = segment_reduce_op(
KerasTensor(shape=(5, 4)), KerasTensor(shape=(None,))
)
self.assertEqual(output_tensor.shape, (None, 4))
output_tensor = segment_reduce_op(
KerasTensor(shape=(None, 4)), KerasTensor(shape=(None,))
)
self.assertEqual(output_tensor.shape, (None, 4))
def test_stft_invalid_input_type(self):
# backend agnostic error message
x = np.array([1, 2, 3, 4])
sequence_length = 2
sequence_stride = 1
fft_length = 4
with self.assertRaisesRegex(TypeError, "`float32` or `float64`"):
kmath.stft(x, sequence_length, sequence_stride, fft_length)
def test_invalid_fft_length(self):
# backend agnostic error message
x = np.array([1.0, 2.0, 3.0, 4.0])
sequence_length = 4
sequence_stride = 1
fft_length = 2
with self.assertRaisesRegex(ValueError, "`fft_length` must equal or"):
kmath.stft(x, sequence_length, sequence_stride, fft_length)
def test_stft_invalid_window(self):
# backend agnostic error message
x = np.array([1.0, 2.0, 3.0, 4.0])
sequence_length = 2
sequence_stride = 1
fft_length = 4
window = "invalid_window"
with self.assertRaisesRegex(ValueError, "If a string is passed to"):
kmath.stft(
x, sequence_length, sequence_stride, fft_length, window=window
)
def test_stft_invalid_window_shape(self):
# backend agnostic error message
x = np.array([1.0, 2.0, 3.0, 4.0])
sequence_length = 2
sequence_stride = 1
fft_length = 4
window = np.ones((sequence_length + 1))
with self.assertRaisesRegex(ValueError, "The shape of `window` must"):
kmath.stft(
x, sequence_length, sequence_stride, fft_length, window=window
)
def test_istft_invalid_window_shape_2D_inputs(self):
# backend agnostic error message
x = (np.array([[1.0, 2.0]]), np.array([[3.0, 4.0]]))
sequence_length = 2
sequence_stride = 1
fft_length = 4
incorrect_window = np.ones((sequence_length + 1,))
with self.assertRaisesRegex(
ValueError, "The shape of `window` must be equal to"
):
kmath.istft(
x,
sequence_length,
sequence_stride,
fft_length,
window=incorrect_window,
)
@pytest.mark.skipif(
backend.backend() == "openvino",
reason="Complex dtype is not supported on OpenVINO backend.",
)
| TestMathErrors |
python | numpy__numpy | tools/swig/test/testVector.py | {
"start": 12219,
"end": 12482
} | class ____(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
| longTestCase |
python | spack__spack | lib/spack/spack/platforms/darwin.py | {
"start": 270,
"end": 2215
} | class ____(Platform):
priority = 89
binary_formats = ["macho"]
def __init__(self):
super().__init__("darwin")
mac_os = MacOs()
self.default_os = str(mac_os)
self.add_operating_system(str(mac_os), mac_os)
@classmethod
def detect(cls):
return "darwin" in py_platform.system().lower()
def setup_platform_environment(self, pkg, env):
"""Specify deployment target based on target OS version.
The ``MACOSX_DEPLOYMENT_TARGET`` environment variable provides a
default ``-mmacosx-version-min`` argument for GCC and Clang compilers,
as well as the default value of ``CMAKE_OSX_DEPLOYMENT_TARGET`` for
CMake-based build systems. The default value for the deployment target
is usually the major version (11, 10.16, ...) for CMake and Clang, but
some versions of GCC specify a minor component as well (11.3), leading
to numerous link warnings about inconsistent or incompatible target
versions. Setting the environment variable ensures consistent versions
for an install toolchain target, even when the host macOS version
changes.
TODO: it may be necessary to add SYSTEM_VERSION_COMPAT for older
versions of the macosx developer tools; see
https://github.com/spack/spack/pull/26290 for discussion.
"""
os = self.operating_sys[pkg.spec.os]
version = Version(os.version)
if len(version) == 1:
# Version has only one component: add a minor version to prevent
# potential errors with `ld`,
# which fails with `-macosx_version_min 11`
# but succeeds with `-macosx_version_min 11.0`.
# Most compilers seem to perform this translation automatically,
# but older GCC does not.
version = str(version) + ".0"
env.set("MACOSX_DEPLOYMENT_TARGET", str(version))
| Darwin |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_types.py | {
"start": 16614,
"end": 17794
} | class ____:
async def test_read_block_document_for_block_type(
self, client, block_type_x, block_document
):
response = await client.get(
f"/block_types/slug/{block_type_x.slug}/block_documents/name/{block_document.name}"
)
assert response.status_code == status.HTTP_200_OK
read_block_document = BlockDocument.model_validate(response.json())
assert read_block_document.id == block_document.id
assert read_block_document.name == block_document.name
async def test_read_block_document_for_nonexistent_block_type(
self, client, block_document
):
response = await client.get(
f"/block_types/slug/nonsense/block_documents/name/{block_document.name}"
)
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_read_block_document_for_nonexistent_block_document(
self, client, block_type_x
):
response = await client.get(
f"/block_types/slug/{block_type_x.slug}/block_documents/name/nonsense"
)
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadBlockDocumentByNameForBlockType |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/colormaplut.py | {
"start": 401,
"end": 743
} | class ____(Parameter):
itemClass = ColorMapLutParameterItem
def _interpretValue(self, v):
if isinstance(v, str):
v = colormap.get(v)
if v is not None and not isinstance(v, colormap.ColorMap):
raise TypeError("Cannot set colormap parameter from object %r" % v)
return v
| ColorMapLutParameter |
python | plotly__plotly.py | plotly/graph_objs/choropleth/_marker.py | {
"start": 233,
"end": 3771
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth"
_path_str = "choropleth.marker"
_valid_props = {"line", "opacity", "opacitysrc"}
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.choropleth.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def opacity(self):
"""
Sets the opacity of the locations.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def _prop_descriptions(self):
return """\
line
:class:`plotly.graph_objects.choropleth.marker.Line`
instance or dict with compatible properties
opacity
Sets the opacity of the locations.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
"""
def __init__(self, arg=None, line=None, opacity=None, opacitysrc=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.Marker`
line
:class:`plotly.graph_objects.choropleth.marker.Line`
instance or dict with compatible properties
opacity
Sets the opacity of the locations.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("line", arg, line)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | pandas-dev__pandas | pandas/io/parsers/python_parser.py | {
"start": 1899,
"end": 49060
} | class ____(ParserBase):
_no_thousands_columns: set[int]
def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:
"""
Workhorse function for processing nested list into DataFrame
"""
super().__init__(kwds)
self.data: Iterator[list[str]] | list[list[Scalar]] = []
self.buf: list = []
self.pos = 0
self.line_pos = 0
self.skiprows = kwds["skiprows"]
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
self.delimiter = kwds["delimiter"]
self.quotechar = kwds["quotechar"]
if isinstance(self.quotechar, str):
self.quotechar = str(self.quotechar)
self.escapechar = kwds["escapechar"]
self.doublequote = kwds["doublequote"]
self.skipinitialspace = kwds["skipinitialspace"]
self.lineterminator = kwds["lineterminator"]
self.quoting = kwds["quoting"]
self.skip_blank_lines = kwds["skip_blank_lines"]
# Passed from read_excel
self.has_index_names = kwds.get("has_index_names", False)
self.thousands = kwds["thousands"]
self.decimal = kwds["decimal"]
self.comment = kwds["comment"]
# Set self.data to something that can read lines.
if isinstance(f, list):
# read_excel: f is a nested list, can contain non-str
self.data = f
else:
assert hasattr(f, "readline")
# yields list of str
self.data = self._make_reader(f)
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
self._col_indices: list[int] | None = None
columns: list[list[Scalar | None]]
(
columns,
self.num_original_columns,
self.unnamed_cols,
) = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
# error: Cannot determine type of 'index_names'
(
self.columns,
self.index_names,
self.col_names,
_,
) = self._extract_multi_indexer_columns(
columns,
self.index_names,
)
# get popped off for index
self.orig_names: list[Hashable] = list(self.columns)
index_names, self.orig_names, self.columns = self._get_index_name()
if self.index_names is None:
self.index_names = index_names
if self._col_indices is None:
self._col_indices = list(range(len(self.columns)))
self._no_thousands_columns = self._set_no_thousand_columns()
if len(self.decimal) != 1:
raise ValueError("Only length-1 decimal markers supported")
@cache_readonly
def num(self) -> re.Pattern:
decimal = re.escape(self.decimal)
if self.thousands is None:
regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
else:
thousands = re.escape(self.thousands)
regex = (
rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
rf"([0-9]?(E|e)\-?[0-9]+)?$"
)
return re.compile(regex)
def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> Iterator[list[str]]:
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError(
"Custom line terminators not supported in python parser (yet)"
)
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = "\n"
dia = MyDialect
if sep is not None:
dia.delimiter = sep
# Skip rows at file level before csv.reader sees them
# prevents CSV parsing errors on lines that will be discarded
if self.skiprows is not None:
while self.skipfunc(self.pos):
line = f.readline()
if not line:
break
self.pos += 1
else:
# attempt to sniff the delimiter from the first valid line,
# i.e. no comment line and not in skiprows
line = f.readline()
lines = self._check_comments([[line]])[0]
while self.skipfunc(self.pos) or not lines:
self.pos += 1
line = f.readline()
lines = self._check_comments([[line]])[0]
lines_str = cast(list[str], lines)
# since `line` was a string, lines will be a list containing
# only a single string
line = lines_str[0]
self.pos += 1
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
# Note: encoding is irrelevant here
line_rdr = csv.reader(StringIO(line), dialect=dia)
self.buf.extend(list(line_rdr))
# Note: encoding is irrelevant here
reader = csv.reader(f, dialect=dia, strict=True)
else:
def _read():
line = f.readline()
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
return reader
def read(
self, rows: int | None = None
) -> tuple[
Index | None,
Sequence[Hashable] | MultiIndex,
Mapping[Hashable, ArrayLike | Series],
]:
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
self.close()
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
index: Index | None
columns: Sequence[Hashable] = list(self.orig_names)
if not content: # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
# error: Cannot determine type of 'index_col'
names = dedup_names(
self.orig_names,
is_potential_multi_index(
self.orig_names,
self.index_col,
),
)
index, columns, col_dict = self._get_empty_meta(
names,
self.dtype,
)
conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, conv_columns, col_dict
# handle new style for names in index
indexnamerow = None
if self.has_index_names and sum(
int(v == "" or v is None) for v in content[0]
) == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data, columns = self._exclude_implicit_index(alldata)
conv_data = self._convert_data(data)
conv_data = self._do_date_conversions(columns, conv_data)
index, result_columns = self._make_index(alldata, columns, indexnamerow)
return index, result_columns, conv_data
def _exclude_implicit_index(
self,
alldata: list[np.ndarray],
) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:
# error: Cannot determine type of 'index_col'
names = dedup_names(
self.orig_names,
is_potential_multi_index(
self.orig_names,
self.index_col,
),
)
offset = 0
if self._implicit_index:
offset = len(self.index_col)
len_alldata = len(alldata)
self._check_data_length(names, alldata)
return {
name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata
}, names
# legacy
def get_chunk(
self, size: int | None = None
) -> tuple[
Index | None,
Sequence[Hashable] | MultiIndex,
Mapping[Hashable, ArrayLike | Series],
]:
if size is None:
# error: "PythonParser" has no attribute "chunksize"
size = self.chunksize # type: ignore[attr-defined]
return self.read(rows=size)
def _convert_data(
self,
data: Mapping[Hashable, np.ndarray],
) -> Mapping[Hashable, ArrayLike]:
# apply converters
clean_conv = self._clean_mapping(self.converters)
clean_dtypes = self._clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
clean_na_fvalues = {}
if isinstance(self.na_values, dict):
for col in self.na_values:
if col is not None:
na_value = self.na_values[col]
na_fvalue = self.na_fvalues[col]
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_na_values[col] = na_value
clean_na_fvalues[col] = na_fvalue
else:
clean_na_values = self.na_values
clean_na_fvalues = self.na_fvalues
return self._convert_to_ndarrays(
data,
clean_na_values,
clean_na_fvalues,
clean_conv,
clean_dtypes,
)
@final
def _convert_to_ndarrays(
self,
dct: Mapping,
na_values,
na_fvalues,
converters=None,
dtypes=None,
) -> dict[Any, np.ndarray]:
result = {}
parse_date_cols = validate_parse_dates_presence(self.parse_dates, self.columns)
for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = get_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if c in parse_date_cols:
# GH#26203 Do not convert columns which get converted to dates
# but replace nans to ensure to_datetime works
mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) # pyright: ignore[reportArgumentType]
np.putmask(values, mask, np.nan)
result[c] = values
continue
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used."
),
ParserWarning,
stacklevel=find_stack_level(),
)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values,
set(col_na_values) | col_na_fvalues,
cast_type is None,
try_num_bool=False,
)
else:
is_ea = is_extension_array_dtype(cast_type)
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values,
set(col_na_values) | col_na_fvalues,
cast_type is None,
try_num_bool,
)
# type specified in dtype param or cast_type is an EA
if cast_type is not None:
cast_type = pandas_dtype(cast_type)
if cast_type and (cvals.dtype != cast_type or is_ea):
if not is_ea and na_count > 0:
if is_bool_dtype(cast_type):
raise ValueError(f"Bool column has NA values in column {c}")
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
return result
@final
def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
"""
Cast values to specified type
Parameters
----------
values : ndarray or ExtensionArray
cast_type : np.dtype or ExtensionDtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray or ExtensionArray
"""
if isinstance(cast_type, CategoricalDtype):
known_cats = cast_type.categories is not None
if not is_object_dtype(values.dtype) and not known_cats:
# TODO: this is for consistency with
# c-parser which parses all categories
# as strings
values = lib.ensure_string_array(
values, skipna=False, convert_na_value=False
)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type, true_values=self.true_values
)
# use the EA's implementation of casting
elif isinstance(cast_type, ExtensionDtype):
array_type = cast_type.construct_array_type()
try:
if isinstance(cast_type, BooleanDtype):
# error: Unexpected keyword argument "true_values" for
# "_from_sequence_of_strings" of "ExtensionArray"
values_str = [str(val) for val in values]
return array_type._from_sequence_of_strings( # type: ignore[call-arg]
values_str,
dtype=cast_type,
true_values=self.true_values, # pyright: ignore[reportCallIssue]
false_values=self.false_values, # pyright: ignore[reportCallIssue]
none_values=self.na_values, # pyright: ignore[reportCallIssue]
)
else:
return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError as err:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
"_from_sequence_of_strings in order to be used in parser methods"
) from err
elif isinstance(values, ExtensionArray):
values = values.astype(cast_type, copy=False)
elif issubclass(cast_type.type, str):
# TODO: why skipna=True here and False above? some tests depend
# on it here, but nothing fails if we change it above
# (as no tests get there as of 2022-12-06)
values = lib.ensure_string_array(
values, skipna=True, convert_na_value=False
)
else:
try:
values = astype_array(values, cast_type, copy=True)
except ValueError as err:
raise ValueError(
f"Unable to convert column {column} to type {cast_type}"
) from err
return values
@cache_readonly
def _have_mi_columns(self) -> bool:
if self.header is None:
return False
header = self.header
if isinstance(header, (list, tuple, np.ndarray)):
return len(header) > 1
else:
return False
def _infer_columns(
self,
) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:
names = self.names
num_original_columns = 0
clear_buffer = True
unnamed_cols: set[Scalar | None] = set()
if self.header is not None:
header = self.header
have_mi_columns = self._have_mi_columns
if isinstance(header, (list, tuple, np.ndarray)):
# we have a mi columns, so read an extra line
if have_mi_columns:
header = list(header) + [header[-1] + 1]
else:
header = [header]
columns: list[list[Scalar | None]] = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration as err:
if 0 < self.line_pos <= hr and (
not have_mi_columns or hr != header[-1]
):
# If no rows we want to raise a different message and if
# we have mi columns, the last line is not part of the header
joi = list(map(str, header[:-1] if have_mi_columns else header))
msg = f"[{','.join(joi)}], len of {len(joi)}, "
raise ValueError(
f"Passed header={msg}but only {self.line_pos} lines in file"
) from err
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self.buf.clear()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns, unnamed_cols
if not self.names:
raise EmptyDataError("No columns to parse from file") from err
line = self.names[:]
this_columns: list[Scalar | None] = []
this_unnamed_cols = []
for i, c in enumerate(line):
if c == "":
if have_mi_columns:
col_name = f"Unnamed: {i}_level_{level}"
else:
col_name = f"Unnamed: {i}"
this_unnamed_cols.append(i)
this_columns.append(col_name)
else:
this_columns.append(c)
if not have_mi_columns:
counts: DefaultDict = defaultdict(int)
# Ensure that regular columns are used before unnamed ones
# to keep given names and mangle unnamed columns
col_loop_order = [
i
for i in range(len(this_columns))
if i not in this_unnamed_cols
] + this_unnamed_cols
# TODO: Use pandas.io.common.dedup_names instead (see #50371)
for i in col_loop_order:
col = this_columns[i]
old_col = col
cur_count = counts[col]
if cur_count > 0:
while cur_count > 0:
counts[old_col] = cur_count + 1
col = f"{old_col}.{cur_count}"
if col in this_columns:
cur_count += 1
else:
cur_count = counts[col]
if (
self.dtype is not None
and is_dict_like(self.dtype)
and self.dtype.get(old_col) is not None
and self.dtype.get(col) is None
):
self.dtype.update({col: self.dtype.get(old_col)})
this_columns[i] = col
counts[col] = cur_count + 1
elif have_mi_columns:
# if we have grabbed an extra line, but it's not in our
# format so save in the buffer, and create a blank extra
# line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
sic = self.index_col
ic = len(sic) if sic is not None else 0
unnamed_count = len(this_unnamed_cols)
# if wrong number of blanks or no index, not our format
if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self.buf.clear()
first_line: list[Scalar] | None
if names is not None:
# Read first row after header to check if data are longer
try:
first_line = self._next_line()
except StopIteration:
first_line = None
len_first_data_row = 0 if first_line is None else len(first_line)
if len(names) > len(columns[0]) and len(names) > len_first_data_row:
raise ValueError(
"Number of passed names did not match "
"number of header fields in the file"
)
if len(columns) > 1:
raise TypeError("Cannot pass names with multi-index columns")
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names, num_original_columns)
else:
num_original_columns = len(names)
if self._col_indices is not None and len(names) != len(
self._col_indices
):
columns = [[names[i] for i in sorted(self._col_indices)]]
else:
columns = [names]
else:
columns = self._handle_usecols(
columns, columns[0], num_original_columns
)
else:
ncols = len(self._header_line)
num_original_columns = ncols
if not names:
columns = [list(range(ncols))]
columns = self._handle_usecols(columns, columns[0], ncols)
elif self.usecols is None or len(names) >= ncols:
columns = self._handle_usecols([names], names, ncols)
num_original_columns = len(names)
elif not callable(self.usecols) and len(names) != len(self.usecols):
raise ValueError(
"Number of passed names did not match number of "
"header fields in the file"
)
else:
# Ignore output but set used columns.
columns = [names]
self._handle_usecols(columns, columns[0], ncols)
return columns, num_original_columns, unnamed_cols
@cache_readonly
def _header_line(self):
# Store line for reuse in _get_index_name
if self.header is not None:
return None
try:
line = self._buffered_line()
except StopIteration as err:
if not self.names:
raise EmptyDataError("No columns to parse from file") from err
line = self.names[:]
return line
def _handle_usecols(
self,
columns: list[list[Scalar | None]],
usecols_key: list[Scalar | None],
num_original_columns: int,
) -> list[list[Scalar | None]]:
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
col_indices: set[int] | list[int]
if self.usecols is not None:
if callable(self.usecols):
col_indices = evaluate_callable_usecols(self.usecols, usecols_key)
elif any(isinstance(u, str) for u in self.usecols):
if len(columns) > 1:
raise ValueError(
"If using multiple headers, usecols must be integers."
)
col_indices = []
for col in self.usecols:
if isinstance(col, str):
try:
col_indices.append(usecols_key.index(col))
except ValueError:
self._validate_usecols_names(self.usecols, usecols_key)
else:
col_indices.append(col)
else:
missing_usecols = [
col for col in self.usecols if col >= num_original_columns
]
if missing_usecols:
raise ParserError(
"Defining usecols with out-of-bounds indices is not allowed. "
f"{missing_usecols} are out-of-bounds.",
)
col_indices = self.usecols
columns = [
[n for i, n in enumerate(column) if i in col_indices]
for column in columns
]
self._col_indices = sorted(col_indices)
return columns
def _buffered_line(self) -> list[Scalar]:
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], str):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
if first_elt != _BOM:
return first_row
first_row_bom = first_row[0]
new_row: str
if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
start = 2
quote = first_row_bom[1]
end = first_row_bom[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row_bom[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row_bom) > end + 1:
new_row += first_row_bom[end + 1 :]
else:
# No quotation so just remove BOM from first element
new_row = first_row_bom[1:]
new_row_list: list[Scalar] = [new_row]
return new_row_list + first_row[1:]
def _is_line_empty(self, line: Sequence[Scalar]) -> bool:
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
def _next_line(self) -> list[Scalar]:
if isinstance(self.data, list):
while self.skipfunc(self.pos):
if self.pos >= len(self.data):
break
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if not self.skip_blank_lines and (
self._is_line_empty(self.data[self.pos - 1]) or line
):
break
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
except IndexError as err:
raise StopIteration from err
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
if orig_line is not None:
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
def _alert_malformed(self, msg: str, row_num: int) -> None:
"""
Alert a user about a malformed row, depending on value of
`self.on_bad_lines` enum.
If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.
If `self.on_bad_lines` is WARN, the alert will be printed out.
Parameters
----------
msg: str
The error message to display.
row_num: int
The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
raise ParserError(msg)
if self.on_bad_lines == self.BadLineHandleMethod.WARN or callable(
self.on_bad_lines
):
warnings.warn(
f"Skipping line {row_num}: {msg}\n",
ParserWarning,
stacklevel=find_stack_level(),
)
def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num: int
The row number of the line being parsed.
"""
try:
assert not isinstance(self.data, list)
line = next(self.data)
# lie about list[str] vs list[Scalar] to minimize ignores
return line # type: ignore[return-value]
except csv.Error as e:
if self.on_bad_lines in (
self.BadLineHandleMethod.ERROR,
self.BadLineHandleMethod.WARN,
):
msg = str(e)
if "NULL byte" in msg or "line contains NUL" in msg:
msg = (
"NULL byte detected. This byte "
"cannot be processed in Python's "
"native csv library at the moment, "
"so please pass in engine='c' instead"
)
if self.skipfooter > 0:
reason = (
"Error could possibly be due to "
"parsing errors in the skipped footer rows "
"(the skipfooter keyword is only applied "
"after Python's csv library has parsed "
"all rows)."
)
msg += ". " + reason
self._alert_malformed(msg, row_num)
return None
def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
if self.comment is None:
return lines
ret = []
for line in lines:
rl = []
for x in line:
if (
not isinstance(x, str)
or self.comment not in x
or x in self.na_values
):
rl.append(x)
else:
x = x[: x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]:
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : list of list of Scalars
The array of lines that we are to filter.
Returns
-------
filtered_lines : list of list of Scalars
The same array of lines with the "empty" ones removed.
"""
# Remove empty lines and lines with only one whitespace value
ret = [
line
for line in lines
if (
len(line) > 1
or (
len(line) == 1 and (not isinstance(line[0], str) or line[0].strip())
)
)
]
return ret
def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
if self.thousands is None:
return lines
return self._search_replace_num_columns(
lines=lines, search=self.thousands, replace=""
)
def _search_replace_num_columns(
self, lines: list[list[Scalar]], search: str, replace: str
) -> list[list[Scalar]]:
ret = []
for line in lines:
rl = []
for i, x in enumerate(line):
if (
not isinstance(x, str)
or search not in x
or i in self._no_thousands_columns
or not self.num.search(x.strip())
):
rl.append(x)
else:
rl.append(x.replace(search, replace))
ret.append(rl)
return ret
def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
if self.decimal == parser_defaults["decimal"]:
return lines
return self._search_replace_num_columns(
lines=lines, search=self.decimal, replace="."
)
def _get_index_name(
self,
) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:
"""
Try several cases to get lines:
0) There are headers on row 0 and row 1 and their
total summed lengths equals the length of the next line.
Treat row 0 as columns and row 1 as indices
1) Look for implicit index: there are more columns
on row 1 than row 0. If this is true, assume that row
1 lists index columns and row 0 lists normal columns.
2) Get index from the columns if it was listed.
"""
columns: Sequence[Hashable] = self.orig_names
orig_names = list(columns)
columns = list(columns)
line: list[Scalar] | None
if self._header_line is not None:
line = self._header_line
else:
try:
line = self._next_line()
except StopIteration:
line = None
next_line: list[Scalar] | None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
index_col = self.index_col
if index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if (
next_line is not None
and self.header is not None
and index_col is not False
):
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = list(range(len(line)))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
orig_names = list(columns)
self.num_original_columns = len(columns)
return line, orig_names, columns
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = list(range(implicit_first_cols))
index_name = None
else:
# Case 2
(index_name, _, self.index_col) = self._clean_index_names(
columns, self.index_col
)
return index_name, orig_names, columns
def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
    """Transpose parsed rows into per-column object arrays.

    Rows with *more* fields than expected are handled according to the
    ``on_bad_lines`` policy (callable / ERROR / WARN); rows with fewer
    fields are padded with NaN by ``lib.to_object_array``.
    """
    col_len = self.num_original_columns

    if self._implicit_index:
        # Implicit index columns count toward the expected width.
        col_len += len(self.index_col)

    max_len = max(len(row) for row in content)

    # Check that there are no rows with too many
    # elements in their row (rows with too few
    # elements are padded with NaN).
    if max_len > col_len and self.index_col is not False and self.usecols is None:
        footers = self.skipfooter if self.skipfooter else 0
        bad_lines = []

        iter_content = enumerate(content)
        content_len = len(content)
        # Rebuild `content`, keeping only rows that pass the policy.
        content = []

        for i, _content in iter_content:
            actual_len = len(_content)

            if actual_len > col_len:
                if callable(self.on_bad_lines):
                    # User callable may repair the row; returning None drops it.
                    new_l = self.on_bad_lines(_content)
                    if new_l is not None:
                        new_l = cast(list[Scalar], new_l)
                        if len(new_l) > col_len:
                            # Still too wide: record and truncate.
                            row_num = self.pos - (content_len - i + footers)
                            bad_lines.append((row_num, len(new_l), "callable"))
                            new_l = new_l[:col_len]
                        content.append(new_l)
                elif self.on_bad_lines in (
                    self.BadLineHandleMethod.ERROR,
                    self.BadLineHandleMethod.WARN,
                ):
                    row_num = self.pos - (content_len - i + footers)
                    bad_lines.append((row_num, actual_len, "normal"))

                    if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
                        # ERROR: stop at the first bad row.
                        break
            else:
                content.append(_content)

        # Report (raise or warn via _alert_malformed) every recorded bad row.
        for row_num, actual_len, source in bad_lines:
            msg = (
                f"Expected {col_len} fields in line {row_num + 1}, saw {actual_len}"
            )
            if source == "callable":
                msg += " from bad_lines callable"
            elif (
                self.delimiter
                and len(self.delimiter) > 1
                and self.quoting != csv.QUOTE_NONE
            ):
                # see gh-13374
                reason = (
                    "Error could possibly be due to quotes being "
                    "ignored when a multi-char delimiter is used."
                )
                msg += ". " + reason

            self._alert_malformed(msg, row_num + 1)

    # see gh-13320
    zipped_content = list(lib.to_object_array(content, min_width=col_len).T)

    if self.usecols:
        assert self._col_indices is not None
        col_indices = self._col_indices

        if self._implicit_index:
            # Keep all implicit index columns plus the selected data columns.
            zipped_content = [
                a
                for i, a in enumerate(zipped_content)
                if (
                    i < len(self.index_col)
                    or i - len(self.index_col) in col_indices
                )
            ]
        else:
            zipped_content = [
                a for i, a in enumerate(zipped_content) if i in col_indices
            ]
    return zipped_content
def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
    """Fetch up to ``rows`` raw lines (all remaining lines when None),
    combining the internal buffer with fresh reads from ``self.data``,
    then apply skipfooter/comment/blank-line/thousands/decimal cleanup."""
    lines = self.buf
    new_rows = None

    # already fetched some number
    if rows is not None:
        # we already have the lines in the buffer
        if len(self.buf) >= rows:
            new_rows, self.buf = self.buf[:rows], self.buf[rows:]
        # need some lines
        else:
            rows -= len(self.buf)

    if new_rows is None:
        if isinstance(self.data, list):
            # List-backed source: slice directly by position.
            if self.pos > len(self.data):
                raise StopIteration
            if rows is None:
                new_rows = self.data[self.pos :]
                new_pos = len(self.data)
            else:
                new_rows = self.data[self.pos : self.pos + rows]
                new_pos = self.pos + rows

            new_rows = self._remove_skipped_rows(new_rows)
            lines.extend(new_rows)
            self.pos = new_pos

        else:
            # Iterator-backed source: pull rows one at a time.
            new_rows = []
            try:
                if rows is not None:
                    row_index = 0
                    row_ct = 0
                    offset = self.pos if self.pos is not None else 0
                    # Count only non-skipped rows toward the requested total.
                    while row_ct < rows:
                        new_row = next(self.data)
                        if not self.skipfunc(offset + row_index):
                            row_ct += 1
                        row_index += 1
                        new_rows.append(new_row)

                    len_new_rows = len(new_rows)
                    new_rows = self._remove_skipped_rows(new_rows)
                    lines.extend(new_rows)
                else:
                    rows = 0

                    # Exhaust the iterator; exits via StopIteration below.
                    while True:
                        next_row = self._next_iter_line(row_num=self.pos + rows + 1)
                        rows += 1

                        if next_row is not None:
                            new_rows.append(next_row)
                    len_new_rows = len(new_rows)

            except StopIteration:
                len_new_rows = len(new_rows)
                new_rows = self._remove_skipped_rows(new_rows)
                lines.extend(new_rows)
                # Re-raise only if nothing at all was collected.
                if len(lines) == 0:
                    raise
            self.pos += len_new_rows

            self.buf = []
    else:
        # Whole request satisfied from the buffer.
        lines = new_rows

    if self.skipfooter:
        lines = lines[: -self.skipfooter]

    lines = self._check_comments(lines)
    if self.skip_blank_lines:
        lines = self._remove_empty_lines(lines)
    lines = self._check_thousands(lines)
    return self._check_decimal(lines)
def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
    """Drop rows whose absolute position matches the skiprows predicate."""
    if not self.skiprows:
        # No skip specification configured: keep everything.
        return new_rows
    offset = self.pos
    kept: list[list[Scalar]] = []
    for idx, row in enumerate(new_rows):
        # skipfunc is evaluated against the absolute row number in the file.
        if not self.skipfunc(idx + offset):
            kept.append(row)
    return kept
def _set_no_thousand_columns(self) -> set[int]:
    """Return the set of column indices that must NOT get thousands-separator
    stripping: date-parsed columns and columns with a non-numeric (or bool)
    requested dtype."""
    no_thousands_columns: set[int] = set()
    if self.columns and self.parse_dates:
        assert self._col_indices is not None
        # Date columns are resolved through the shared noconvert helper.
        no_thousands_columns = self._set_noconvert_dtype_columns(
            self._col_indices, self.columns
        )
    if self.columns and self.dtype:
        assert self._col_indices is not None
        for i, col in zip(self._col_indices, self.columns, strict=True):
            # Single (non-dict) dtype: applies to every column.
            if not isinstance(self.dtype, dict) and not is_numeric_dtype(
                self.dtype
            ):
                no_thousands_columns.add(i)
            # Per-column dtype mapping: exclude non-numeric and bool columns.
            if (
                isinstance(self.dtype, dict)
                and col in self.dtype
                and (
                    not is_numeric_dtype(self.dtype[col])
                    or is_bool_dtype(self.dtype[col])
                )
            ):
                no_thousands_columns.add(i)
    return no_thousands_columns
| PythonParser |
python | sympy__sympy | sympy/stats/frv.py | {
"start": 2359,
"end": 3278
class ____(FiniteDomain):
    """
    A FiniteDomain over a single symbol/set
    Example: The possibilities of a *single* die roll.
    """

    def __new__(cls, symbol, set):
        # Coerce plain iterables into a FiniteSet; an Intersection is passed
        # through unchanged (it may represent a symbolic/conditional set).
        if not isinstance(set, FiniteSet) and \
            not isinstance(set, Intersection):
            set = FiniteSet(*set)
        return Basic.__new__(cls, symbol, set)

    @property
    def symbol(self):
        # The single random symbol this domain is defined over.
        return self.args[0]

    @property
    def symbols(self):
        # Uniform interface with multi-symbol domains: a one-element set.
        return FiniteSet(self.symbol)

    @property
    def set(self):
        # The set of values the symbol may take.
        return self.args[1]

    @property
    def elements(self):
        # Each element is a frozenset of one (symbol, value) binding.
        return FiniteSet(*[frozenset(((self.symbol, elem), )) for elem in self.set])

    def __iter__(self):
        # Iterate lazily over the same (symbol, value) bindings as `elements`.
        return (frozenset(((self.symbol, elem),)) for elem in self.set)

    def __contains__(self, other):
        # `other` is expected to be a one-binding frozenset as produced above.
        sym, val = tuple(other)[0]
        return sym == self.symbol and val in self.set
| SingleFiniteDomain |
python | pytorch__pytorch | test/jit/test_tensor_methods.py | {
"start": 391,
"end": 1198
class ____(JitTestCase):
    """TorchScript tests for calling Tensor dunder methods explicitly."""

    def test_getitem(self):
        # Explicit __getitem__ with a LongTensor index should both match
        # eager behavior and lower to aten::index in the scripted graph.
        def tensor_getitem(inp: torch.Tensor):
            indices = torch.tensor([0, 2], dtype=torch.long)
            return inp.__getitem__(indices)

        inp = torch.rand(3, 4)
        self.checkScript(tensor_getitem, (inp,))

        scripted = torch.jit.script(tensor_getitem)
        FileCheck().check("aten::index").run(scripted.graph)

    def test_getitem_invalid(self):
        # Calling __getitem__ with no argument must fail at script time,
        # with the error highlighting the offending call.
        def tensor_getitem_invalid(inp: torch.Tensor):
            return inp.__getitem__()

        with self.assertRaisesRegexWithHighlight(
            RuntimeError, "expected exactly 1 argument", "inp.__getitem__"
        ):
            torch.jit.script(tensor_getitem_invalid)


if __name__ == "__main__":
    raise_on_run_directly("test/test_jit.py")
| TestTensorMethods |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/controller.py | {
"start": 5215,
"end": 19877
class ____(AbstractContextManager):
    """Supervises one background thread per configured daemon.

    Threads are started in ``__init__``; the instance is then used as a
    context manager, and exiting signals shutdown and joins the threads.
    Health is tracked two ways: thread liveness and heartbeat recency.
    """

    _daemon_uuid: str
    _daemons: dict[str, DagsterDaemon]
    _grpc_server_registry: Optional[GrpcServerRegistry]
    _daemon_threads: dict[str, threading.Thread]
    _workspace_process_context: IWorkspaceProcessContext
    _instance: DagsterInstance
    _heartbeat_interval_seconds: float
    _heartbeat_tolerance_seconds: float
    _daemon_shutdown_event: threading.Event
    _logger: logging.Logger
    _last_healthy_heartbeat_times: dict[str, float]
    _start_time: datetime.datetime

    def __init__(
        self,
        workspace_process_context: IWorkspaceProcessContext,
        daemons: Sequence[DagsterDaemon],
        grpc_server_registry: Optional[GrpcServerRegistry] = None,
        heartbeat_interval_seconds: float = DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
        heartbeat_tolerance_seconds: float = DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
        error_interval_seconds: int = DEFAULT_DAEMON_ERROR_INTERVAL_SECONDS,
    ):
        # Unique id for this controller run; threads stamp heartbeats with it.
        self._daemon_uuid = str(uuid.uuid4())

        self._daemons = {}
        self._daemon_threads = {}

        self._workspace_process_context = workspace_process_context
        self._instance = workspace_process_context.instance
        self._daemons = {daemon.daemon_type(): daemon for daemon in daemons}

        self._heartbeat_interval_seconds = check.numeric_param(
            heartbeat_interval_seconds, "heartbeat_interval_seconds"
        )

        self._heartbeat_tolerance_seconds = check.numeric_param(
            heartbeat_tolerance_seconds, "heartbeat_tolerance_seconds"
        )

        self._grpc_server_registry = grpc_server_registry

        if not self._daemons:
            raise Exception("No daemons configured on the DagsterInstance")

        self._daemon_shutdown_event = threading.Event()
        self._logger = logging.getLogger("dagster.daemon")
        self._logger.info(
            "Instance is configured with the following daemons: %s",
            _sorted_quoted(type(daemon).__name__ for daemon in self.daemons),
        )

        self._last_healthy_heartbeat_times = {}

        # Start one daemon=True thread per daemon immediately.
        for daemon_type, daemon in self._daemons.items():
            self._daemon_threads[daemon_type] = threading.Thread(
                target=daemon.run_daemon_loop,
                args=(
                    workspace_process_context,
                    self._daemon_uuid,
                    self._daemon_shutdown_event,
                    heartbeat_interval_seconds,
                    error_interval_seconds,
                ),
                name=f"dagster-daemon-{daemon_type}",
                daemon=True,  # Individual daemons should not outlive controller process
            )
            # Seed heartbeat health so a freshly started daemon is not
            # immediately flagged as unhealthy.
            self._last_healthy_heartbeat_times[daemon_type] = time.time()
            self._daemon_threads[daemon_type].start()

        self._start_time = get_current_datetime()

    def __enter__(self) -> Self:
        return self

    def _daemon_thread_healthy(self, daemon_type: str) -> bool:
        # Thread-level liveness only; heartbeat health is checked separately.
        thread = self._daemon_threads[daemon_type]
        return thread.is_alive()

    def _daemon_heartbeat_health(self) -> Mapping[str, bool]:
        """Return heartbeat health per daemon type, falling back to the last
        locally observed healthy time if the storage read fails."""
        now = time.time()
        try:
            daemon_statuses_by_type = get_daemon_statuses(
                self._instance,
                daemon_types=self._daemons.keys(),
                heartbeat_interval_seconds=self._heartbeat_interval_seconds,
                heartbeat_tolerance_seconds=self._heartbeat_tolerance_seconds,
                ignore_errors=True,
            )

            daemon_health_by_type = {
                daemon_type: daemon_status.healthy
                for (daemon_type, daemon_status) in daemon_statuses_by_type.items()
            }

            for daemon_type, is_daemon_healthy in daemon_health_by_type.items():
                if is_daemon_healthy:
                    self._last_healthy_heartbeat_times[daemon_type] = now

            return daemon_health_by_type  # type: ignore  # (possible None)
        except Exception:
            self._logger.warning(
                "Error attempting to check daemon heartbeats",
                exc_info=sys.exc_info,  # type: ignore  # (should be func call)
            )

            # Storage unavailable: judge health from the last time we saw
            # a healthy heartbeat, within the tolerance window.
            return {
                daemon_type: (
                    self._last_healthy_heartbeat_times[daemon_type]
                    > now - self._heartbeat_tolerance_seconds
                )
                for daemon_type in self._daemons.keys()
            }

    def check_daemon_threads(self) -> None:
        # A dead thread is fatal: raise so the whole process restarts.
        failed_daemons = [
            daemon_type
            for daemon_type in self._daemon_threads
            if not self._daemon_thread_healthy(daemon_type)
        ]

        if failed_daemons:
            self._logger.error(
                "Stopping dagster-daemon process since the following threads are no longer"
                f" running: {failed_daemons}"
            )
            raise Exception("Stopped dagster-daemon process due to threads no longer running")

    def check_daemon_heartbeats(self) -> None:
        # Stale heartbeats only warn (the daemon may just be slow).
        no_heartbeat_daemons = [
            daemon_type
            for daemon_type, is_daemon_healthy in self._daemon_heartbeat_health().items()
            if not is_daemon_healthy
        ]

        if no_heartbeat_daemons:
            self._logger.warning(
                "The following threads have not sent heartbeats in more than"
                f" {self._heartbeat_tolerance_seconds} seconds: {no_heartbeat_daemons}."
                " They may be running more slowly than expected or hanging."
            )

    def check_workspace_freshness(self, last_workspace_update_time: float) -> float:
        """Refresh the shared workspace on an interval; tolerate transient
        refresh failures until the freshness tolerance is exceeded.

        Returns the timestamp of the last successful (or tolerated) update.
        """
        nowish = get_current_timestamp()
        try:
            if (nowish - last_workspace_update_time) > RELOAD_WORKSPACE_INTERVAL:
                if self._grpc_server_registry:
                    self._grpc_server_registry.clear_all_grpc_endpoints()
                self._workspace_process_context.refresh_workspace()
                return get_current_timestamp()
        except Exception:
            if (nowish - last_workspace_update_time) > DEFAULT_WORKSPACE_FRESHNESS_TOLERANCE:
                self._logger.exception("Daemon controller surpassed workspace freshness tolerance.")
                raise
            else:
                self._logger.exception(
                    "Daemon controller failed to refresh workspace. Still within freshness tolerance."
                )
        return last_workspace_update_time

    def check_daemon_loop(self) -> None:
        """Main supervision loop: poll thread liveness, workspace freshness
        and (unless configured off) heartbeat recency until interrupted."""
        start_time = time.time()
        last_heartbeat_check_time = start_time
        last_workspace_update_time = start_time
        while True:
            # Defer interrupts while sleeping/checking so shutdown is clean.
            with raise_interrupts_as(KeyboardInterrupt):
                time.sleep(THREAD_CHECK_INTERVAL)
                self.check_daemon_threads()

                # periodically refresh the shared workspace context
                last_workspace_update_time = self.check_workspace_freshness(
                    last_workspace_update_time
                )

                if self._instance.daemon_skip_heartbeats_without_errors:
                    # If we're skipping heartbeats without errors, we just check the threads.
                    # If there's no errors, the daemons won't be writing heartbeats.
                    continue

                now = get_current_timestamp()
                # Give the daemon enough time to send an initial heartbeat before checking
                if (
                    (now - start_time) < 2 * self._heartbeat_interval_seconds
                    or now - last_heartbeat_check_time < HEARTBEAT_CHECK_INTERVAL
                ):
                    continue

                self.check_daemon_heartbeats()
                last_heartbeat_check_time = get_current_timestamp()

    def __exit__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self,
        exception_type: type[BaseException],
        exception_value: Exception,
        traceback: TracebackType,
    ) -> None:
        if isinstance(exception_value, KeyboardInterrupt):
            self._logger.info("Received interrupt, shutting down daemon threads...")
        elif exception_type:
            self._logger.warning(
                f"Shutting down daemon threads due to {exception_type.__name__}..."
            )
        else:
            self._logger.info("Shutting down daemon threads...")
        # Signal all daemon loops, then give each thread 30s to finish.
        self._daemon_shutdown_event.set()
        for daemon_type, thread in self._daemon_threads.items():
            if thread.is_alive():
                thread.join(timeout=30)

                if thread.is_alive():
                    self._logger.error("Thread for %s did not shut down gracefully.", daemon_type)

        self._logger.info("Daemon threads shut down.")

    def _add_daemon(self, daemon: DagsterDaemon) -> None:
        self._daemons[daemon.daemon_type()] = daemon

    def get_daemon(self, daemon_type: str) -> DagsterDaemon:
        return self._daemons.get(daemon_type)  # type: ignore  # (possible none)

    @property
    def daemons(self) -> Sequence[DagsterDaemon]:
        return list(self._daemons.values())
def create_daemon_of_type(daemon_type: str, instance: DagsterInstance) -> DagsterDaemon:
    """Instantiate the daemon implementation registered under ``daemon_type``.

    Construction is deferred via zero-arg factories so instance settings are
    only read for the daemon actually requested.
    """
    factories = {
        SchedulerDaemon.daemon_type(): lambda: SchedulerDaemon(),
        SensorDaemon.daemon_type(): lambda: SensorDaemon(
            settings=instance.get_sensor_settings()
        ),
        QueuedRunCoordinatorDaemon.daemon_type(): lambda: QueuedRunCoordinatorDaemon(
            interval_seconds=instance.run_coordinator.dequeue_interval_seconds  # type: ignore # (??)
        ),
        BackfillDaemon.daemon_type(): lambda: BackfillDaemon(
            settings=instance.get_backfill_settings()
        ),
        MonitoringDaemon.daemon_type(): lambda: MonitoringDaemon(
            interval_seconds=instance.run_monitoring_poll_interval_seconds
        ),
        EventLogConsumerDaemon.daemon_type(): lambda: EventLogConsumerDaemon(),
        AssetDaemon.daemon_type(): lambda: AssetDaemon(
            settings=instance.get_auto_materialize_settings(),
            pre_sensor_interval_seconds=(
                instance.auto_materialize_minimum_interval_seconds
                if instance.auto_materialize_minimum_interval_seconds is not None
                else DEFAULT_DAEMON_INTERVAL_SECONDS
            ),
        ),
        FreshnessDaemon.daemon_type(): lambda: FreshnessDaemon(),
    }
    if daemon_type not in factories:
        raise Exception(f"Unexpected daemon type {daemon_type}")
    return factories[daemon_type]()
def all_daemons_healthy(
    instance: DagsterInstance,
    curr_time_seconds: Optional[float] = None,
    heartbeat_interval_seconds: float = DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
    heartbeat_tolerance_seconds: float = DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
) -> bool:
    """True if all required daemons have had a recent heartbeat with no errors."""
    # Heartbeat errors count against health here (ignore_errors defaults off).
    statuses = get_daemon_statuses(
        instance,
        daemon_types=instance.get_required_daemon_types(),
        heartbeat_interval_seconds=heartbeat_interval_seconds,
        heartbeat_tolerance_seconds=heartbeat_tolerance_seconds,
        curr_time_seconds=curr_time_seconds,
    ).values()
    return all(daemon_status.healthy for daemon_status in statuses)
def all_daemons_live(
    instance: DagsterInstance,
    curr_time_seconds: Optional[float] = None,
    heartbeat_interval_seconds: float = DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
    heartbeat_tolerance_seconds: float = DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
) -> bool:
    """True if all required daemons have had a recent heartbeat, regardless of if it contained errors."""
    # Unlike all_daemons_healthy, heartbeat errors are ignored: only
    # recency of the heartbeat matters.
    statuses = get_daemon_statuses(
        instance,
        daemon_types=instance.get_required_daemon_types(),
        heartbeat_interval_seconds=heartbeat_interval_seconds,
        heartbeat_tolerance_seconds=heartbeat_tolerance_seconds,
        curr_time_seconds=curr_time_seconds,
        ignore_errors=True,
    ).values()
    return all(daemon_status.healthy for daemon_status in statuses)
def get_daemon_statuses(
    instance: DagsterInstance,
    daemon_types: Iterable[str],
    curr_time_seconds: Optional[float] = None,
    ignore_errors: bool = False,
    heartbeat_interval_seconds: float = DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
    heartbeat_tolerance_seconds: float = DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
) -> Mapping[str, DaemonStatus]:
    """Build a DaemonStatus per requested daemon type.

    A daemon is healthy when its latest heartbeat is within
    ``heartbeat_interval_seconds + heartbeat_tolerance_seconds`` of now and
    (unless ``ignore_errors``) carries no errors. Non-required daemons get
    ``healthy=None``; required daemons without any heartbeat are unhealthy.
    """
    curr_time_seconds = check.opt_float_param(
        curr_time_seconds, "curr_time_seconds", default=get_current_timestamp()
    )

    daemon_statuses_by_type: dict[str, DaemonStatus] = {}
    heartbeats = instance.get_daemon_heartbeats()

    for daemon_type in daemon_types:
        # check if daemon is not required
        if daemon_type not in instance.get_required_daemon_types():
            daemon_statuses_by_type[daemon_type] = DaemonStatus(
                daemon_type=daemon_type, required=False, healthy=None, last_heartbeat=None
            )
        else:
            # check if daemon has a heartbeat
            if daemon_type not in heartbeats:
                daemon_statuses_by_type[daemon_type] = DaemonStatus(
                    daemon_type=daemon_type, required=True, healthy=False, last_heartbeat=None
                )
            else:
                # check if daemon has sent a recent heartbeat
                latest_heartbeat = heartbeats[daemon_type]
                hearbeat_timestamp = latest_heartbeat.timestamp
                maximum_tolerated_time = (
                    hearbeat_timestamp + heartbeat_interval_seconds + heartbeat_tolerance_seconds
                )
                healthy = curr_time_seconds <= maximum_tolerated_time

                if not ignore_errors and latest_heartbeat.errors:
                    healthy = False

                daemon_statuses_by_type[daemon_type] = DaemonStatus(
                    daemon_type=daemon_type,
                    required=True,
                    healthy=healthy,
                    last_heartbeat=heartbeats[daemon_type],
                )

    return daemon_statuses_by_type
def debug_daemon_heartbeats(instance: DagsterInstance) -> None:
    """Write a sensor-daemon heartbeat, read it back, and print both timestamps."""
    sensor_daemon = SensorDaemon(settings=instance.get_sensor_settings())
    written = get_current_timestamp()
    instance.add_daemon_heartbeat(
        DaemonHeartbeat(written, sensor_daemon.daemon_type(), None, None)
    )
    stored = instance.get_daemon_heartbeats()[sensor_daemon.daemon_type()]
    print(f"Written timestamp: {written}\nRead timestamp: {stored.timestamp}")  # noqa: T201
| DagsterDaemonController |
python | sympy__sympy | sympy/ntheory/ecm.py | {
"start": 621,
"end": 11793
class ____:
    """Montgomery form of Points in an elliptic curve.

    In this form, the addition and doubling of points
    does not need any y-coordinate information thus
    decreasing the number of operations.
    Using Montgomery form we try to perform point addition
    and doubling in least amount of multiplications.

    The elliptic curve used here is of the form
    (E : b*y**2*z = x**3 + a*x**2*z + x*z**2).
    The a_24 parameter is equal to (a + 2)/4.

    References
    ==========

    .. [1] Kris Gaj, Soonhak Kwon, Patrick Baier, Paul Kohlbrenner, Hoang Le, Mohammed Khaleeluddin, Ramakrishna Bachimanchi,
           Implementing the Elliptic Curve Method of Factoring in Reconfigurable Hardware,
           Cryptographic Hardware and Embedded Systems - CHES 2006 (2006), pp. 119-133,
           https://doi.org/10.1007/11894063_10
           https://www.hyperelliptic.org/tanja/SHARCS/talks06/Gaj.pdf
    """

    def __init__(self, x_cord, z_cord, a_24, mod):
        """
        Initial parameters for the Point class.

        Parameters
        ==========

        x_cord : X coordinate of the Point
        z_cord : Z coordinate of the Point
        a_24 : Parameter of the elliptic curve in Montgomery form
        mod : modulus
        """
        self.x_cord = x_cord
        self.z_cord = z_cord
        self.a_24 = a_24
        self.mod = mod

    def __eq__(self, other):
        """Two points are equal if X/Z of both points are equal."""
        if self.a_24 != other.a_24 or self.mod != other.mod:
            return False
        # Compare projective X/Z by cross-multiplication to avoid a modular
        # inversion: X1/Z1 == X2/Z2 (mod n)  <=>  X1*Z2 == X2*Z1 (mod n).
        return self.x_cord * other.z_cord % self.mod ==\
            other.x_cord * self.z_cord % self.mod

    def add(self, Q, diff):
        """
        Add two points self and Q where diff = self - Q. Moreover the assumption
        is self.x_cord*Q.x_cord*(self.x_cord - Q.x_cord) != 0. This algorithm
        requires 6 multiplications. Here the difference between the points
        is already known and using this algorithm speeds up the addition
        by reducing the number of multiplication required. Also in the
        mont_ladder algorithm is constructed in a way so that the difference
        between intermediate points is always equal to the initial point.
        So, we always know what the difference between the point is.

        Parameters
        ==========

        Q : point on the curve in Montgomery form
        diff : self - Q

        Examples
        ========

        >>> from sympy.ntheory.ecm import Point
        >>> p1 = Point(11, 16, 7, 29)
        >>> p2 = Point(13, 10, 7, 29)
        >>> p3 = p2.add(p1, p1)
        >>> p3.x_cord
        23
        >>> p3.z_cord
        17
        """
        u = (self.x_cord - self.z_cord)*(Q.x_cord + Q.z_cord)
        v = (self.x_cord + self.z_cord)*(Q.x_cord - Q.z_cord)
        add, subt = u + v, u - v
        x_cord = diff.z_cord * add * add % self.mod
        z_cord = diff.x_cord * subt * subt % self.mod
        # Construct the result through type(self) rather than a hard-coded
        # class name: the class name is not bound in this chunk, and this
        # also keeps the arithmetic usable from subclasses.
        return type(self)(x_cord, z_cord, self.a_24, self.mod)

    def double(self):
        """
        Doubles a point in an elliptic curve in Montgomery form.
        This algorithm requires 5 multiplications.

        Examples
        ========

        >>> from sympy.ntheory.ecm import Point
        >>> p1 = Point(11, 16, 7, 29)
        >>> p2 = p1.double()
        >>> p2.x_cord
        13
        >>> p2.z_cord
        10
        """
        u = pow(self.x_cord + self.z_cord, 2, self.mod)
        v = pow(self.x_cord - self.z_cord, 2, self.mod)
        diff = u - v
        x_cord = u*v % self.mod
        z_cord = diff*(v + self.a_24*diff) % self.mod
        return type(self)(x_cord, z_cord, self.a_24, self.mod)

    def mont_ladder(self, k):
        """
        Scalar multiplication of a point in Montgomery form
        using Montgomery Ladder Algorithm.
        A total of 11 multiplications are required in each step of this
        algorithm.

        Parameters
        ==========

        k : The positive integer multiplier

        Examples
        ========

        >>> from sympy.ntheory.ecm import Point
        >>> p1 = Point(11, 16, 7, 29)
        >>> p3 = p1.mont_ladder(3)
        >>> p3.x_cord
        23
        >>> p3.z_cord
        17
        """
        # Classic ladder: walk the bits of k (MSB first, skipping the top
        # bit) keeping the invariant R - Q == self at every step.
        Q = self
        R = self.double()
        for i in bin(k)[3:]:
            if i == '1':
                Q = R.add(Q, self)
                R = R.double()
            else:
                R = Q.add(R, self)
                Q = Q.double()
        return Q


# Restore the public name used throughout this module (e.g. by
# _ecm_one_factor and the doctests above).
Point = ____
def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200, seed=None):
    """Returns one factor of n using
    Lenstra's 2 Stage Elliptic curve Factorization
    with Suyama's Parameterization. Here Montgomery
    arithmetic is used for fast computation of addition
    and doubling of points in elliptic curve.

    Explanation
    ===========

    This ECM method considers elliptic curves in Montgomery
    form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves
    elliptic curve operations (mod N), where the elements in
    Z are reduced (mod N). Since N is not a prime, E over FF(N)
    is not really an elliptic curve but we can still do point additions
    and doubling as if FF(N) was a field.

    Stage 1 : The basic algorithm involves taking a random point (P) on an
    elliptic curve in FF(N). The compute k*P using Montgomery ladder algorithm.
    Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|,
    might be a smooth number that divides k. Then we have k = l * |E(FF(q))|
    for some l. For any point belonging to the curve E, |E(FF(q))|*P = O,
    hence k*P = l*|E(FF(q))|*P. Thus kP.z_cord = 0 (mod q), and the unknownn
    factor of N (q) can be recovered by taking gcd(kP.z_cord, N).

    Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilize
    the fact that even if kP != 0, the value of k might miss just one large
    prime divisor of |E(FF(q))|. In this case we only need to compute the
    scalar multiplication by p to get p*k*P = O. Here a second bound B2
    restrict the size of possible values of p.

    Parameters
    ==========

    n : Number to be Factored. Assume that it is a composite number.
    B1 : Stage 1 Bound. Must be an even number.
    B2 : Stage 2 Bound. Must be an even number.
    max_curve : Maximum number of curves generated

    Returns
    =======

    integer | None : a non-trivial divisor of ``n``. ``None`` if not found

    References
    ==========

    .. [1] Carl Pomerance, Richard Crandall, Prime Numbers: A Computational Perspective,
           2nd Edition (2005), page 344, ISBN:978-0387252827
    """
    randint = _randint(seed)

    # When calculating T, if (B1 - 2*D) is negative, it cannot be calculated.
    D = min(sqrt(B2), B1 // 2 - 1)
    sieve.extend(D)
    beta = [0] * D
    S = [0] * D
    # k is the product of all prime powers <= B1 (the stage-1 exponent).
    k = 1
    for p in primerange(2, B1 + 1):
        k *= pow(p, int(log(B1, p)))

    # Pre-calculate the prime numbers to be used in stage 2.
    # Using the fact that the x-coordinates of point P and its
    # inverse -P coincide, the number of primes to be checked
    # in stage 2 can be reduced.
    deltas_list = []
    for r in range(B1 + 2*D, B2 + 2*D, 4*D):
        # d in deltas iff r+(2d+1) and/or r-(2d+1) is prime
        deltas = {abs(q - r) >> 1 for q in primerange(r - 2*D, r + 2*D)}
        deltas_list.append(list(deltas))

    for _ in range(max_curve):
        #Suyama's Parametrization
        sigma = randint(6, n - 1)
        u = (sigma**2 - 5) % n
        v = (4*sigma) % n
        u_3 = pow(u, 3, n)

        try:
            # We use the elliptic curve y**2 = x**3 + a*x**2 + x
            # where a = pow(v - u, 3, n)*(3*u + v)*invert(4*u_3*v, n) - 2
            # However, we do not declare a because it is more convenient
            # to use a24 = (a + 2)*invert(4, n) in the calculation.
            a24 = pow(v - u, 3, n)*(3*u + v)*invert(16*u_3*v, n) % n
        except ZeroDivisionError:
            #If the invert(16*u_3*v, n) doesn't exist (i.e., g != 1)
            g = gcd(2*u_3*v, n)
            #If g = n, try another curve
            if g == n:
                continue
            return g

        Q = Point(u_3, pow(v, 3, n), a24, n)
        Q = Q.mont_ladder(k)
        g = gcd(Q.z_cord, n)

        #Stage 1 factor
        if g != 1 and g != n:
            return g
        #Stage 1 failure. Q.z = 0, Try another curve
        elif g == n:
            continue

        #Stage 2 - Improved Standard Continuation
        # S[i] holds (2i+1)*Q; beta[i] caches S[i].x * S[i].z for reuse below.
        S[0] = Q
        Q2 = Q.double()
        S[1] = Q2.add(Q, Q)
        beta[0] = (S[0].x_cord*S[0].z_cord) % n
        beta[1] = (S[1].x_cord*S[1].z_cord) % n
        for d in range(2, D):
            S[d] = S[d - 1].add(Q2, S[d - 2])
            beta[d] = (S[d].x_cord*S[d].z_cord) % n
        # i.e., S[i] = Q.mont_ladder(2*i + 1)

        g = 1
        W = Q.mont_ladder(4*D)
        T = Q.mont_ladder(B1 - 2*D)
        R = Q.mont_ladder(B1 + 2*D)
        for deltas in deltas_list:
            # R = Q.mont_ladder(r) where r in range(B1 + 2*D, B2 + 2*D, 4*D)
            alpha = (R.x_cord*R.z_cord) % n
            for delta in deltas:
                # We want to calculate
                # f = R.x_cord * S[delta].z_cord - S[delta].x_cord * R.z_cord
                f = (R.x_cord - S[delta].x_cord)*\
                    (R.z_cord + S[delta].z_cord) - alpha + beta[delta]
                # Accumulate all candidate factors into one product; a single
                # gcd at the end replaces one gcd per prime.
                g = (g*f) % n
            # Advance the window: new R = R + W, keeping T = previous R.
            T, R = R, R.add(W, T)
        g = gcd(n, g)

        #Stage 2 Factor found
        if g != 1 and g != n:
            return g
def ecm(n, B1=10000, B2=100000, max_curve=200, seed=1234):
    """Performs factorization using Lenstra's Elliptic curve method.

    This function repeatedly calls ``_ecm_one_factor`` to compute the factors
    of n. First all the small factors are taken out using trial division.
    Then ``_ecm_one_factor`` is used to compute one factor at a time.

    Parameters
    ==========

    n : Number to be Factored
    B1 : Stage 1 Bound. Must be an even number.
    B2 : Stage 2 Bound. Must be an even number.
    max_curve : Maximum number of curves generated
    seed : Initialize pseudorandom generator

    Examples
    ========

    >>> from sympy.ntheory import ecm
    >>> ecm(25645121643901801)
    {5394769, 4753701529}
    >>> ecm(9804659461513846513)
    {4641991, 2112166839943}
    """
    from .factor_ import _perfect_power

    n = as_int(n)
    if B1 % 2 != 0 or B2 % 2 != 0:
        raise ValueError("both bounds must be even")
    # Strip all prime factors below this limit by trial division first.
    TF_LIMIT = 100000
    factors = set()
    for prime in sieve.primerange(2, TF_LIMIT):
        if n % prime == 0:
            factors.add(prime)
            while(n % prime == 0):
                n //= prime
    # Queue of composite cofactors still awaiting an ECM round.
    queue = []

    def check(m):
        # Classify m: record primes, reduce perfect powers to their base
        # (the exponent's extra copies contribute no new prime factors to
        # the returned *set*), queue remaining composites for ECM.
        if isprime(m):
            factors.add(m)
            return
        if result := _perfect_power(m, TF_LIMIT):
            return check(result[0])
        queue.append(m)
    check(n)
    while queue:
        n = queue.pop()
        factor = _ecm_one_factor(n, B1, B2, max_curve, seed)
        if factor is None:
            raise ValueError("Increase the bounds")
        # Split n by the found factor and classify both halves.
        check(factor)
        check(n // factor)
    return factors
| Point |
python | davidhalter__jedi | test/run.py | {
"start": 4360,
"end": 9732
class ____(BaseTestCase):
    """One integration test parsed from a test file: runs a jedi
    completion/inference/goto/references query at (line_nr, column) and
    compares the result against the expected value in ``correct``."""

    def __init__(self, test_type, correct, line_nr, column, start, line,
                 path=None, skip_version_info=None):
        super().__init__(skip_version_info)
        self.test_type = test_type
        self.correct = correct
        self.line_nr = line_nr
        self.column = column
        self.start = start
        self.line = line
        self.path = path
        self._project = jedi.Project(test_dir)

    @property
    def module_name(self):
        # File name without directory or extension.
        return os.path.splitext(os.path.basename(self.path))[0]

    @property
    def line_nr_test(self):
        """The test is always defined on the line before."""
        return self.line_nr - 1

    def __repr__(self):
        return '<%s: %s:%s %r>' % (self.__class__.__name__, self.path,
                                   self.line_nr_test, self.line.rstrip())

    def script(self, environment):
        return jedi.Script(
            self.source,
            path=self.path,
            environment=environment,
            project=self._project
        )

    def run(self, compare_cb, environment=None):
        # Dispatch on the test type parsed from the test file.
        testers = {
            TEST_COMPLETIONS: self.run_completion,
            TEST_INFERENCE: self.run_inference,
            TEST_GOTO: self.run_goto,
            TEST_REFERENCES: self.run_get_references,
        }
        if (self.path.endswith('pytest.py') or self.path.endswith('conftest.py')) \
                and os.path.realpath(environment.executable) != os.path.realpath(sys.executable):
            # It's not guarantueed that pytest is installed in test
            # environments, if we're not running in the same environment that
            # we're already in, so just skip that case.
            pytest.skip()
        return testers[self.test_type](compare_cb, environment)

    def run_completion(self, compare_cb, environment):
        completions = self.script(environment).complete(self.line_nr, self.column)
        # import cProfile; cProfile.run('...')

        comp_str = {c.name for c in completions}
        for r in completions:
            # Test if this access raises an error
            assert isinstance(r.type, str)
        # `correct` holds a literal set/list of expected completion names.
        return compare_cb(self, comp_str, set(literal_eval(self.correct)))

    def run_inference(self, compare_cb, environment):
        script = self.script(environment)
        inference_state = script._inference_state

        def comparison(definition):
            # Instances are distinguished from classes by a "()" suffix.
            suffix = '()' if definition.type == 'instance' else ''
            return definition.full_name + suffix

        def definition(correct, correct_start, path):
            # Evaluate each whitespace-separated expression in `correct`
            # inside the tested module's context to get the expected values.
            should_be = set()
            for match in re.finditer('(?:[^ ]+)', correct):
                string = match.group(0)
                parser = grammar313.parse(string, start_symbol='eval_input', error_recovery=False)
                parser_utils.move(parser.get_root_node(), self.line_nr)
                node = parser.get_root_node()
                module_context = script._get_module_context()
                user_context = get_user_context(module_context, (self.line_nr, 0))
                node.parent = user_context.tree_node
                results = convert_values(user_context.infer_node(node))
                if not results:
                    raise Exception('Could not resolve %s on line %s'
                                    % (match.string, self.line_nr - 1))

                should_be |= set(Name(inference_state, r.name) for r in results)
            debug.dbg('Finished getting types', color='YELLOW')

            # Because the objects have different ids, `repr`, then compare.
            should = set(comparison(r) for r in should_be)
            return should

        should = definition(self.correct, self.start, script.path)
        result = script.infer(self.line_nr, self.column)
        is_str = set(comparison(r) for r in result)
        for r in result:
            # Test if this access raises an error
            assert isinstance(r.type, str)
        return compare_cb(self, is_str, should)

    def run_goto(self, compare_cb, environment):
        result = self.script(environment).goto(self.line_nr, self.column)
        comp_str = str(sorted(str(r.description) for r in result))
        return compare_cb(self, comp_str, self.correct)

    def run_get_references(self, compare_cb, environment):
        result = self.script(environment).get_references(self.line_nr, self.column)
        self.correct = self.correct.strip()
        # Normalize results to (module, line, column) triples; stub modules
        # are prefixed so they compare distinctly from runtime modules.
        compare = sorted(
            (('stub:' if r.is_stub() else '')
             + re.sub(r'^completion\.', '', r.module_name),
             r.line,
             r.column)
            for r in result
        )
        wanted = []
        if not self.correct:
            positions = []
        else:
            positions = literal_eval(self.correct)
        for pos_tup in positions:
            if type(pos_tup[0]) == str:
                # this means that there is a module specified
                if pos_tup[1] == ...:
                    # Ellipsis line: match any line number in that module.
                    pos_tup = pos_tup[0], ANY, pos_tup[2]
                wanted.append(pos_tup)
            else:
                # Bare (line, column): relative to this test's line in the
                # current module.
                line = pos_tup[0]
                if pos_tup[0] is not None:
                    line += self.line_nr
                wanted.append((self.module_name, line, pos_tup[1]))
        return compare_cb(self, compare, sorted(wanted))
| IntegrationTestCase |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_authenticator_details.py | {
"start": 4093,
"end": 6735
class ____(UserAuthenticatorDetailsTestBase):
    """Tests for deleting and renaming individual U2F/WebAuthn devices on a
    user's authenticator via the device-details endpoint."""

    endpoint = "sentry-api-0-user-authenticator-device-details"
    method = "delete"

    def test_u2f_remove_device(self) -> None:
        auth = get_auth(self.user)

        with self.tasks():
            self.get_success_response(
                self.user.id,
                auth.id,
                "devicekeyhandle",
                status_code=status.HTTP_204_NO_CONTENT,
            )

        # One device remains after the removal, and a notification was sent.
        authenticator = Authenticator.objects.get(id=auth.id)
        assert isinstance(authenticator.interface, U2fInterface)
        assert len(authenticator.interface.get_registered_devices()) == 1

        assert_security_email_sent("mfa-removed")

        # Can't remove last device.
        # TODO(mgaeta): We should not allow the API to return a 500.
        with self.tasks():
            self.get_error_response(
                self.user.id,
                auth.id,
                "aowerkoweraowerkkro",
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

        # Only one send.
        assert_security_email_sent("mfa-removed")

    def test_require_2fa__delete_device__ok(self) -> None:
        # Removal must still work when the organization enforces 2FA.
        self._require_2fa_for_organization()
        self.test_u2f_remove_device()

    def test_rename_device(self) -> None:
        auth = get_auth(self.user)
        self.get_success_response(
            self.user.id,
            auth.id,
            "devicekeyhandle",
            name="for testing",
            method="put",
            status_code=status.HTTP_204_NO_CONTENT,
        )

        authenticator = Authenticator.objects.get(id=auth.id)
        assert isinstance(authenticator.interface, U2fInterface)
        assert authenticator.interface.get_device_name("devicekeyhandle") == "for testing"

    def test_rename_webauthn_device(self) -> None:
        auth = get_auth_webauthn(self.user)
        self.get_success_response(
            self.user.id,
            auth.id,
            "webauthn",
            name="for testing",
            method="put",
            status_code=status.HTTP_204_NO_CONTENT,
        )

        authenticator = Authenticator.objects.get(id=auth.id)
        assert isinstance(authenticator.interface, U2fInterface)
        assert authenticator.interface.get_device_name("webauthn") == "for testing"

    def test_rename_device_not_found(self) -> None:
        # Renaming a nonexistent device key must error.
        auth = get_auth(self.user)
        self.get_error_response(
            self.user.id,
            auth.id,
            "not_a_real_device",
            name="for testing",
            method="put",
        )
@control_silo_test
| UserAuthenticatorDeviceDetailsTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/signal/shape_ops_test.py | {
"start": 1334,
"end": 14011
} | class ____(test.TestCase):
def test_mapping_of_indices_without_padding(self):
tensor = constant_op.constant(np.arange(9152), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 180, pad_end=False)
expected = np.tile(np.arange(512), (49, 1))
expected += np.tile(np.arange(49) * 180, (512, 1)).T
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_mapping_of_indices_with_padding(self):
tensor = constant_op.constant(np.arange(10000), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 192, pad_end=True)
expected = np.tile(np.arange(512), (53, 1))
expected += np.tile(np.arange(53) * 192, (512, 1)).T
expected[expected >= 10000] = 0
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_invalid_inputs(self):
# Rank 0 input signal.
with self.assertRaises(ValueError):
shape_ops.frame(1, 1, 1)
if not context.executing_eagerly():
# If the rank is unknown, do not raise an exception.
shape_ops.frame(array_ops.placeholder_with_default(
1, shape=tensor_shape.TensorShape(None)), 1, 1)
# Non-scalar frame_length.
with self.assertRaises(ValueError):
shape_ops.frame([1], [1], 1)
# Non-scalar frame_step.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, [1])
# Non-scalar pad_value.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, 1, pad_end=True, pad_value=[1])
def test_length_zero(self):
signal = constant_op.constant([], dtype=dtypes.float32)
frame_length = 2
frame_step = 1
result = self.evaluate(shape_ops.frame(
signal, frame_length, frame_step, pad_end=True, pad_value=99))
self.assertEqual((0, 2), result.shape)
result = self.evaluate(
shape_ops.frame(signal, frame_length, frame_step, pad_end=False))
self.assertEqual((0, 2), result.shape)
def test_shape_inference(self):
if context.executing_eagerly():
return
signal = array_ops.zeros((1, 1), dtype=dtypes.int32)
frame_length = 2
frame_step = 1
# Shape inference is able to detect the rank and inner-most dimension
# if frame_length is known at graph definition time.
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
self.assertEqual([1, 1, 2], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
self.assertEqual([1, 0, 2], result.shape.as_list())
# If frame_length is not known, rank and (known) outer and inner dimensions
# are inferred.
signal = array_ops.zeros([1, 2, 3, 4], dtype=dtypes.int32)
frame_length = array_ops.placeholder_with_default(
ops.convert_to_tensor(0, dtypes.int32), shape=[])
frame_step = 1
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=1)
self.assertEqual([1, 2, None, 3, 4], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=1)
self.assertEqual([1, None, None, 3, 4], result.shape.as_list())
# If frame_length and inner-most dimension is known, rank, inner dimensions,
# and known outer dimensions are inferred.
signal = array_ops.placeholder_with_default(
array_ops.zeros((0, 5, 0, 20, 5, 3), dtype=dtypes.int32),
shape=[None, 5, None, 20, 5, 3])
frame_length = 4
frame_step = 3
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=3)
self.assertEqual([None, 5, None, 7, 4, 5, 3], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=3)
self.assertEqual([None, 5, None, 6, 4, 5, 3], result.shape.as_list())
# Test that shape inference is consistent with actual returned shapes for
# small values of signal_length, frame_length, frame_step, and pad_end in
# [True, False].
frame_step = 1
for signal_length in range(2):
signal = [0] * signal_length
for frame_length in range(2):
for pad_end in [False, True]:
op = shape_ops.frame(signal, frame_length, frame_step,
pad_end=pad_end, pad_value=99)
result = self.evaluate(op)
self.assertEqual(op.shape.as_list(), list(result.shape))
def test_basic_mono(self):
signal = np.arange(6)
frame_length = 3
frame_step = 2
for rank in range(5):
nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=True, pad_value=99)
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 99]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
self.assertAllEqual(expected, result)
# Without padding, we drop the last frame.
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=False)
self.assertAllEqual(expected, result)
def test_basic_stereo(self):
signal = np.vstack([np.arange(6),
np.arange(6) + 10])
frame_length = 3
frame_step = 2
for rank in range(5):
nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=True, pad_value=99)
expected_inner_frames = np.array([
[[0, 1, 2], [2, 3, 4], [4, 5, 99]],
[[10, 11, 12], [12, 13, 14], [14, 15, 99]]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
self.assertAllEqual(expected, result)
# Without padding, we drop the last frame.
expected_inner_frames = np.array([[[0, 1, 2], [2, 3, 4]],
[[10, 11, 12], [12, 13, 14]]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=False)
self.assertAllEqual(expected, result)
def test_complex_shape(self):
signal = np.vstack([np.arange(6),
np.arange(6) + 10,
np.arange(6) + 20,
np.arange(6) + 30,
np.arange(6) + 40,
np.arange(6) + 50])
signal = np.reshape(signal, (2, 1, 3, 1, 6))
frame_length = 3
frame_step = 2
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
# Resulting shape is (2, 1, 3, 1, 3, 3).
expected = [[[[[[0, 1, 2], [2, 3, 4], [4, 5, 99]]],
[[[10, 11, 12], [12, 13, 14], [14, 15, 99]]],
[[[20, 21, 22], [22, 23, 24], [24, 25, 99]]]]],
[[[[[30, 31, 32], [32, 33, 34], [34, 35, 99]]],
[[[40, 41, 42], [42, 43, 44], [44, 45, 99]]],
[[[50, 51, 52], [52, 53, 54], [54, 55, 99]]]]]]
self.assertAllEqual(expected, result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
# Resulting shape is (2, 1, 3, 1, 3, 2).
expected = [[[[[[0, 1, 2], [2, 3, 4]]],
[[[10, 11, 12], [12, 13, 14]]],
[[[20, 21, 22], [22, 23, 24]]]]],
[[[[[30, 31, 32], [32, 33, 34]]],
[[[40, 41, 42], [42, 43, 44]]],
[[[50, 51, 52], [52, 53, 54]]]]]]
self.assertAllEqual(expected, result)
def test_axis(self):
signal = np.reshape(np.arange(16), (2, 4, 2))
result = shape_ops.frame(signal, frame_length=2, frame_step=2,
pad_end=True, axis=1)
expected = np.reshape(np.arange(16), (2, 2, 2, 2))
self.assertAllEqual(expected, self.evaluate(result))
result = shape_ops.frame(signal, frame_length=2, frame_step=1,
pad_end=True, axis=1)
expected = [[[[0, 1], [2, 3]],
[[2, 3], [4, 5]],
[[4, 5], [6, 7]],
[[6, 7], [0, 0]]],
[[[8, 9], [10, 11]],
[[10, 11], [12, 13]],
[[12, 13], [14, 15]],
[[14, 15], [0, 0]]]]
self.assertAllEqual(expected, self.evaluate(result))
result = shape_ops.frame(signal, frame_length=3, frame_step=1,
pad_end=True, axis=1)
expected = [[[[0, 1], [2, 3], [4, 5]],
[[2, 3], [4, 5], [6, 7]],
[[4, 5], [6, 7], [0, 0]],
[[6, 7], [0, 0], [0, 0]]],
[[[8, 9], [10, 11], [12, 13]],
[[10, 11], [12, 13], [14, 15]],
[[12, 13], [14, 15], [0, 0]],
[[14, 15], [0, 0], [0, 0]]]]
self.assertAllEqual(expected, self.evaluate(result))
def test_window_larger_than_signal(self):
signal = constant_op.constant([[1, 2], [11, 12]], dtype=dtypes.float32)
frame_length = 4
frame_step = 1
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
self.assertAllClose([[[1, 2, 99, 99], [2, 99, 99, 99]],
[[11, 12, 99, 99], [12, 99, 99, 99]]], result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
self.assertEqual((2, 0, 4), result.shape)
frame_step = 2
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
self.assertAllClose([[[1, 2, 99, 99]], [[11, 12, 99, 99]]], result)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
self.assertEqual((2, 0, 4), result.shape)
def test_preserves_type(self):
signal = math_ops.range(10, dtype=dtypes.float64)
frame_length = 2
frame_step = 3
result = shape_ops.frame(signal, frame_length, frame_step)
self.assertEqual(result.dtype, signal.dtype)
def test_dynamic_tensor(self):
if context.executing_eagerly():
return
# Show that frame works even when the dimensions of its input are
# not known at graph creation time.
input_signal = np.vstack([np.arange(4), np.arange(4) + 10,
np.arange(4) + 20])
frame_length = 2
frame_step = 2
signal_placeholder = array_ops.placeholder_with_default(
input_signal, shape=(None, None))
result = self.evaluate(
shape_ops.frame(signal_placeholder, frame_length, frame_step))
self.assertAllEqual([[[0, 1], [2, 3]],
[[10, 11], [12, 13]],
[[20, 21], [22, 23]]], result)
def test_gradient_numerical(self):
if context.executing_eagerly():
return
with self.session():
signal_shape = (2, 128)
signal = array_ops.ones(signal_shape)
frame_length = 33
frame_step = 9
frames = shape_ops.frame(signal, frame_length, frame_step)
error = test.compute_gradient_error(
signal, signal_shape, frames, frames.shape.as_list())
self.assertLess(error, 2e-5)
def test_constant_folding(self):
"""frame should be constant foldable for constant inputs."""
if context.executing_eagerly():
return
for pad_end in [True, False]:
g = ops.Graph()
with g.as_default():
frame_length, frame_step = 32, 16
signal_shape = (2, 128)
signal = array_ops.ones(signal_shape)
frames = shape_ops.frame(signal, frame_length, frame_step,
pad_end=pad_end)
rewritten_graph = test_util.grappler_optimize(g, [frames])
self.assertEqual(1, len(rewritten_graph.node))
if __name__ == "__main__":
test.main()
| FrameTest |
python | ray-project__ray | doc/source/serve/doc_code/autoscale_model_comp_example.py | {
"start": 127,
"end": 311
} | class ____:
async def __call__(self) -> str:
start = time.time()
while time.time() - start < 0.1:
pass
return "light"
@serve.deployment
| LightLoad |
python | apache__avro | lang/py/avro/schema.py | {
"start": 3867,
"end": 4971
} | class ____:
"""A mixin that provides basic properties."""
_reserved_properties: Sequence[str] = ()
_props: Optional[MutableMapping[str, object]] = None
@property
def props(self) -> MutableMapping[str, object]:
if self._props is None:
self._props = {}
return self._props
def get_prop(self, key: str) -> Optional[object]:
return self.props.get(key)
def set_prop(self, key: str, value: object) -> None:
self.props[key] = value
def check_props(self, other: "PropertiesMixin", props: Sequence[str]) -> bool:
"""Check that the given props are identical in two schemas.
@arg other: The other schema to check
@arg props: An iterable of properties to check
@return bool: True if all the properties match
"""
return all(getattr(self, prop) == getattr(other, prop) for prop in props)
@property
def other_props(self) -> Mapping[str, object]:
"""Dictionary of non-reserved properties"""
return get_other_props(self.props, self._reserved_properties)
| PropertiesMixin |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 77588,
"end": 80540
} | class ____(Qwen3ForCausalLM):
config_class = Qwen3OmniMoeTalkerCodePredictorConfig
base_model_prefix = "talker.code_predictor"
_can_record_outputs = {
"attentions": Qwen3OmniMoeTalkerCodePredictorAttention,
"hidden_states": Qwen3OmniMoeTalkerCodePredictorDecoderLayer,
}
def __init__(self, config: Qwen3OmniMoeTalkerCodePredictorConfig):
super().__init__(config)
self.model = Qwen3OmniMoeTalkerCodePredictorModel._from_config(config)
self.lm_head = nn.ModuleList(
[nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_code_groups - 1)]
)
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
cache_position=None,
generation_steps=None,
**kwargs,
):
r"""
generation_steps (`int`):
generation step of code predictor, 0..num_code_groups-1
"""
# Prefill stage
if inputs_embeds is not None and inputs_embeds.shape[1] > 1:
generation_steps = inputs_embeds.shape[1] - 2 # hidden & layer 0
# Generation stage
else:
inputs_embeds = self.model.get_input_embeddings()[generation_steps - 1](input_ids)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=None,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
logits = self.lm_head[generation_steps](hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return Qwen3OmniMoeTalkerCodePredictorOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
generation_steps=generation_steps + 1,
)
def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder=False, num_new_tokens=1):
model_kwargs = super()._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder, num_new_tokens
)
model_kwargs["generation_steps"] = outputs.generation_steps
return model_kwargs
@dataclass
| Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 58625,
"end": 58984
} | class ____(nn.Module):
def __init__(self, fsdp_wrap):
super().__init__()
if fsdp_wrap:
self.nested_linear = wrap(nn.Linear(10, 10, bias=False).to(DEVICE_TYPE))
else:
self.nested_linear = nn.Linear(10, 10, bias=False).to(DEVICE_TYPE)
def forward(self, x):
return self.nested_linear(x)
| NestedLinear |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol17.py | {
"start": 1684,
"end": 1850
} | class ____(Protocol[_T1]):
def m1(self) -> _T1: ...
def m2(self, p1: _T1) -> None:
pass
P = ParamSpec("P")
R = TypeVar("R", covariant=True)
| Protocol8 |
python | pydata__xarray | xarray/tests/test_strategies.py | {
"start": 2890,
"end": 3069
} | class ____:
@given(attrs())
def test_type(self, attrs):
assert isinstance(attrs, dict)
check_dict_values(attrs, ALLOWED_ATTRS_VALUES_TYPES)
| TestAttrsStrategy |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 313016,
"end": 314147
} | class ____(Response):
"""
Response of tasks.edit_hyper_params endpoint.
:param updated: Indicates if the task was updated successfully
:type updated: int
"""
_service = "tasks"
_action = "edit_hyper_params"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated=None, **kwargs):
super(EditHyperParamsResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| EditHyperParamsResponse |
python | numba__numba | numba/core/compiler.py | {
"start": 5549,
"end": 10009
} | class ____(namedtuple("_CompileResult", CR_FIELDS)):
"""
A structure holding results from the compilation of a function.
"""
__slots__ = ()
def _reduce(self):
"""
Reduce a CompileResult to picklable components.
"""
libdata = self.library.serialize_using_object_code()
# Make it (un)picklable efficiently
typeann = str(self.type_annotation)
fndesc = self.fndesc
# Those don't need to be pickled and may fail
fndesc.typemap = fndesc.calltypes = None
# Include all referenced environments
referenced_envs = self._find_referenced_environments()
return (libdata, self.fndesc, self.environment, self.signature,
self.objectmode, self.lifted, typeann, self.reload_init,
tuple(referenced_envs))
def _find_referenced_environments(self):
"""Returns a list of referenced environments
"""
mod = self.library._final_module
# Find environments
referenced_envs = []
for gv in mod.global_variables:
gvn = gv.name
if gvn.startswith("_ZN08NumbaEnv"):
env = lookup_environment(gvn)
if env is not None:
if env.can_cache():
referenced_envs.append(env)
return referenced_envs
@classmethod
def _rebuild(cls, target_context, libdata, fndesc, env,
signature, objectmode, lifted, typeann,
reload_init, referenced_envs):
if reload_init:
# Re-run all
for fn in reload_init:
fn()
library = target_context.codegen().unserialize_library(libdata)
cfunc = target_context.get_executable(library, fndesc, env)
cr = cls(target_context=target_context,
typing_context=target_context.typing_context,
library=library,
environment=env,
entry_point=cfunc,
fndesc=fndesc,
type_annotation=typeann,
signature=signature,
objectmode=objectmode,
lifted=lifted,
typing_error=None,
call_helper=None,
metadata=None, # Do not store, arbitrary & potentially large!
reload_init=reload_init,
referenced_envs=referenced_envs,
)
# Load Environments
for env in referenced_envs:
library.codegen.set_env(env.env_name, env)
return cr
@property
def codegen(self):
return self.target_context.codegen()
def dump(self, tab=''):
print(f'{tab}DUMP {type(self).__name__} {self.entry_point}')
self.signature.dump(tab=tab + ' ')
print(f'{tab}END DUMP')
_LowerResult = namedtuple("_LowerResult", [
"fndesc",
"call_helper",
"cfunc",
"env",
])
def sanitize_compile_result_entries(entries):
keys = set(entries.keys())
fieldset = set(CR_FIELDS)
badnames = keys - fieldset
if badnames:
raise NameError(*badnames)
missing = fieldset - keys
for k in missing:
entries[k] = None
# Avoid keeping alive traceback variables
err = entries['typing_error']
if err is not None:
entries['typing_error'] = err.with_traceback(None)
return entries
def compile_result(**entries):
entries = sanitize_compile_result_entries(entries)
return CompileResult(**entries)
def run_frontend(func, inline_closures=False, emit_dels=False):
"""
Run the compiler frontend over the given Python function, and return
the function's canonical Numba IR.
If inline_closures is Truthy then closure inlining will be run
If emit_dels is Truthy the ir.Del nodes will be emitted appropriately
"""
# XXX make this a dedicated Pipeline?
func_id = bytecode.FunctionIdentity.from_function(func)
interp = interpreter.Interpreter(func_id)
bc = bytecode.ByteCode(func_id=func_id)
func_ir = interp.interpret(bc)
if inline_closures:
from numba.core.inline_closurecall import InlineClosureCallPass
inline_pass = InlineClosureCallPass(func_ir, cpu.ParallelOptions(False),
{}, False)
inline_pass.run()
post_proc = postproc.PostProcessor(func_ir)
post_proc.run(emit_dels)
return func_ir
| CompileResult |
python | fluentpython__example-code | 14-it-generator/isis2json/subfield.py | {
"start": 2809,
"end": 4128
} | class ____(object):
''' Represent an Isis field, with subfields, using
Python native datastructures
>>> author = CompositeField( [('name','Braz, Marcelo'),('role','writer')] )
>>> print author['name']
Braz, Marcelo
>>> print author['role']
writer
>>> author
CompositeField((('name', 'Braz, Marcelo'), ('role', 'writer')))
'''
def __init__(self, value, subkeys=None):
if subkeys is None:
subkeys = [item[0] for item in value]
try:
value_as_dict = dict(value)
except TypeError:
raise TypeError('%r value must be a key-value structure' % self)
for key in value_as_dict:
if key not in subkeys:
raise TypeError('Unexpected keyword %r' % key)
self.value = tuple([(key, value_as_dict.get(key,None)) for key in subkeys])
def __getitem__(self, key):
return dict(self.value)[key]
def __repr__(self):
return "CompositeField(%s)" % str(self.items())
def items(self):
return self.value
def __unicode__(self):
unicode(self.items())
def __str__(self):
str(self.items())
def test():
import doctest
doctest.testmod()
if __name__=='__main__':
test()
| CompositeField |
python | python-attrs__attrs | tests/test_cmp.py | {
"start": 10781,
"end": 13042
} | class ____:
"""
Tests for dunder attributes of classes with partial ordering.
"""
cls = PartialOrderCSameType
def test_class(self):
"""
Class name and qualified name should be well behaved.
"""
assert self.cls.__name__ == "PartialOrderCSameType"
assert self.cls.__qualname__ == "PartialOrderCSameType"
def test_eq(self):
"""
__eq__ docstring and qualified name should be well behaved.
"""
method = self.cls.__eq__
assert method.__doc__.strip() == "Return a == b. Computed by attrs."
assert method.__name__ == "__eq__"
def test_ne(self):
"""
__ne__ docstring and qualified name should be well behaved.
"""
method = self.cls.__ne__
assert method.__doc__.strip() == (
"Check equality and either forward a NotImplemented or\n"
f"{'' if PY_3_13_PLUS else ' ' * 4}return the result negated."
)
assert method.__name__ == "__ne__"
def test_lt(self):
"""
__lt__ docstring and qualified name should be well behaved.
"""
method = self.cls.__lt__
assert method.__doc__.strip() == "Return a < b. Computed by attrs."
assert method.__name__ == "__lt__"
def test_le(self):
"""
__le__ docstring and qualified name should be well behaved.
"""
method = self.cls.__le__
assert method.__doc__.strip().startswith(
"Return a <= b. Computed by @total_ordering from"
)
assert method.__name__ == "__le__"
def test_gt(self):
"""
__gt__ docstring and qualified name should be well behaved.
"""
method = self.cls.__gt__
assert method.__doc__.strip().startswith(
"Return a > b. Computed by @total_ordering from"
)
assert method.__name__ == "__gt__"
def test_ge(self):
"""
__ge__ docstring and qualified name should be well behaved.
"""
method = self.cls.__ge__
assert method.__doc__.strip().startswith(
"Return a >= b. Computed by @total_ordering from"
)
assert method.__name__ == "__ge__"
| TestDundersPartialOrdering |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 30173,
"end": 30234
} | class ____(ExprNode):
__slots__ = ("op", "operand")
| UnaryOp |
python | huggingface__transformers | src/transformers/models/cwm/modular_cwm.py | {
"start": 12576,
"end": 12722
} | class ____(LlamaForCausalLM):
pass
__all__ = [
"CwmConfig",
"CwmPreTrainedModel",
"CwmModel",
"CwmForCausalLM",
]
| CwmForCausalLM |
python | kamyu104__LeetCode-Solutions | Python/implement-queue-using-stacks.py | {
"start": 40,
"end": 639
} | class ____(object):
def __init__(self):
self.A, self.B = [], []
def push(self, x):
"""
:type x: int
:rtype: None
"""
self.A.append(x)
def pop(self):
"""
:rtype: int
"""
self.peek()
return self.B.pop()
def peek(self):
"""
:rtype: int
"""
if not self.B:
while self.A:
self.B.append(self.A.pop())
return self.B[-1]
def empty(self):
"""
:rtype: bool
"""
return not self.A and not self.B
| MyQueue |
python | huggingface__transformers | src/transformers/models/xlstm/configuration_xlstm.py | {
"start": 1781,
"end": 12847
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`xLSTM`]. It is used to instantiate a xLSTM
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the xLSTM-7b [NX-AI/xLSTM-7b](https://huggingface.co/NX-AI/xLSTM-7b) model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (int, optional, *optional*, defaults to 50304):
Vocabulary size of the xLSTM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`xLSTMModel`]. Defaults to the GPT2-NeoX tokenizer size.
hidden_size (int, optional, *optional*, defaults to 4096):
Dimensionality of the embeddings or hidden states.
embedding_dim (int, optional, *optional*, defaults to 4096):
Dimensionality of the embeddings or hidden states, use hidde_size if None.
num_hidden_layers (int, optional, *optional*, defaults to 32):
Number of blocks of the xLSTM model.
num_blocks (int, optional, *optional*, defaults to 32):
Number of blocks of the xLSTM model, use num_hidden_layers if None.
num_heads (int, optional, *optional*, defaults to 8):
Number of heads for the xLSTM Layer/Cell.
use_bias (bool, optional, *optional*, defaults to `False`):
Whether to use biases in the xLSTM model.
norm_reduction_force_float32 (bool, optional, *optional*, defaults to `True`):
Whether to force the float32 norm reduction op to be done in fp32 precision.
tie_word_embeddings (bool, optional, *optional*, defaults to `False`):
Whether to tie word embeddings to the lm head weights.
add_out_norm (bool, optional, *optional*, defaults to `True`):
Whether to add an output norm after the blocks before the LMHead.
norm_eps (float, optional, *optional*, defaults to 1e-06):
Norm eps for RMSNorm and Layer Norm.
qk_dim_factor (float, optional, *optional*, defaults to 0.5):
Scale factor for the query and key dimension.
v_dim_factor (float, optional, *optional*, defaults to 1.0):
Scale factor for the value dimension.
chunkwise_kernel (ChunkwiseKernelType, optional, *optional*, defaults to `"chunkwise--native_autograd"`):
Kernel type for chunkwise processing mode.
sequence_kernel (SequenceKernelType, optional, *optional*, defaults to `"native_sequence__native"`):
Kernel type for sequence processing mode.
step_kernel (StepKernelType, optional, *optional*, defaults to `"native"`):
Kernel type for step processing mode.
mode (BackendModeType, optional, *optional*, defaults to `"inference"`):
Operation mode (inference is needed for generation).
chunk_size (int, optional, *optional*, defaults to 64):
Internal chunk size.
return_last_states (bool, optional, *optional*, defaults to `True`):
If to return the last states / cache internally. Needed as True for generation.
autocast_kernel_dtype (DtypeType, optional, *optional*, defaults to `"bfloat16"`):
Kernel dtype for the states.
eps (float, optional, *optional*, defaults to 1e-06):
Epsilon for the mLSTM cell post norm.
inference_state_dtype (DtypeType, optional, *optional*, defaults to `"float32"`):
Kernel dtype for states in inference.
ffn_proj_factor (float, optional, *optional*, defaults to 2.667):
Size factor of the post-up projection gated Feed Forward network.
ffn_round_up_to_multiple_of (int, optional, *optional*, defaults to 64):
Size factor round value of the post-up projection gated Feed Forward network.
gate_soft_cap (float, optional, *optional*, defaults to 15.0):
Gate soft cap scale.
output_logit_soft_cap (float, optional, *optional*, defaults to 30.0):
Output logit soft cap scale.
weight_mode (`Literal`, *optional*, defaults to `"single"`):
Whether parallel linear layers are separated or fused (single).
use_cache (bool, optional, *optional*, defaults to `True`):
Whether to use the cache (xLSTMCache).
pad_token_id (int, optional, *optional*, defaults to 1):
Pad token id needed for generation.
bos_token_id (int, optional, *optional*, defaults to 0):
BOS token id needed for generation.
eos_token_id (int, optional, *optional*, defaults to 2):
EOS token id needed for generation.
max_inference_chunksize (int, optional, *optional*, defaults to 16384):
Limit the chunk size for inference to save memory.
Example:
```python
>>> from transformers import xLSTMConfig, xLSTMModel
>>> # Initializing a xLSTM configuration
>>> configuration = xLSTMConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = xLSTMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "xlstm"
def __init__(
self,
vocab_size: int = 50304,
hidden_size: int = 4096,
embedding_dim: Optional[int] = None,
num_hidden_layers: Optional[int] = 32,
num_blocks: Optional[int] = None,
num_heads: int = 8,
use_bias: bool = False,
norm_reduction_force_float32: bool = True,
tie_word_embeddings: bool = False,
add_out_norm: bool = True,
norm_eps: float = 1e-6,
# mlstm_layer
qk_dim_factor: float = 0.5,
v_dim_factor: float = 1.0,
# mlstm backend
chunkwise_kernel: ChunkwiseKernelType = "chunkwise--native_autograd",
sequence_kernel: SequenceKernelType = "native_sequence__native",
step_kernel: StepKernelType = "native",
# needed to enable generation
mode: BackendModeType = "inference",
chunk_size: int = 64,
# needed to be true for generation
return_last_states: bool = True,
autocast_kernel_dtype: DtypeType = "bfloat16",
eps: float = 1e-6,
inference_state_dtype: DtypeType = "float32",
# feedforward
ffn_proj_factor: float = 2.667,
ffn_round_up_to_multiple_of: int = 64,
# capping
gate_soft_cap: float = 15.0,
output_logit_soft_cap: float = 30.0,
# weights
weight_mode: WeightModeType = "single",
# HF interface
use_cache: bool = True,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
max_inference_chunksize: int = 16384,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size if hidden_size is not None else embedding_dim
self.embedding_dim = embedding_dim if embedding_dim is not None else hidden_size
self.num_hidden_layers = num_hidden_layers if num_hidden_layers is not None else num_blocks
self.num_blocks = num_blocks if num_blocks is not None else num_hidden_layers
self.num_heads = num_heads
self.use_bias = use_bias
self.tie_word_embeddings = tie_word_embeddings
self.add_out_norm = add_out_norm
self.norm_eps = norm_eps
self.norm_reduction_force_float32 = norm_reduction_force_float32
# mlstm_layer
self.qk_dim_factor = qk_dim_factor
self.v_dim_factor = v_dim_factor
# mlstm backend
self.chunkwise_kernel = chunkwise_kernel
self.sequence_kernel = sequence_kernel
self.step_kernel = step_kernel
self.mode = mode
self.chunk_size = chunk_size
self.return_last_states = return_last_states
self.autocast_kernel_dtype = autocast_kernel_dtype
self.eps = eps
self.inference_state_dtype = inference_state_dtype
# feedforward
self.ffn_proj_factor = ffn_proj_factor
self.ffn_round_up_to_multiple_of = ffn_round_up_to_multiple_of
# capping
self.gate_soft_cap = gate_soft_cap
self.output_logit_soft_cap = output_logit_soft_cap
self.weight_mode = weight_mode
self.use_cache = use_cache
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.max_inference_chunksize = max_inference_chunksize
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
pad_token_id=pad_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
@property
def qk_dim(self):
return round_up_to_next_multiple_of(
self.hidden_size * self.qk_dim_factor,
multiple_of=64,
)
@property
def v_dim(self):
return round_up_to_next_multiple_of(
self.hidden_size * self.v_dim_factor,
multiple_of=64,
)
@property
def qk_head_dim(self):
return self.qk_dim // self.num_heads
@property
def v_head_dim(self):
return self.v_dim // self.num_heads
def to_xlstm_block_config(self):
if external_xlstm:
return xLSTMLargeConfig(
vocab_size=self.vocab_size,
embedding_dim=self.hidden_size,
num_blocks=self.num_hidden_layers,
num_heads=self.num_heads,
use_bias=self.use_bias,
add_out_norm=self.add_out_norm,
norm_eps=self.norm_eps,
norm_reduction_force_float32=self.norm_reduction_force_float32,
# mlstm_layer
qk_dim_factor=self.qk_dim_factor,
v_dim_factor=self.v_dim_factor,
# mlstm backend
chunkwise_kernel=self.chunkwise_kernel,
sequence_kernel=self.sequence_kernel,
step_kernel=self.step_kernel,
mode=self.mode,
chunk_size=self.chunk_size,
return_last_states=self.return_last_states,
autocast_kernel_dtype=self.autocast_kernel_dtype,
eps=self.eps,
inference_state_dtype=self.inference_state_dtype,
# feedforward
ffn_proj_factor=self.ffn_proj_factor,
ffn_round_up_to_multiple_of=self.ffn_round_up_to_multiple_of,
# capping
gate_soft_cap=self.gate_soft_cap,
output_logit_soft_cap=self.output_logit_soft_cap,
weight_mode=self.weight_mode,
)
else:
return self
__all__ = ["xLSTMConfig"]
| xLSTMConfig |
python | django__django | tests/utils_tests/test_http.py | {
"start": 11424,
"end": 14717
} | class ____(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), "Mon, 01 Jan 2007 01:54:21 GMT")
def test_parsing_rfc1123(self):
parsed = parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT")
self.assertEqual(
datetime.fromtimestamp(parsed, UTC),
datetime(1994, 11, 6, 8, 49, 37, tzinfo=UTC),
)
@unittest.skipIf(platform.architecture()[0] == "32bit", "The Year 2038 problem.")
@mock.patch("django.utils.http.datetime")
def test_parsing_rfc850(self, mocked_datetime):
mocked_datetime.side_effect = datetime
now_1 = datetime(2019, 11, 6, 8, 49, 37, tzinfo=UTC)
now_2 = datetime(2020, 11, 6, 8, 49, 37, tzinfo=UTC)
now_3 = datetime(2048, 11, 6, 8, 49, 37, tzinfo=UTC)
tests = (
(
now_1,
"Tuesday, 31-Dec-69 08:49:37 GMT",
datetime(2069, 12, 31, 8, 49, 37, tzinfo=UTC),
),
(
now_1,
"Tuesday, 10-Nov-70 08:49:37 GMT",
datetime(1970, 11, 10, 8, 49, 37, tzinfo=UTC),
),
(
now_1,
"Sunday, 06-Nov-94 08:49:37 GMT",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=UTC),
),
(
now_2,
"Wednesday, 31-Dec-70 08:49:37 GMT",
datetime(2070, 12, 31, 8, 49, 37, tzinfo=UTC),
),
(
now_2,
"Friday, 31-Dec-71 08:49:37 GMT",
datetime(1971, 12, 31, 8, 49, 37, tzinfo=UTC),
),
(
now_3,
"Sunday, 31-Dec-00 08:49:37 GMT",
datetime(2000, 12, 31, 8, 49, 37, tzinfo=UTC),
),
(
now_3,
"Friday, 31-Dec-99 08:49:37 GMT",
datetime(1999, 12, 31, 8, 49, 37, tzinfo=UTC),
),
)
for now, rfc850str, expected_date in tests:
with self.subTest(rfc850str=rfc850str):
mocked_datetime.now.return_value = now
parsed = parse_http_date(rfc850str)
mocked_datetime.now.assert_called_once_with(tz=UTC)
self.assertEqual(
datetime.fromtimestamp(parsed, UTC),
expected_date,
)
mocked_datetime.reset_mock()
def test_parsing_asctime(self):
parsed = parse_http_date("Sun Nov 6 08:49:37 1994")
self.assertEqual(
datetime.fromtimestamp(parsed, UTC),
datetime(1994, 11, 6, 8, 49, 37, tzinfo=UTC),
)
def test_parsing_asctime_nonascii_digits(self):
"""Non-ASCII unicode decimals raise an error."""
with self.assertRaises(ValueError):
parse_http_date("Sun Nov 6 08:49:37 1994")
with self.assertRaises(ValueError):
parse_http_date("Sun Nov 12 08:49:37 1994")
def test_parsing_year_less_than_70(self):
parsed = parse_http_date("Sun Nov 6 08:49:37 0037")
self.assertEqual(
datetime.fromtimestamp(parsed, UTC),
datetime(2037, 11, 6, 8, 49, 37, tzinfo=UTC),
)
| HttpDateProcessingTests |
python | apache__airflow | task-sdk/src/airflow/sdk/api/client.py | {
"start": 35746,
"end": 35881
} | class ____(BaseModel):
detail: list[RemoteValidationError] | str
def __repr__(self):
return repr(self.detail)
| _ErrorBody |
python | huggingface__transformers | src/transformers/utils/quantization_config.py | {
"start": 51701,
"end": 53512
} | class ____(QuantizationConfigMixin):
"""
This is a wrapper class about `vptq` parameters.
Args:
enable_proxy_error (`bool`, *optional*, defaults to `False`): calculate proxy error for each layer
config_for_layers (`Dict`, *optional*, defaults to `{}`): quantization params for each layer
shared_layer_config (`Dict`, *optional*, defaults to `{}`): shared quantization params among layers
modules_to_not_convert (`list`, *optional*, default to `None`):
The list of modules to not quantize, useful for quantizing models that explicitly require to have
some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
kwargs (`dict[str, Any]`, *optional*):
Additional parameters from which to initialize the configuration object.
"""
def __init__(
self,
enable_proxy_error: bool = False,
config_for_layers: dict[str, Any] = {},
shared_layer_config: dict[str, Any] = {},
modules_to_not_convert: list | None = None,
**kwargs,
):
self.quant_method = QuantizationMethod.VPTQ
self.enable_proxy_error = enable_proxy_error
self.config_for_layers: dict[str, Any] = config_for_layers
self.shared_layer_config: dict[str, Any] = shared_layer_config
self.modules_to_not_convert = modules_to_not_convert
self.post_init()
def post_init(self):
r"""
Safety checker that arguments are correct
"""
for layer_param in self.config_for_layers.values():
VptqLayerConfig(**layer_param)
if self.enable_proxy_error is True:
raise ValueError("enable_proxy_error should always be False until we support training")
@dataclass
| VptqConfig |
python | python__mypy | misc/perf_checker.py | {
"start": 187,
"end": 2479
} | class ____:
def __init__(self, setup: Callable[[], None], command: Callable[[], None]) -> None:
self.setup = setup
self.command = command
def print_offset(text: str, indent_length: int = 4) -> None:
print()
print(textwrap.indent(text, " " * indent_length))
print()
def delete_folder(folder_path: str) -> None:
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
def execute(command: list[str]) -> None:
proc = subprocess.Popen(
" ".join(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True
)
stdout_bytes, stderr_bytes = proc.communicate()
stdout, stderr = stdout_bytes.decode("utf-8"), stderr_bytes.decode("utf-8")
if proc.returncode != 0:
print("EXECUTED COMMAND:", repr(command))
print("RETURN CODE:", proc.returncode)
print()
print("STDOUT:")
print_offset(stdout)
print("STDERR:")
print_offset(stderr)
raise RuntimeError("Unexpected error from external tool.")
def trial(num_trials: int, command: Command) -> list[float]:
trials = []
for i in range(num_trials):
command.setup()
start = time.time()
command.command()
delta = time.time() - start
trials.append(delta)
return trials
def report(name: str, times: list[float]) -> None:
print(f"{name}:")
print(f" Times: {times}")
print(f" Mean: {statistics.mean(times)}")
print(f" Stdev: {statistics.stdev(times)}")
print()
def main() -> None:
trials = 3
print("Testing baseline")
baseline = trial(
trials, Command(lambda: None, lambda: execute(["python3", "-m", "mypy", "mypy"]))
)
report("Baseline", baseline)
print("Testing cold cache")
cold_cache = trial(
trials,
Command(
lambda: delete_folder(".mypy_cache"),
lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]),
),
)
report("Cold cache", cold_cache)
print("Testing warm cache")
execute(["python3", "-m", "mypy", "-i", "mypy"])
warm_cache = trial(
trials, Command(lambda: None, lambda: execute(["python3", "-m", "mypy", "-i", "mypy"]))
)
report("Warm cache", warm_cache)
if __name__ == "__main__":
main()
| Command |
python | pydantic__pydantic | tests/typechecking/decorators.py | {
"start": 5626,
"end": 6940
} | class ____(BaseModel):
@field_validator('foo', mode='wrap')
@classmethod
def invalid_missing_handler(cls, value: Any) -> Any:
"""TODO This shouldn't be valid.
At runtime, `check_decorator_fields_exist` raises an error, as the `handler` argument is missing.
However, there's no type checking error as the provided signature matches
`pydantic_core.core_schema.NoInfoWrapValidatorFunction`.
"""
@field_validator('foo', mode='wrap') # type: ignore[type-var] # pyright: ignore[reportArgumentType]
@classmethod
def invalid_handler(cls, value: Any, handler: int) -> Any: ...
@field_validator('foo', mode='wrap')
@classmethod
def valid_no_info(cls, value: Any, handler: ValidatorFunctionWrapHandler) -> Any: ...
@field_validator('foo', mode='wrap', json_schema_input_type=int) # `json_schema_input_type` allowed here.
@classmethod
def valid_with_info_default(
cls, value: Any, handler: ValidatorFunctionWrapHandler, info: ValidationInfo
) -> Any: ...
@field_validator('foo', mode='wrap', json_schema_input_type=int) # `json_schema_input_type` allowed here.
@classmethod
def valid_with_info(cls, value: Any, handler: ValidatorFunctionWrapHandler, info: ValidationInfo[int]) -> Any: ...
| WrapFieldValidator |
python | readthedocs__readthedocs.org | readthedocs/proxito/exceptions.py | {
"start": 4936,
"end": 5599
} | class ____(ContextualizedHttp404):
"""
Raised if a version was not found.
Note: The containing project can be a subproject.
"""
template_name = "errors/proxito/404/no_project.html"
not_found_subject = pgettext_lazy(
_not_found_subject_translation_context, "documentation version"
)
def __init__(self, project, **kwargs):
"""
Raised if a version was not found.
:param project: The project in which the version could not be found
:param kwargs: Context dictionary of the rendered template
"""
kwargs["project"] = project
super().__init__(**kwargs)
| ProjectVersionHttp404 |
python | huggingface__transformers | examples/modular-transformers/modular_multimodal2.py | {
"start": 116,
"end": 757
} | class ____(CLIPVisionModel):
pass
```
with the hope that all dependencies will be renamed as `Multimodal2VisionClass`. For this reason, if we want consistency and
use the "Vision" part everywhere, we need to overwrite the intermediate classes and add the prefix everytime.
This adds noise to the modular, but is unfortunately unavoidable.
"""
from torch import nn
from transformers.models.clip.modeling_clip import (
CLIPMLP,
CLIPAttention,
CLIPEncoder,
CLIPEncoderLayer,
CLIPPreTrainedModel,
CLIPVisionModel,
CLIPVisionTransformer,
)
from transformers.utils import add_start_docstrings
| Multimodal2VisionModel |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qtensor_method_test.py | {
"start": 1048,
"end": 1360
} | class ____(_QMethodBenchmarkBase):
def forward(self, q_input):
return q_input.copy_(q_input)
op_bench.generate_pt_test(
qmethods_configs_short + qmethods_configs_long, QMethodTensorInputCopyBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QMethodTensorInputCopyBenchmark |
python | run-llama__llama_index | llama-index-core/llama_index/core/extractors/metadata_extractors.py | {
"start": 16047,
"end": 17811
} | class ____(BaseExtractor, Generic[Model]):
"""
Pydantic program extractor.
Uses an LLM to extract out a Pydantic object. Return attributes of that object
in a dictionary.
"""
program: SerializeAsAny[BasePydanticProgram[Model]] = Field(
..., description="Pydantic program to extract."
)
input_key: str = Field(
default="input",
description=(
"Key to use as input to the program (the program "
"template string must expose this key)."
),
)
extract_template_str: str = Field(
default=DEFAULT_EXTRACT_TEMPLATE_STR,
description="Template to use for extraction.",
)
@classmethod
def class_name(cls) -> str:
return "PydanticModelExtractor"
async def _acall_program(self, node: BaseNode) -> Dict[str, Any]:
"""Call the program on a node."""
if self.is_text_node_only and not isinstance(node, TextNode):
return {}
extract_str = self.extract_template_str.format(
context_str=node.get_content(metadata_mode=self.metadata_mode),
class_name=self.program.output_cls.__name__,
)
ret_object = await self.program.acall(**{self.input_key: extract_str})
assert not isinstance(ret_object, list)
return ret_object.model_dump()
async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
"""Extract pydantic program."""
program_jobs = []
for node in nodes:
program_jobs.append(self._acall_program(node))
metadata_list: List[Dict] = await run_jobs(
program_jobs, show_progress=self.show_progress, workers=self.num_workers
)
return metadata_list
| PydanticProgramExtractor |
python | pypa__packaging | tests/test_tags.py | {
"start": 43920,
"end": 50437
} | class ____:
def test__generic_abi_macos(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(
sysconfig, "get_config_var", lambda _: ".cpython-37m-darwin.so"
)
monkeypatch.setattr(tags, "interpreter_name", lambda: "cp")
assert tags._generic_abi() == ["cp37m"]
def test__generic_abi_linux_cpython(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {
"Py_DEBUG": False,
"WITH_PYMALLOC": True,
"EXT_SUFFIX": ".cpython-37m-x86_64-linux-gnu.so",
}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
monkeypatch.setattr(tags, "interpreter_name", lambda: "cp")
# They are identical
assert tags._cpython_abis((3, 7)) == ["cp37m"]
assert tags._generic_abi() == ["cp37m"]
def test__generic_abi_jp(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {"EXT_SUFFIX": ".return_exactly_this.so"}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == ["return_exactly_this"]
def test__generic_abi_graal(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {"EXT_SUFFIX": ".graalpy-38-native-x86_64-darwin.so"}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == ["graalpy_38_native"]
def test__generic_abi_disable_gil(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {
"Py_DEBUG": False,
"EXT_SUFFIX": ".cpython-313t-x86_64-linux-gnu.so",
"WITH_PYMALLOC": 0,
"Py_GIL_DISABLED": 1,
}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == ["cp313t"]
assert tags._generic_abi() == tags._cpython_abis((3, 13))
def test__generic_abi_none(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {"EXT_SUFFIX": "..so"}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == []
@pytest.mark.parametrize("ext_suffix", ["invalid", None])
def test__generic_abi_error(
self, ext_suffix: str | None, monkeypatch: pytest.MonkeyPatch
) -> None:
config = {"EXT_SUFFIX": ext_suffix}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
with pytest.raises(SystemError) as e:
tags._generic_abi()
assert "EXT_SUFFIX" in str(e.value)
def test__generic_abi_linux_pypy(self, monkeypatch: pytest.MonkeyPatch) -> None:
# issue gh-606
config = {
"Py_DEBUG": False,
"EXT_SUFFIX": ".pypy39-pp73-x86_64-linux-gnu.so",
}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
monkeypatch.setattr(tags, "interpreter_name", lambda: "pp")
assert tags._generic_abi() == ["pypy39_pp73"]
def test__generic_abi_old_windows(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {
"EXT_SUFFIX": ".pyd",
"Py_DEBUG": 0,
"WITH_PYMALLOC": 0,
"Py_GIL_DISABLED": 0,
}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == tags._cpython_abis(sys.version_info[:2])
def test__generic_abi_windows(self, monkeypatch: pytest.MonkeyPatch) -> None:
config = {
"EXT_SUFFIX": ".cp310-win_amd64.pyd",
}
monkeypatch.setattr(sysconfig, "get_config_var", config.__getitem__)
assert tags._generic_abi() == ["cp310"]
@pytest.mark.skipif(sys.implementation.name != "cpython", reason="CPython-only")
def test__generic_abi_agree(self) -> None:
"""Test that the two methods of finding the abi tag agree"""
assert tags._generic_abi() == tags._cpython_abis(sys.version_info[:2])
def test_generic_platforms(self) -> None:
platform = sysconfig.get_platform().replace("-", "_")
platform = platform.replace(".", "_")
assert list(tags._generic_platforms()) == [platform]
def test_generic_platforms_space(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""Ensure platform tags normalize spaces to underscores."""
platform_ = "isilon onefs"
monkeypatch.setattr(sysconfig, "get_platform", lambda: platform_)
assert list(tags._generic_platforms()) == [platform_.replace(" ", "_")]
def test_iterator_returned(self) -> None:
result_iterator = tags.generic_tags("sillywalk33", ["abi"], ["plat1", "plat2"])
assert isinstance(result_iterator, collections.abc.Iterator)
def test_all_args(self) -> None:
result_iterator = tags.generic_tags("sillywalk33", ["abi"], ["plat1", "plat2"])
result = list(result_iterator)
assert result == [
tags.Tag("sillywalk33", "abi", "plat1"),
tags.Tag("sillywalk33", "abi", "plat2"),
tags.Tag("sillywalk33", "none", "plat1"),
tags.Tag("sillywalk33", "none", "plat2"),
]
@pytest.mark.parametrize("abi", [[], ["none"]])
def test_abi_unspecified(self, abi: list[str]) -> None:
no_abi = list(tags.generic_tags("sillywalk34", abi, ["plat1", "plat2"]))
assert no_abi == [
tags.Tag("sillywalk34", "none", "plat1"),
tags.Tag("sillywalk34", "none", "plat2"),
]
def test_interpreter_default(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(tags, "interpreter_name", lambda: "sillywalk")
monkeypatch.setattr(tags, "interpreter_version", lambda warn: "NN") # noqa: ARG005
result = list(tags.generic_tags(abis=["none"], platforms=["any"]))
assert result == [tags.Tag("sillywalkNN", "none", "any")]
def test_abis_default(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(tags, "_generic_abi", lambda: ["abi"])
result = list(tags.generic_tags(interpreter="sillywalk", platforms=["any"]))
assert result == [
tags.Tag("sillywalk", "abi", "any"),
tags.Tag("sillywalk", "none", "any"),
]
def test_platforms_default(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(tags, "platform_tags", lambda: ["plat"])
result = list(tags.generic_tags(interpreter="sillywalk", abis=["none"]))
assert result == [tags.Tag("sillywalk", "none", "plat")]
| TestGenericTags |
python | eth-brownie__brownie | tests/network/test_event.py | {
"start": 5588,
"end": 10392
} | class ____:
"""
Class testing the event subscription feature using the
brownie.network.event.event_watcher global variable
which is multi-threaded and needs to be reset between each test.
"""
@pytest.fixture(scope="function", autouse=True)
def event_watcher_reset(self, event_watcher_instance: EventWatcher):
"""Resets the event_watcher instance between each test in the class"""
event_watcher_instance.reset()
def test_can_subscribe_to_event_with_callback(_, tester: Contract):
expected_num: int = round(time.time()) % 100 # between 0 and 99
received_num: int = -1
callback_was_triggered: bool = False
def _callback(data):
nonlocal received_num, callback_was_triggered
received_num = data["args"]["num"]
callback_was_triggered = True
tester.events.subscribe("IndexedEvent", callback=_callback, delay=0.05)
wait_for_tx(tester.emitEvents("", expected_num))
time.sleep(0.1)
assert callback_was_triggered is True, "Callback was not triggered."
assert expected_num == received_num, "Callback was not triggered with the right event"
def test_can_subscribe_to_event_with_multiple_callbacks(_, tester: Contract):
callback_trigger_1: bool = False
callback_trigger_2: bool = False
def _cb1(_):
nonlocal callback_trigger_1
callback_trigger_1 = True
def _cb2(_):
nonlocal callback_trigger_2
callback_trigger_2 = True
tester.events.subscribe("IndexedEvent", callback=_cb1, delay=0.05)
tester.events.subscribe("IndexedEvent", callback=_cb2, delay=0.05)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
assert callback_trigger_1 is True, "Callback 1 was not triggered"
assert callback_trigger_2 is True, "Callback 2 was not triggered"
def test_callback_can_be_triggered_multiple_times(_, tester: Contract):
callback_trigger_count = 0
expected_callback_trigger_count = 2
def _cb(_):
nonlocal callback_trigger_count
callback_trigger_count += 1
tester.events.subscribe("Debug", callback=_cb, delay=0.05)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
assert (
callback_trigger_count == expected_callback_trigger_count
), "Callback was not triggered the exact number of time it should have"
def test_event_listener_can_timeout(_, tester: Contract):
task = tester.events.listen("IndexedEvent", timeout=1.0)
# Using asyncio.wait_for to avoid infinite loop.
result = asyncio.run(asyncio.wait_for(task, timeout=1.2))
assert result["event_data"] is None, "Listener was triggered during test."
assert result["timed_out"] is True, "Listener did not timed out."
def test_can_listen_for_event(_, tester: Contract):
expected_num = round(time.time()) % 100 # between 0 and 99
listener = tester.events.listen("IndexedEvent", timeout=10.0)
wait_for_tx(tester.emitEvents("", expected_num))
result: AttributeDict = asyncio.run(listener)
assert result.get("timed_out") is False, "Event listener timed out."
assert expected_num == result.event_data["args"]["num"]
def test_not_repeating_callback_is_removed_after_triggered(_, tester: Contract):
expected_trigger_count: int = 1
trigger_count: int = 0
def _cb(_):
nonlocal trigger_count
trigger_count += 1
event_watcher.add_event_callback(
event=tester.events.IndexedEvent, callback=_cb, delay=0.05, repeat=False
)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
assert trigger_count == expected_trigger_count
def test_can_set_both_repeating_and_not_repeating_callback_on_the_same_event(
_, tester: Contract
):
expected_trigger_count: int = 3
trigger_count: int = 0
def _cb(_):
nonlocal trigger_count
trigger_count += 1
event_watcher.add_event_callback(
event=tester.events.IndexedEvent, callback=_cb, delay=0.05, repeat=False
)
event_watcher.add_event_callback(
event=tester.events.IndexedEvent, callback=_cb, delay=0.05, repeat=True
)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
wait_for_tx(tester.emitEvents("", 0))
time.sleep(0.1)
assert trigger_count == expected_trigger_count
@pytest.mark.skip(reason="For developing purpose")
def test_scripting(_, tester: Contract):
pass
| TestEventWatcher |
python | scipy__scipy | scipy/sparse/tests/test_common1d.py | {
"start": 1536,
"end": 15653
} | class ____:
"""test common functionality shared by 1D sparse formats"""
def test_create_empty(self, spcreator):
assert_equal(spcreator((3,)).toarray(), np.zeros(3))
assert_equal(spcreator((3,)).nnz, 0)
assert_equal(spcreator((3,)).count_nonzero(), 0)
def test_invalid_shapes(self, spcreator):
with pytest.raises(ValueError, match='elements cannot be negative'):
spcreator((-3,))
def test_repr(self, spcreator, dat1d):
repr(spcreator(dat1d))
def test_str(self, spcreator, dat1d):
str(spcreator(dat1d))
def test_neg(self, spcreator):
A = np.array([-1, 0, 17, 0, -5, 0, 1, -4, 0, 0, 0, 0], 'd')
assert_equal(-A, (-spcreator(A)).toarray())
def test_1d_supported_init(self, spcreator):
A = spcreator([0, 1, 2, 3])
assert A.ndim == 1
def test_reshape_1d_tofrom_row_or_column(self, spcreator):
# add a dimension 1d->2d
x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5])
y = x.reshape(1, 12)
desired = [[1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5]]
assert_equal(y.toarray(), desired)
# remove a size-1 dimension 2d->1d
x = spcreator(desired)
y = x.reshape(12)
assert_equal(y.toarray(), desired[0])
y2 = x.reshape((12,))
assert y.shape == y2.shape
# make a 2d column into 1d. 2d->1d
y = x.T.reshape(12)
assert_equal(y.toarray(), desired[0])
def test_reshape(self, spcreator):
x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5])
y = x.reshape((4, 3))
desired = [[1, 0, 7], [0, 0, 0], [0, -3, 0], [0, 0, 5]]
assert_equal(y.toarray(), desired)
y = x.reshape((12,))
assert y is x
y = x.reshape(12)
assert_equal(y.toarray(), x.toarray())
def test_sum(self, spcreator):
np.random.seed(1234)
dat_1 = np.array([0, 1, 2, 3, -4, 5, -6, 7, 9])
dat_2 = np.random.rand(5)
dat_3 = np.array([])
dat_4 = np.zeros((40,))
arrays = [dat_1, dat_2, dat_3, dat_4]
for dat in arrays:
datsp = spcreator(dat)
with np.errstate(over='ignore'):
assert np.isscalar(datsp.sum())
assert_allclose(dat.sum(), datsp.sum())
assert_allclose(dat.sum(axis=None), datsp.sum(axis=None))
assert_allclose(dat.sum(axis=0), datsp.sum(axis=0))
assert_allclose(dat.sum(axis=-1), datsp.sum(axis=-1))
# test `out` parameter
datsp.sum(axis=0, out=np.zeros(()))
def test_sum_invalid_params(self, spcreator):
out = np.zeros((3,)) # wrong size for out
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
with pytest.raises(ValueError, match='axis out of range'):
datsp.sum(axis=1)
with pytest.raises(ValueError, match='axis out of range'):
datsp.sum(axis=(0, 3))
with pytest.raises(TypeError, match='axis must be an integer'):
datsp.sum(axis=1.5)
with pytest.raises(ValueError, match='output parameter.*wrong.*dimension'):
datsp.sum(axis=0, out=out)
def test_numpy_sum(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_sum = np.sum(dat)
datsp_sum = np.sum(datsp)
assert_allclose(dat_sum, datsp_sum)
def test_mean(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
assert_allclose(dat.mean(), datsp.mean())
assert np.isscalar(datsp.mean(axis=None))
assert_allclose(dat.mean(axis=None), datsp.mean(axis=None))
assert_allclose(dat.mean(axis=0), datsp.mean(axis=0))
assert_allclose(dat.mean(axis=-1), datsp.mean(axis=-1))
with pytest.raises(ValueError, match='axis'):
datsp.mean(axis=1)
with pytest.raises(ValueError, match='axis'):
datsp.mean(axis=-2)
def test_mean_invalid_params(self, spcreator):
out = np.asarray(np.zeros((1, 3)))
dat = np.array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]])
datsp = spcreator(dat)
with pytest.raises(ValueError, match='axis out of range'):
datsp.mean(axis=3)
with pytest.raises(ValueError, match='axis out of range'):
datsp.mean(axis=(0, 3))
with pytest.raises(TypeError, match='axis must be an integer'):
datsp.mean(axis=1.5)
with pytest.raises(ValueError, match='out.*not match shape'):
datsp.mean(axis=1, out=out)
def test_sum_dtype(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
for dtype in supported_dtypes:
dat_sum = dat.sum(dtype=dtype)
datsp_sum = datsp.sum(dtype=dtype)
assert_allclose(dat_sum, datsp_sum)
assert_equal(dat_sum.dtype, datsp_sum.dtype)
def test_mean_dtype(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
for dtype in supported_dtypes:
dat_mean = dat.mean(dtype=dtype)
datsp_mean = datsp.mean(dtype=dtype)
assert_allclose(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
def test_mean_out(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_out = np.array(0)
datsp_out = np.array(0)
dat.mean(out=dat_out)
datsp.mean(out=datsp_out)
assert_allclose(dat_out, datsp_out)
dat.mean(axis=0, out=dat_out)
datsp.mean(axis=0, out=datsp_out)
assert_allclose(dat_out, datsp_out)
with pytest.raises(ValueError, match="output parameter.*dimension"):
datsp.mean(out=np.array([0]))
with pytest.raises(ValueError, match="output parameter.*dimension"):
datsp.mean(out=np.array([[0]]))
def test_numpy_mean(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_mean = np.mean(dat)
datsp_mean = np.mean(datsp)
assert_allclose(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
def test_from_array(self, spcreator):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ComplexWarning)
A = np.array([2, 3, 4])
assert_equal(spcreator(A).toarray(), A)
A = np.array([1.0 + 3j, 0, -1])
assert_equal(spcreator(A).toarray(), A)
assert_equal(spcreator(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self, spcreator):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ComplexWarning)
A = [2, 3, 4]
assert_equal(spcreator(A).toarray(), A)
A = [1.0 + 3j, 0, -1]
assert_equal(spcreator(A).toarray(), np.array(A))
assert_equal(
spcreator(A, dtype='int16').toarray(), np.array(A).astype('int16')
)
def test_from_sparse(self, spcreator):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ComplexWarning)
D = np.array([1, 0, 0])
S = coo_array(D)
assert_equal(spcreator(S).toarray(), D)
S = spcreator(D)
assert_equal(spcreator(S).toarray(), D)
D = np.array([1.0 + 3j, 0, -1])
S = coo_array(D)
assert_equal(spcreator(S).toarray(), D)
assert_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16'))
S = spcreator(D)
assert_equal(spcreator(S).toarray(), D)
assert_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16'))
def test_toarray(self, spcreator, dat1d):
datsp = spcreator(dat1d)
# Check C- or F-contiguous (default).
chk = datsp.toarray()
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous == chk.flags.f_contiguous
# Check C-contiguous (with arg).
chk = datsp.toarray(order='C')
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous
assert chk.flags.f_contiguous
# Check F-contiguous (with arg).
chk = datsp.toarray(order='F')
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous
assert chk.flags.f_contiguous
# Check with output arg.
out = np.zeros(datsp.shape, dtype=datsp.dtype)
datsp.toarray(out=out)
assert_equal(out, dat1d)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.0
datsp.toarray(out=out)
assert_equal(out, dat1d)
# np.dot does not work with sparse matrices (unless scalars)
# so this is testing whether dat1d matches datsp.toarray()
a = np.array([1.0, 2.0, 3.0, 4.0])
dense_dot_dense = np.dot(a, dat1d)
check = np.dot(a, datsp.toarray())
assert_equal(dense_dot_dense, check)
b = np.array([1.0, 2.0, 3.0, 4.0])
dense_dot_dense = np.dot(dat1d, b)
check = np.dot(datsp.toarray(), b)
assert_equal(dense_dot_dense, check)
# Check bool data works.
spbool = spcreator(dat1d, dtype=bool)
arrbool = dat1d.astype(bool)
assert_equal(spbool.toarray(), arrbool)
def test_add(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
a = dat.copy()
a[0] = 2.0
b = datsp
c = b + a
assert_equal(c, b.toarray() + a)
# test broadcasting
# Note: cant add nonzero scalar to sparray. Can add len 1 array
c = b + a[0:1]
assert_equal(c, b.toarray() + a[0])
def test_radd(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
a = dat.copy()
a[0] = 2.0
b = datsp
c = a + b
assert_equal(c, a + b.toarray())
def test_rsub(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
assert_equal((dat - datsp), [0, 0, 0, 0])
assert_equal((datsp - dat), [0, 0, 0, 0])
assert_equal((0 - datsp).toarray(), -dat)
A = spcreator([1, -4, 0, 2], dtype='d')
assert_equal((dat - A), dat - A.toarray())
assert_equal((A - dat), A.toarray() - dat)
assert_equal(A.toarray() - datsp, A.toarray() - dat)
assert_equal(datsp - A.toarray(), dat - A.toarray())
# test broadcasting
assert_equal(dat[:1] - datsp, dat[:1] - dat)
def test_matmul_basic(self, spcreator):
A = np.array([[2, 0, 3.0], [0, 0, 0], [0, 1, 2]])
v = np.array([1, 0, 3])
Asp = spcreator(A)
vsp = spcreator(v)
# sparse result when both args are sparse and result not scalar
assert_equal((Asp @ vsp).toarray(), A @ v)
assert_equal(A @ vsp, A @ v)
assert_equal(Asp @ v, A @ v)
assert_equal((vsp @ Asp).toarray(), v @ A)
assert_equal(vsp @ A, v @ A)
assert_equal(v @ Asp, v @ A)
assert_equal(vsp @ vsp, v @ v)
assert_equal(v @ vsp, v @ v)
assert_equal(vsp @ v, v @ v)
assert_equal((Asp @ Asp).toarray(), A @ A)
assert_equal(A @ Asp, A @ A)
assert_equal(Asp @ A, A @ A)
def test_matvec(self, spcreator):
A = np.array([2, 0, 3.0])
Asp = spcreator(A)
col = np.array([[1, 2, 3]]).T
assert_allclose(Asp @ col, Asp.toarray() @ col)
assert (A @ np.array([1, 2, 3])).shape == ()
assert Asp @ np.array([1, 2, 3]) == 11
assert (Asp @ np.array([1, 2, 3])).shape == ()
assert (Asp @ np.array([[1], [2], [3]])).shape == (1,)
# check result type
assert isinstance(Asp @ matrix([[1, 2, 3]]).T, np.ndarray)
# ensure exception is raised for improper dimensions
bad_vecs = [np.array([1, 2]), np.array([1, 2, 3, 4]), np.array([[1], [2]])]
for x in bad_vecs:
with pytest.raises(ValueError, match='dimension mismatch'):
Asp @ x
# The current relationship between sparse matrix products and array
# products is as follows:
dot_result = np.dot(Asp.toarray(), [1, 2, 3])
assert_allclose(Asp @ np.array([1, 2, 3]), dot_result)
assert_allclose(Asp @ [[1], [2], [3]], dot_result.T)
# Note that the result of Asp @ x is dense if x has a singleton dimension.
def test_rmatvec(self, spcreator, dat1d):
    """Right multiplication (dense @ sparse) agrees with the dense product."""
    sp = spcreator(dat1d)
    assert_allclose([1, 2, 3, 4] @ sp, np.dot([1, 2, 3, 4], sp.toarray()))
    row_vec = np.array([[1, 2, 3, 4]])
    assert_allclose(row_vec @ sp, row_vec @ sp.toarray())
def test_transpose(self, spcreator, dat1d):
    """transpose() of a 1-D sparse array preserves data and dtype."""
    for dense in (dat1d, np.array([])):
        sp = spcreator(dense)
        assert_equal(sp.toarray(), dense)
        assert_equal(sp.transpose().toarray(), dense)
        assert_equal(sp.dtype, dense.dtype)
def test_add_dense_to_sparse(self, spcreator, datsp_math_dtypes):
    """dense + sparse addition commutes and matches dense + dense."""
    for _dtype, dense, sp in datsp_math_dtypes[spcreator]:
        assert_equal(dense + sp, dense + dense)
        assert_equal(sp + dense, dense + dense)
def test_iterator(self, spcreator):
    """__iter__ over a 1-D sparse array matches iterating the dense data."""
    dense = np.arange(5)
    sp = spcreator(dense)
    # formats without element-wise iteration are skipped, as before
    if sp.format not in ['coo', 'dia', 'bsr']:
        for got, want in zip(sp, dense):
            assert_equal(got, want)
def test_resize(self, spcreator):
    """resize(shape) truncates/extends in place and returns None."""
    sp = spcreator(np.array([1, 0, 3, 4]))
    # in-place mutators return None, per numpy convention
    assert sp.resize((3,)) is None
    assert_equal(sp.toarray(), [1, 0, 3])
    sp.resize((5,))
    assert_equal(sp.toarray(), [1, 0, 3, 0, 0])
| TestCommon1D |
python | doocs__leetcode | solution/1400-1499/1402.Reducing Dishes/Solution.py | {
"start": 0,
"end": 274
class ____:
    def maxSatisfaction(self, satisfaction: List[int]) -> int:
        """Greedy maximum of sum(time[i] * like-time coefficient).

        Sort descending (in place, as the original does) and keep taking
        dishes while the running prefix sum stays positive: accepting one
        more dish shifts every previously chosen dish one slot later, so
        each accepted dish contributes the current prefix sum.
        """
        satisfaction.sort(reverse=True)
        best = prefix = 0
        idx = 0
        count = len(satisfaction)
        while idx < count:
            prefix += satisfaction[idx]
            if prefix <= 0:
                # Any further (smaller) dish can only lower the total.
                break
            best += prefix
            idx += 1
        return best
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess21.py | {
"start": 319,
"end": 667
class ____(Generic[T]):
    """Data descriptor whose ``__get__`` is overloaded on the access context.

    Accessed on the class (``instance is None``) it returns the descriptor
    itself (``Self``); accessed on an instance it returns the wrapped value
    of type ``T``.  ``__set__`` makes it a data descriptor.
    """

    @overload
    def __get__(self, instance: None, owner) -> Self: ...
    @overload
    def __get__(self, instance: object, owner) -> T: ...
    def __get__(self, instance: object | None, owner) -> Self | T: ...
    def __set__(self, instance: object, value: T) -> None: ...
    def is_null(self) -> bool: ...
| Descriptor |
python | django__django | tests/admin_views/tests.py | {
"start": 367266,
"end": 369926
class ____(TestCase):
    """Admin change-view tests for the "View on site" link.

    Covers ``view_on_site=False`` (link absent), ``view_on_site=True``
    (link present), and a callable ``view_on_site`` (custom URL rendered),
    using a State/City/Restaurant/Worker object graph created once for the
    whole class.
    """

    @classmethod
    def setUpTestData(cls):
        # Superuser used for all authenticated admin requests.
        cls.superuser = User.objects.create_superuser(
            username="super", password="secret", email="super@example.com"
        )
        # Geography: three states with one city each.
        cls.s1 = State.objects.create(name="New York")
        cls.s2 = State.objects.create(name="Illinois")
        cls.s3 = State.objects.create(name="California")
        cls.c1 = City.objects.create(state=cls.s1, name="New York")
        cls.c2 = City.objects.create(state=cls.s2, name="Chicago")
        cls.c3 = City.objects.create(state=cls.s3, name="San Francisco")
        # Restaurants spread across the cities.
        cls.r1 = Restaurant.objects.create(city=cls.c1, name="Italian Pizza")
        cls.r2 = Restaurant.objects.create(city=cls.c1, name="Boulevard")
        cls.r3 = Restaurant.objects.create(city=cls.c2, name="Chinese Dinner")
        cls.r4 = Restaurant.objects.create(city=cls.c2, name="Angels")
        cls.r5 = Restaurant.objects.create(city=cls.c2, name="Take Away")
        cls.r6 = Restaurant.objects.create(city=cls.c3, name="The Unknown Restaurant")
        # Workers employed at the first restaurant; used by test_callable.
        cls.w1 = Worker.objects.create(work_at=cls.r1, name="Mario", surname="Rossi")
        cls.w2 = Worker.objects.create(
            work_at=cls.r1, name="Antonio", surname="Bianchi"
        )
        cls.w3 = Worker.objects.create(work_at=cls.r1, name="John", surname="Doe")

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_false(self):
        "The 'View on site' button is not displayed if view_on_site is False"
        response = self.client.get(
            reverse("admin:admin_views_state_change", args=(self.s1.pk,))
        )
        content_type_pk = ContentType.objects.get_for_model(City).pk
        self.assertNotContains(
            response, reverse("admin:view_on_site", args=(content_type_pk, self.c1.pk))
        )

    def test_true(self):
        "The 'View on site' button is displayed if view_on_site is True"
        response = self.client.get(
            reverse("admin:admin_views_city_change", args=(self.c1.pk,))
        )
        content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
        self.assertContains(
            response, reverse("admin:view_on_site", args=(content_type_pk, self.r1.pk))
        )

    def test_callable(self):
        "The right link is displayed if view_on_site is a callable"
        response = self.client.get(
            reverse("admin:admin_views_restaurant_change", args=(self.r1.pk,))
        )
        self.assertContains(
            response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name)
        )
@override_settings(ROOT_URLCONF="admin_views.urls")
| InlineAdminViewOnSiteTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py | {
"start": 480,
"end": 597
class ____:
    # Lint-rule fixture (ruff RUF009, attrs auto_attribs): mixes annotated
    # attributes with function-call defaults and bare assignments.  The
    # "wrong-looking" values (e.g. a str annotated field defaulting to 0)
    # are intentional test data — do not "fix" them.
    a: str = 0
    b = field()
    c: int = foo()
    d = list()
@frozen() # auto_attribs = None => True
| C |
python | astropy__astropy | astropy/io/fits/fitsrec.py | {
"start": 4247,
"end": 55215
} | class ____(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU's data part. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
_load_variable_length_data = True
def __new__(cls, input):
    """
    Construct a FITS record array from a recarray.

    ``input`` is viewed (not copied) through its own data buffer.
    """
    # input should be a record array
    if input.dtype.subdtype is None:
        self = np.recarray.__new__(cls, input.shape, input.dtype, buf=input.data)
    else:
        # Sub-array dtypes: pass strides explicitly so the new view lines
        # up with the original buffer layout.
        self = np.recarray.__new__(
            cls, input.shape, input.dtype, buf=input.data, strides=input.strides
        )

    self._init()
    if self.dtype.fields:
        self._nfields = len(self.dtype.fields)

    return self
def __setstate__(self, state):
    """Restore from pickle state produced by ``__reduce__``.

    The last two entries of ``state`` are the FITS-specific attribute
    values and their names; the rest is standard recarray state.
    """
    meta = state[-1]
    column_state = state[-2]
    state = state[:-2]

    super().__setstate__(state)

    # Weakref sets are not picklable, so rebuild an empty one here.
    self._col_weakrefs = weakref.WeakSet()

    for attr, value in zip(meta, column_state):
        setattr(self, attr, value)
def __reduce__(self):
    """
    Return a 3-tuple for pickling a FITS_rec.  Use the super-class
    functionality but then add in a tuple of FITS_rec-specific
    values that get used in __setstate__.
    """
    reconst_func, reconst_func_args, state = super().__reduce__()

    # Define FITS_rec-specific attrs that get added to state
    column_state = []
    meta = []

    for attrs in [
        "_converted",
        "_heapoffset",
        "_heapsize",
        "_tbsize",
        "_nfields",
        "_gap",
        "_uint",
        "parnames",
        "_coldefs",
    ]:
        with suppress(AttributeError):
            # _coldefs can be Delayed, and file objects cannot be
            # picked, it needs to be deepcopied first
            if attrs == "_coldefs":
                column_state.append(self._coldefs.__deepcopy__(None))
            else:
                column_state.append(getattr(self, attrs))
            meta.append(attrs)

    state = state + (column_state, meta)

    return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
    # Called by numpy for every new view/slice/copy of this array:
    # propagate or (re)initialize the FITS-specific bookkeeping attributes.
    if obj is None:
        return

    if isinstance(obj, FITS_rec):
        self._character_as_bytes = obj._character_as_bytes

    if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
        # Same-dtype view of another FITS_rec: share its metadata directly.
        self._converted = obj._converted
        self._heapoffset = obj._heapoffset
        self._heapsize = obj._heapsize
        self._tbsize = obj._tbsize
        self._col_weakrefs = obj._col_weakrefs
        self._coldefs = obj._coldefs
        self._nfields = obj._nfields
        self._gap = obj._gap
        self._uint = obj._uint
    elif self.dtype.fields is not None:
        # This will allow regular ndarrays with fields, rather than
        # just other FITS_rec objects
        self._nfields = len(self.dtype.fields)
        self._converted = {}

        self._heapoffset = getattr(obj, "_heapoffset", 0)
        self._heapsize = getattr(obj, "_heapsize", 0)

        self._tbsize = getattr(obj, "_tbsize", 0)

        self._gap = getattr(obj, "_gap", 0)
        self._uint = getattr(obj, "_uint", False)
        self._col_weakrefs = weakref.WeakSet()
        self._coldefs = ColDefs(self)

        # Work around chicken-egg problem. Column.array relies on the
        # _coldefs attribute to set up ref back to parent FITS_rec; however
        # in the above line the self._coldefs has not been assigned yet so
        # this fails. This patches that up...
        for col in self._coldefs:
            del col.array
            col._parent_fits_rec = weakref.ref(self)
    else:
        self._init()
def _init(self):
    """Initializes internal attributes specific to FITS-isms."""
    self._nfields = 0
    self._converted = {}   # cache of scaled/converted column arrays
    self._heapoffset = 0   # byte offset of the heap within the raw data
    self._heapsize = 0
    self._tbsize = 0
    self._col_weakrefs = weakref.WeakSet()
    self._coldefs = None
    self._gap = 0
    self._uint = False     # whether pseudo-unsigned int columns are in use
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
    """
    Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
    object.

    .. note::

        This was originally part of the ``new_table`` function in the table
        module but was moved into a class method since most of its
        functionality always had more to do with initializing a `FITS_rec`
        object than anything else, and much of it also overlapped with
        ``FITS_rec._scale_back``.

    Parameters
    ----------
    columns : sequence of `Column` or a `ColDefs`
        The columns from which to create the table data. If these
        columns have data arrays attached that data may be used in
        initializing the new table.  Otherwise the input columns
        will be used as a template for a new table with the requested
        number of rows.

    nrows : int
        Number of rows in the new table.  If the input columns have data
        associated with them, the size of the largest input column is used.
        Otherwise the default is 0.

    fill : bool
        If `True`, will fill all cells with zeros or blanks.  If
        `False`, copy the data from input, undefined cells will still
        be filled with zeros/blanks.

    character_as_bytes : bool
        Passed through to the new `FITS_rec`; controls whether string
        columns are returned as bytes rather than unicode.
    """
    if not isinstance(columns, ColDefs):
        columns = ColDefs(columns)

    # read the delayed data
    for column in columns:
        arr = column.array
        if isinstance(arr, Delayed):
            if arr.hdu.data is None:
                column.array = None
            else:
                column.array = _get_recarray_field(arr.hdu.data, arr.field)

    # Reset columns._arrays (which we may want to just do away with
    # altogether
    del columns._arrays

    # use the largest column shape as the shape of the record
    if nrows == 0:
        for arr in columns._arrays:
            if arr is not None:
                dim = arr.shape[0]
            else:
                dim = 0
            nrows = max(dim, nrows)

    raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
    raw_data.fill(ord(columns._padding_byte))
    data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
    data._character_as_bytes = character_as_bytes

    # Previously this assignment was made from hdu.columns, but that's a
    # bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
    # the _TableBaseHDU.columns property is actually returned from
    # .data._coldefs, so this assignment was circular!  Don't make that
    # mistake again.
    # All of this is an artifact of the fragility of the FITS_rec class,
    # and that it can't just be initialized by columns...
    data._coldefs = columns

    # If fill is True we don't copy anything from the column arrays.  We're
    # just using them as a template, and returning a table filled with
    # zeros/blanks
    if fill:
        return data

    # Otherwise we have to fill the recarray with data from the input
    # columns
    for idx, column in enumerate(columns):
        # For each column in the ColDef object, determine the number of
        # rows in that column.  This will be either the number of rows in
        # the ndarray associated with the column, or the number of rows
        # given in the call to this function, which ever is smaller.  If
        # the input FILL argument is true, the number of rows is set to
        # zero so that no data is copied from the original input data.
        arr = column.array

        if arr is None:
            # The input column had an empty array, so just use the fill
            # value
            continue

        n = min(len(arr), nrows)

        # TODO: At least *some* of this logic is mostly redundant with the
        # _convert_foo methods in this class; see if we can eliminate some
        # of that duplication.

        field = _get_recarray_field(data, idx)
        name = column.name
        fitsformat = column.format
        recformat = fitsformat.recformat

        outarr = field[:n]
        inarr = arr[:n]

        if isinstance(recformat, _FormatX):
            # Data is a bit array
            if inarr.shape[-1] == recformat.repeat:
                _wrapx(inarr, outarr, recformat.repeat)
                continue
        elif isinstance(recformat, _FormatP):
            # Variable-length column: build descriptors + heap data.
            data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
            continue
        # TODO: Find a better way of determining that the column is meant
        # to be FITS L formatted
        elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
            # column is boolean
            # The raw data field should be filled with either 'T' or 'F'
            # (not 0).  Use 'F' as a default
            field[:] = ord("F")
            # Also save the original boolean array in data._converted so
            # that it doesn't have to be re-converted
            converted = np.zeros(field.shape, dtype=bool)
            converted[:n] = inarr
            data._cache_field(name, converted)
            # TODO: Maybe this step isn't necessary at all if _scale_back
            # will handle it?
            inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
        elif column._physical_values and column._pseudo_unsigned_ints:
            # Temporary hack...
            bzero = column.bzero
            converted = np.zeros(field.shape, dtype=inarr.dtype)
            converted[:n] = inarr
            data._cache_field(name, converted)
            if n < nrows:
                # Pre-scale rows below the input data
                field[n:] = -bzero

            inarr = inarr - bzero
        elif isinstance(columns, _AsciiColDefs):
            # Regardless whether the format is character or numeric, if the
            # input array contains characters then it's already in the raw
            # format for ASCII tables
            if fitsformat._pseudo_logical:
                # Hack to support converting from 8-bit T/F characters
                # Normally the column array is a chararray of 1 character
                # strings, but we need to view it as a normal ndarray of
                # 8-bit ints to fill it with ASCII codes for 'T' and 'F'
                outarr = field.view(np.uint8, np.ndarray)[:n]
            elif arr.dtype.kind not in ("S", "U"):
                # Set up views of numeric columns with the appropriate
                # numeric dtype
                # Fill with the appropriate blanks for the column format
                data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
                outarr = data._converted[name][:n]

            outarr[:] = inarr
            continue

        if inarr.shape != outarr.shape:
            if (
                inarr.dtype.kind == outarr.dtype.kind
                and inarr.dtype.kind in ("U", "S")
                and inarr.dtype != outarr.dtype
            ):
                inarr_rowsize = inarr[0].size
                inarr = inarr.flatten().view(outarr.dtype)

            # This is a special case to handle input arrays with
            # non-trivial TDIMn.
            # By design each row of the outarray is 1-D, while each row of
            # the input array may be n-D

            if outarr.ndim > 1:
                # The normal case where the first dimension is the rows
                inarr_rowsize = inarr[0].size
                inarr = inarr.reshape(n, inarr_rowsize)
                outarr[:, :inarr_rowsize] = inarr
            else:
                # Special case for strings where the out array only has one
                # dimension (the second dimension is rolled up into the
                # strings
                outarr[:n] = inarr.ravel()
        else:
            outarr[:] = inarr

    # Now replace the original column array references with the new
    # fields
    # This is required to prevent the issue reported in
    # https://github.com/spacetelescope/PyFITS/issues/99
    for idx in range(len(columns)):
        columns._arrays[idx] = data.field(idx)

    return data
def __repr__(self):
    """Return ``FITS_rec(...)`` repr, typesetting integers via str."""
    # recarray.__repr__ hard-codes the name of the class, so we overwrite.
    # The following is mostly a straight copy except for the name change
    # and for treating using str to typeset integer -- the latter to fix
    # the case where the integer columns are scaled (see gh-17583). Also,
    # removed a branch for "if the user is playing strange game with dtypes".
    #
    # FIXME: recarray removes the "numpy.record" mention in the dtype repr,
    # we could do the same in a future version
    repr_dtype = self.dtype
    # if repr_dtype.type is np.record:
    #     repr_dtype = np.dtype((np.void, repr_dtype))
    prefix = "FITS_rec("
    fmt = "FITS_rec(%s,%sdtype=%s)"

    # get data/shape string. logic taken from numeric.array_repr
    if self.size > 0 or self.shape == (0,):
        lst = np.array2string(
            self, separator=", ", prefix=prefix, suffix=",", formatter=dict(int=str)
        )
    else:
        # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(self.shape),)  # noqa: UP031

    lf = "\n" + " " * len(prefix)
    return fmt % (lst, lf, repr_dtype)
def __getattribute__(self, attr):
    """Resolve real attributes first, then fall back to column names.

    Column-name access goes through ``self.field`` so that scaled columns
    are converted on the way out.
    """
    # First, see if ndarray has this attr, and return it if so. Note that
    # this means a field with the same name as an ndarray attr cannot be
    # accessed by attribute, this is Numpy's default behavior.
    # We avoid using np.recarray.__getattribute__ here because after doing
    # this check it would access the columns without doing the conversions
    # that we need (with .field, see below).
    try:
        return object.__getattribute__(self, attr)
    except AttributeError:
        pass

    # attr might still be a fieldname.  If we have column definitions,
    # we should access this via .field, as the data may have to be scaled.
    if self._coldefs is not None and attr in self.columns.names:
        return self.field(attr)

    # If not, just let the usual np.recarray override deal with it.
    return super().__getattribute__(attr)
def __getitem__(self, key):
    """Index by column name (str), single row, or row slice/fancy index.

    Slices return a new FITS_rec carrying copies of the FITS metadata;
    single rows return the lighter-weight ``self._record_type``.
    """
    if self._coldefs is None:
        return super().__getitem__(key)

    if isinstance(key, str):
        return self.field(key)

    # Have to view as a recarray then back as a FITS_rec, otherwise the
    # circular reference fix/hack in FITS_rec.field() won't preserve
    # the slice.
    out = self.view(np.recarray)[key]
    if type(out) is not np.recarray:
        # Oops, we got a single element rather than a view. In that case,
        # return a Record, which has no __getstate__ and is more efficient.
        return self._record_type(self, key)

    # We got a view; change it back to our class, and add stuff
    out = out.view(type(self))
    out._heapoffset = self._heapoffset
    out._heapsize = self._heapsize
    out._tbsize = self._tbsize
    out._gap = self._gap
    out._uint = self._uint
    out._coldefs = ColDefs(self._coldefs)
    arrays = []
    out._converted = {}
    for idx, name in enumerate(self._coldefs.names):
        #
        # Store the new arrays for the _coldefs object
        #
        arrays.append(self._coldefs._arrays[idx][key])

        # Ensure that the sliced FITS_rec will view the same scaled
        # columns as the original; this is one of the few cases where
        # it is not necessary to use _cache_field()
        if name in self._converted:
            dummy = self._converted[name]
            field = np.ndarray.__getitem__(dummy, key)
            out._converted[name] = field

    out._coldefs._arrays = arrays
    return out
def __setitem__(self, key, value):
    """Assign a column (str key), a slice of rows, or a single row.

    Row values may be a FITS_record, tuple, list, or numpy void with one
    entry per field; otherwise TypeError/ValueError is raised.
    """
    if self._coldefs is None:
        return super().__setitem__(key, value)

    if isinstance(key, str):
        self[key][:] = value
        return

    if isinstance(key, slice):
        # Clamp the slice bounds to the table and the value length,
        # then assign row by row.
        end = min(len(self), key.stop or len(self))
        end = max(0, end)
        start = max(0, key.start or 0)
        end = min(end, start + len(value))

        for idx in range(start, end):
            self.__setitem__(idx, value[idx - start])
        return

    if isinstance(value, FITS_record):
        for idx in range(self._nfields):
            self.field(self.names[idx])[key] = value.field(self.names[idx])
    elif isinstance(value, (tuple, list, np.void)):
        if self._nfields == len(value):
            for idx in range(self._nfields):
                self.field(idx)[key] = value[idx]
        else:
            raise ValueError(
                f"Input tuple or list required to have {self._nfields} elements."
            )
    else:
        raise TypeError(
            "Assignment requires a FITS_record, tuple, or list as input."
        )
def _ipython_key_completions_(self):
    # Offer column names for IPython tab-completion of rec["<TAB>"].
    return self.names
def copy(self, order="C"):
    """
    The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
    `numpy.copy`.  Differences include that it re-views the copied array as
    self's ndarray subclass, as though it were taking a slice; this means
    ``__array_finalize__`` is called and the copy shares all the array
    attributes (including ``._converted``!).  So we need to make a deep
    copy of all those attributes so that the two arrays truly do not share
    any data.
    """
    new = super().copy(order=order)

    new.__dict__ = copy.deepcopy(self.__dict__)
    # Weakref sets cannot be deep-copied meaningfully; start fresh.
    new._col_weakrefs = weakref.WeakSet()
    return new
@property
def columns(self):
    """A user-visible accessor for the coldefs (`ColDefs` or None)."""
    return self._coldefs
@property
def _coldefs(self):
    """The underlying `ColDefs`, stored in ``__dict__`` (may be None)."""
    # This used to be a normal internal attribute, but it was changed to a
    # property as a quick and transparent way to work around the reference
    # leak bug fixed in https://github.com/astropy/astropy/pull/4539
    #
    # See the long comment in the Column.array property for more details
    # on this.  But in short, FITS_rec now has a ._col_weakrefs attribute
    # which is a WeakSet of weakrefs to each Column in _coldefs.
    #
    # So whenever ._coldefs is set we also add each Column in the ColDefs
    # to the weakrefs set.  This is an easy way to find out if a Column has
    # any references to it external to the FITS_rec (i.e. a user assigned a
    # column to a variable).  If the column is still in _col_weakrefs then
    # there are other references to it external to this FITS_rec.  We use
    # that information in __del__ to save off copies of the array data
    # for those columns to their Column.array property before our memory
    # is freed.
    return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
    # Track each Column weakly so __del__ can detect external references.
    self.__dict__["_coldefs"] = cols
    if isinstance(cols, ColDefs):
        for col in cols.columns:
            self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
    try:
        del self.__dict__["_coldefs"]
    except KeyError as exc:
        # Match normal attribute semantics: deleting a missing attribute
        # raises AttributeError, not KeyError.
        raise AttributeError(exc.args[0])
def __del__(self):
    """On teardown, copy column data out to externally-referenced Columns."""
    try:
        del self._coldefs
        if self.dtype.fields is not None:
            for col in self._col_weakrefs:
                if col.array is not None:
                    col.array = col.array.copy()

    # See issues #4690 and #4912
    except (AttributeError, TypeError):  # pragma: no cover
        pass
@property
def names(self):
    """List of column names, or None if neither dtype fields nor coldefs exist."""
    if self.dtype.fields:
        return list(self.dtype.names)
    coldefs = getattr(self, "_coldefs", None)
    return coldefs.names if coldefs is not None else None
@property
def formats(self):
    """List of column FITS formats, or None when no column defs exist."""
    coldefs = getattr(self, "_coldefs", None)
    return None if coldefs is None else coldefs.formats
@property
def _raw_itemsize(self):
    """
    Returns the size of row items that would be written to the raw FITS
    file, taking into account the possibility of unicode columns being
    compactified.

    Currently for internal use only.
    """
    if _has_unicode_fields(self):
        total_itemsize = 0
        for field in self.dtype.fields.values():
            itemsize = field[0].itemsize
            if field[0].kind == "U":
                # On-disk FITS strings are 1 byte/char; numpy 'U' is 4.
                itemsize = itemsize // 4
            total_itemsize += itemsize
        return total_itemsize
    else:
        # Just return the normal itemsize
        return self.itemsize
def field(self, key):
    """
    A view of a `Column`'s data as an array.

    ``key`` may be a column name or index; converted/scaled results are
    cached in ``self._converted`` on first access.
    """
    # NOTE: The *column* index may not be the same as the field index in
    # the recarray, if the column is a phantom column
    column = self.columns[key]
    name = column.name
    format = column.format

    # If field's base is a FITS_rec, we can run into trouble because it
    # contains a reference to the ._coldefs object of the original data;
    # this can lead to a circular reference; see ticket #49
    base = self
    while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
        base = base.base

    # base could still be a FITS_rec in some cases, so take care to
    # use rec.recarray.field to avoid a potential infinite
    # recursion
    field = _get_recarray_field(base, name)

    if name not in self._converted:
        recformat = format.recformat
        # TODO: If we're now passing the column to these subroutines, do we
        # really need to pass them the recformat?
        if isinstance(recformat, _FormatP) and self._load_variable_length_data:
            # for P format
            converted = self._convert_p(column, field, recformat)
        else:
            # Handle all other column data types which are fixed-width
            # fields
            converted = self._convert_other(column, field, recformat)

        # Note: Never assign values directly into the self._converted dict;
        # always go through self._cache_field; this way self._converted is
        # only used to store arrays that are not already direct views of
        # our own data.
        self._cache_field(name, converted)
        return converted

    return self._converted[name]
def _cache_field(self, name, field):
    """
    Do not store fields in _converted if one of its bases is self,
    or if it has a common base with self.

    This results in a reference cycle that cannot be broken since
    ndarrays do not participate in cyclic garbage collection.
    """
    # Walk both base chains; if they ever meet, caching would create an
    # uncollectable cycle, so silently skip.
    base = field
    while True:
        self_base = self
        while True:
            if self_base is base:
                return

            if getattr(self_base, "base", None) is not None:
                self_base = self_base.base
            else:
                break

        if getattr(base, "base", None) is not None:
            base = base.base
        else:
            break

    self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
    """
    Update how the data is formatted depending on changes to column
    attributes initiated by the user through the `Column` interface.

    Dispatches column attribute change notifications to individual methods
    for each attribute ``_update_column_<attr>``
    """
    method_name = f"_update_column_{attr}"
    if hasattr(self, method_name):
        # Right now this is so we can be lazy and not implement updaters
        # for every attribute yet--some we may not need at all, TBD
        getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
    """Rename the dtype field at ``idx`` to mirror a Column rename."""
    # Rewriting dtype.names in place is sufficient; no data moves.
    current = self.dtype.names
    self.dtype.names = tuple(
        name if pos == idx else existing
        for pos, existing in enumerate(current)
    )
def _convert_x(self, field, recformat):
    """Convert a raw table column to a bit array as specified by the
    FITS X format.
    """
    # Unpack the packed bits into one boolean per bit, per row.
    dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
    _unwrapx(field, dummy, recformat.repeat)
    return dummy
def _convert_p(self, column, field, recformat):
    """Convert a raw table column of FITS P or Q format descriptors
    to a VLA column with the array data returned from the heap.

    ``field`` holds (count, offset) descriptor pairs per row; the actual
    array data is read from the heap via ``_get_raw_data``.
    """
    if column.dim:
        vla_shape = tuple(
            reversed(tuple(map(int, column.dim.strip("()").split(","))))
        )
    dummy = _VLF([None] * len(self), dtype=recformat.dtype)
    raw_data = self._get_raw_data()

    if raw_data is None:
        raise OSError(
            f"Could not find heap data for the {column.name!r} variable-length "
            "array column."
        )

    for idx in range(len(self)):
        offset = int(field[idx, 1]) + self._heapoffset
        count = int(field[idx, 0])

        if recformat.dtype == "S":
            dt = np.dtype(recformat.dtype + str(1))
            arr_len = count * dt.itemsize
            da = raw_data[offset : offset + arr_len].view(dt)
            da = np.char.array(da.view(dtype=dt), itemsize=count)
            dummy[idx] = decode_ascii(da)
        else:
            dt = np.dtype(recformat.dtype)
            arr_len = count * dt.itemsize
            dummy[idx] = raw_data[offset : offset + arr_len].view(dt)

            if column.dim and len(vla_shape) > 1:
                # The VLA is reshaped consistently with TDIM instructions
                if vla_shape[0] == 1:
                    dummy[idx] = dummy[idx].reshape(1, len(dummy[idx]))
                else:
                    vla_dim = vla_shape[1:]
                    vla_first = int(len(dummy[idx]) / np.prod(vla_dim))
                    dummy[idx] = dummy[idx].reshape((vla_first,) + vla_dim)

            dummy[idx] = dummy[idx].view(dummy[idx].dtype.newbyteorder(">"))

            # Each array in the field may now require additional
            # scaling depending on the other scaling parameters
            # TODO: The same scaling parameters apply to every
            # array in the column so this is currently very slow; we
            # really only need to check once whether any scaling will
            # be necessary and skip this step if not
            # TODO: Test that this works for X format; I don't think
            # that it does--the recformat variable only applies to the P
            # format not the X format
            dummy[idx] = self._convert_other(column, dummy[idx], recformat)

    return dummy
def _convert_ascii(self, column, field):
    """
    Special handling for ASCII table columns to convert columns containing
    numeric types to actual numeric arrays from the string representation.
    """
    format = column.format
    recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
    # if the string = TNULL, return ASCIITNULL
    nullval = str(column.null).strip().encode("ascii")
    if len(nullval) > format.width:
        nullval = nullval[: format.width]

    # Before using .replace make sure that any trailing bytes in each
    # column are filled with spaces, and *not*, say, nulls; this causes
    # functions like replace to potentially leave gibberish bytes in the
    # array buffer.
    dummy = np.char.ljust(field, format.width)
    # FITS 'D' exponents are not understood by numpy; rewrite as 'E'.
    dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
    null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))

    # Convert all fields equal to the TNULL value (nullval) to empty fields.
    # TODO: These fields really should be converted to NaN or something else undefined.
    # Currently they are converted to empty fields, which are then set to zero.
    dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)

    # always replace empty fields, see https://github.com/astropy/astropy/pull/5394
    if nullval != b"":
        dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)

    try:
        dummy = np.array(dummy, dtype=recformat)
    except ValueError as exc:
        raise ValueError(
            f"{exc}; the header may be missing the necessary "
            f"TNULL{self.names.index(column.name) + 1} keyword or the table "
            "contains invalid data"
        )

    return dummy
def _convert_other(self, column, field, recformat):
    """Perform conversions on any other fixed-width column data types.

    This may not perform any conversion at all if it's not necessary, in
    which case the original column array is returned.

    Handles X-format bit arrays, ASCII-to-numeric conversion, TDIM
    reshaping, BSCALE/BZERO scaling (including pseudo-unsigned ints),
    booleans, and string decoding.
    """
    if isinstance(recformat, _FormatX):
        # special handling for the X format
        return self._convert_x(field, recformat)

    scale_factors = self._get_scale_factors(column)
    _str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors

    index = self.names.index(column.name)

    # ASCII table, convert strings to numbers
    # TODO:
    # For now, check that these are ASCII columns by checking the coldefs
    # type; in the future all columns (for binary tables, ASCII tables, or
    # otherwise) should "know" what type they are already and how to handle
    # converting their data from FITS format to native format and vice
    # versa...
    if not _str and isinstance(self._coldefs, _AsciiColDefs):
        field = self._convert_ascii(column, field)

    # Test that the dimensions given in dim are sensible; otherwise
    # display a warning and ignore them
    if dim:
        # See if the dimensions already match, if not, make sure the
        # number items will fit in the specified dimensions
        if field.ndim > 1:
            actual_shape = field.shape[1:]
            if _str:
                actual_shape = actual_shape + (field.itemsize,)
        else:
            actual_shape = field.shape[0]

        if dim == actual_shape:
            # The array already has the correct dimensions, so we
            # ignore dim and don't convert
            dim = None
        else:
            nitems = reduce(operator.mul, dim)
            if _str:
                actual_nitems = field.itemsize
            elif len(field.shape) == 1:
                # No repeat count in TFORMn, equivalent to 1
                actual_nitems = 1
            else:
                actual_nitems = field.shape[1]
            if nitems > actual_nitems and not isinstance(recformat, _FormatP):
                warnings.warn(
                    f"TDIM{index + 1} value {self._coldefs[index].dims:d} does not "
                    f"fit with the size of the array items ({actual_nitems:d}). "
                    f"TDIM{index + 1:d} will be ignored."
                )
                dim = None

    # further conversion for both ASCII and binary tables
    # For now we've made columns responsible for *knowing* whether their
    # data has been scaled, but we make the FITS_rec class responsible for
    # actually doing the scaling
    # TODO: This also needs to be fixed in the effort to make Columns
    # responsible for scaling their arrays to/from FITS native values
    if not column.ascii and column.format.p_format:
        format_code = column.format.p_format
    else:
        # TODO: Rather than having this if/else it might be nice if the
        # ColumnFormat class had an attribute guaranteed to give the format
        # of actual values in a column regardless of whether the true
        # format is something like P or Q
        format_code = column.format.format

    if _number and (_scale or _zero) and not column._physical_values:
        # This is to handle pseudo unsigned ints in table columns
        # TODO: For now this only really works correctly for binary tables
        # Should it work for ASCII tables as well?
        if self._uint:
            if bzero == 2**15 and format_code == "I":
                field = np.array(field, dtype=np.uint16)
            elif bzero == 2**31 and format_code == "J":
                field = np.array(field, dtype=np.uint32)
            elif bzero == 2**63 and format_code == "K":
                field = np.array(field, dtype=np.uint64)
                bzero64 = np.uint64(2**63)
            else:
                field = np.array(field, dtype=np.float64)
        else:
            field = np.array(field, dtype=np.float64)

        if _scale:
            np.multiply(field, bscale, field)
        if _zero:
            if self._uint and format_code == "K":
                # There is a chance of overflow, so be careful
                test_overflow = field.copy()
                try:
                    test_overflow += bzero64
                except OverflowError:
                    warnings.warn(
                        f"Overflow detected while applying TZERO{index + 1:d}. "
                        "Returning unscaled data."
                    )
                else:
                    field = test_overflow
            else:
                field += bzero

        # mark the column as scaled
        column._physical_values = True

    elif _bool and field.dtype != bool:
        field = np.equal(field, ord("T"))
    elif _str:
        if not self._character_as_bytes:
            with suppress(UnicodeDecodeError):
                field = decode_ascii(field)

    if dim and not isinstance(recformat, _FormatP):
        # Apply the new field item dimensions
        nitems = reduce(operator.mul, dim)
        if field.ndim > 1:
            field = field[:, :nitems]
        if _str:
            fmt = field.dtype.char
            dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
            field.dtype = dtype
        else:
            field.shape = (field.shape[0],) + dim

    return field
def _get_heap_data(self, try_from_disk=True):
"""
Returns heap data (if present).
If ``try_from_disk=True`` and if data is read from a file, heap data
is a pointer into the table's raw data.
Otherwise it is computed from the in-memory arrays.
This is returned as a numpy byte array.
"""
if (
try_from_disk
and self._heapsize
and (raw_data := self._get_raw_data()) is not None
):
# Read the heap from disk
raw_data = raw_data.view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
# Data is only in memory so create the heap data, one column
# at a time, in the order that the data pointers appear in the
# column (regardless if that data pointer has a different,
# previous heap offset listed)
data = []
for idx in range(self._nfields):
# data should already be byteswapped from the caller
# using _binary_table_byte_swap
if not isinstance(self.columns._recformats[idx], _FormatP):
continue
for row in self.field(idx):
if len(row) > 0:
data.append(row.view(type=np.ndarray, dtype=np.ubyte))
if data:
return np.concatenate(data)
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
Returns the base array of self that "raw data array" that is the
array in the format that it was first read from a file before it was
sliced or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self._tbsize + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for index, name in enumerate(self.dtype.names):
column = self._coldefs[index]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, index)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'S' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [np.prod(arr.shape) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = scale_factors
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
# Cast before subtracting to avoid overflow problems.
dummy -= np.array(bzero).astype(dummy.dtype, casting="unsafe")
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
# be mark is not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(index, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(index, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
f"Could not save column '{self._coldefs[col_idx].name}': "
"Contains characters that cannot be encoded as ASCII as required "
"by FITS, starting at the index "
f"{exc.index[0] if len(exc.index) == 1 else exc.index!r} of the "
f"column, and the index {exc.start} of the string at that location."
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The index of the "end" column of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
f"Column {col_idx + 1} starting point overlaps the previous column."
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
f"Value {value!r} does not fit into the output's itemsize of "
f"{spans[col_idx]}."
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
| FITS_rec |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 28478,
"end": 29196
} | class ____(BuiltinClass):
"""Implementation of builtins.dict."""
_NAME = "dict"
def call(self, node, func, args, alias_map=None):
if not args.has_non_namedargs():
# special-case a dict constructor with explicit k=v args
d = abstract.Dict(self.ctx)
for k, v in args.namedargs.items():
d.set_str_item(node, k, v)
return node, d.to_variable(node)
else:
return super().call(node, func, args, alias_map)
def get_special_attribute(self, node, name, valself):
# For doing something like getting the __getitem__ attribute to subscript
# dict, we want to use the real dict type.
return self.ctx.convert.dict_type.get_special_attribute(node, name, valself)
| Dict |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/activation_functions.py | {
"start": 1845,
"end": 1992
} | class ____():
def __call__(self, x):
return np.log(1 + np.exp(x))
def gradient(self, x):
return 1 / (1 + np.exp(-x))
| SoftPlus |
python | langchain-ai__langchain | libs/langchain/langchain_classic/storage/encoder_backed.py | {
"start": 199,
"end": 5581
} | class ____(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
Examples that uses JSON for encoding/decoding:
```python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer,
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
```
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an `EncodedStore`.
Args:
store: The underlying byte store to wrap.
key_encoder: Function to encode keys from type `K` to strings.
value_serializer: Function to serialize values from type `V` to bytes.
value_deserializer: Function to deserialize bytes back to type V.
"""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> list[V | None]:
"""Get the values associated with the given keys.
Args:
keys: A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be `None`.
"""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
async def amget(self, keys: Sequence[K]) -> list[V | None]:
"""Async get the values associated with the given keys.
Args:
keys: A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be `None`.
"""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = await self.store.amget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
"""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Async set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
"""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
await self.store.amset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values.
Args:
keys: A sequence of keys to delete.
"""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
async def amdelete(self, keys: Sequence[K]) -> None:
"""Async delete the given keys and their associated values.
Args:
keys: A sequence of keys to delete.
"""
encoded_keys = [self.key_encoder(key) for key in keys]
await self.store.amdelete(encoded_keys)
def yield_keys(
self,
*,
prefix: str | None = None,
) -> Iterator[K] | Iterator[str]:
"""Get an iterator over keys that match the given prefix.
Args:
prefix: The prefix to match.
Yields:
Keys that match the given prefix.
"""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
async def ayield_keys(
self,
*,
prefix: str | None = None,
) -> AsyncIterator[K] | AsyncIterator[str]:
"""Async get an iterator over keys that match the given prefix.
Args:
prefix: The prefix to match.
Yields:
Keys that match the given prefix.
"""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
async for key in self.store.ayield_keys(prefix=prefix):
yield key
| EncoderBackedStore |
python | getsentry__sentry | src/sentry/migrations/0913_split_discover_dataset_dashboards_self_hosted.py | {
"start": 2481,
"end": 4672
} | class ____(TypesClass):
LINE_CHART = 0
AREA_CHART = 1
STACKED_AREA_CHART = 2
BAR_CHART = 3
TABLE = 4
BIG_NUMBER = 6
TOP_N = 7
TYPES = [
(LINE_CHART, "line"),
(AREA_CHART, "area"),
(STACKED_AREA_CHART, "stacked_area"),
(BAR_CHART, "bar"),
(TABLE, "table"),
(BIG_NUMBER, "big_number"),
(TOP_N, "top_n"),
]
TYPE_NAMES = [t[1] for t in TYPES]
def split_discover_dataset_dashboards_self_hosted(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
DashboardWidgetQuery = apps.get_model("sentry", "DashboardWidgetQuery")
catch_all_unsplit_widgets = Q(
widget__widget_type=DashboardWidgetTypes.DISCOVER,
) & ~Q(
widget__discover_widget_split__in=[
DashboardWidgetTypes.ERROR_EVENTS,
DashboardWidgetTypes.TRANSACTION_LIKE,
]
)
queryset = DashboardWidgetQuery.objects.filter(
catch_all_unsplit_widgets,
).select_related("widget__dashboard__organization")
for widget_query in RangeQuerySetWrapperWithProgressBar(queryset):
try:
_get_and_save_split_decision_for_dashboard_widget(widget_query, dry_run=False)
except Exception:
widget_query.widget.discover_widget_split = DashboardWidgetTypes.ERROR_EVENTS
widget_query.widget.dataset_source = DatasetSourcesTypes.UNKNOWN.value
widget_query.widget.save()
def reverse_split_discover_dataset_dashboards_self_hosted(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
DashboardWidgetQuery = apps.get_model("sentry", "DashboardWidgetQuery")
all_split_widgets = Q(
widget__discover_widget_split__in=[
DashboardWidgetTypes.ERROR_EVENTS,
DashboardWidgetTypes.TRANSACTION_LIKE,
]
)
queryset = DashboardWidgetQuery.objects.filter(all_split_widgets)
for widget_query in RangeQuerySetWrapperWithProgressBar(queryset):
widget_query.widget.discover_widget_split = None
widget_query.widget.dataset_source = DatasetSourcesTypes.UNKNOWN.value
widget_query.widget.save()
| DashboardWidgetDisplayTypes |
python | kamyu104__LeetCode-Solutions | Python/count-houses-in-a-circular-street-ii.py | {
"start": 213,
"end": 644
} | class ____(object):
def houseCount(self, street, k):
"""
:type street: Street
:type k: int
:rtype: int
"""
while not street.isDoorOpen():
street.moveRight()
result = 0
for i in xrange(k+1):
if i and street.isDoorOpen():
street.closeDoor()
result = i
street.moveRight()
return result
| Solution |
python | explosion__spaCy | spacy/lang/es/lemmatizer.py | {
"start": 117,
"end": 16016
} | class ____(Lemmatizer):
"""
Spanish rule-based lemmatizer with morph-based rule selection.
"""
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "rule":
required = ["lemma_rules", "lemma_rules_groups", "lemma_index", "lemma_exc"]
return (required, [])
else:
return super().get_lookups_config(mode)
def rule_lemmatize(self, token: Token) -> List[str]:
cache_key = (token.orth, token.pos, str(token.morph))
if cache_key in self.cache:
return self.cache[cache_key]
string = token.text
pos = token.pos_.lower()
features = set(token.morph)
if pos in ("", "eol", "space"):
return [string.lower()]
if pos in (
"adp",
"cconj",
"intj",
"part",
"propn",
"punct",
"sconj",
"sym",
"x",
):
if token.is_sent_start and pos != "propn":
return [string.lower()]
else:
return [string]
string = string.lower()
exc = self.lookups.get_table("lemma_exc").get(pos, {}).get(string)
if exc is not None:
lemmas = list(exc)
else:
if pos == "aux":
rule_pos = "verb"
else:
rule_pos = pos
rule = self.select_rule(rule_pos, list(features))
index = self.lookups.get_table("lemma_index").get(rule_pos, [])
lemmas = getattr(self, "lemmatize_" + rule_pos)(
string, features, rule, index
)
# Remove duplicates but preserve the ordering
lemmas = list(dict.fromkeys(lemmas))
self.cache[cache_key] = lemmas
return lemmas
def select_rule(self, pos: str, features: List[str]) -> Optional[str]:
groups = self.lookups.get_table("lemma_rules_groups")
if pos in groups:
for group in groups[pos]:
if set(group[1]).issubset(features):
return group[0]
return None
def lemmatize_adj(
self, word: str, features: List[str], rule: str, index: List[str]
) -> List[str]:
"""
Lemmatize an adjective.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Initialize empty lists for the generated lemmas
possible_lemmas = []
selected_lemmas = []
# Apply lemmatization rules
for old, new in self.lookups.get_table("lemma_rules").get(rule, []):
possible_lemma = re.sub(old + "$", new, word)
if possible_lemma != word:
possible_lemmas.append(possible_lemma)
# Additional rule for plurals that go from esdrújula to grave and end in
# 'n' or 's', e.g., jóvenes -> joven
additional_lemmas = []
if "Number=Plur" in features:
for possible_lemma in possible_lemmas:
if possible_lemma.endswith("n") or possible_lemma.endswith("s"):
for old, new in self.lookups.get_table("lemma_rules").get(
"accents", []
):
additional_lemmas.append(re.sub(old, new, possible_lemma))
possible_lemmas.extend(additional_lemmas)
for lemma in possible_lemmas:
if lemma in index:
selected_lemmas.append(lemma)
# If one or more of the created possible lemmas are in the lookup list,
# return all of them
if len(selected_lemmas) > 0:
return selected_lemmas
elif len(possible_lemmas) > 0:
return possible_lemmas
else:
return [word]
def lemmatize_adv(
self, word: str, features: List[str], rule: str, index: List[str]
) -> List[str]:
"""
Lemmatize an adverb.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Apply lemmatization rules
for old, new in self.lookups.get_table("lemma_rules").get("adverbs", []):
if word == old:
return [new]
# If none of the rules applies, return the original word
return [word]
def lemmatize_det(
self, word: str, features: List[str], rule: str, index: List[str]
) -> List[str]:
"""
Lemmatize a determiner.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Initialize empty lists for the generated lemmas
possible_lemmas = []
selected_lemmas = []
# First, search in rules specific to determiners
for old, new in self.lookups.get_table("lemma_rules").get("det", []):
if word == old:
return [new]
# If none of the specific rules apply, search in the common rules for
# determiners and pronouns that follow a unique pattern for
# lemmatization. If the word is in the list, return the corresponding
# lemma.
for old, new in self.lookups.get_table("lemma_rules").get(
"det_and_pron_fixed", []
):
if word == old:
return [new]
# If the word is not in the list of unique determiners and pronouns,
# apply general rules of lemmatization. Include the original word in the # list of possible lemmas.
for old, new in self.lookups.get_table("lemma_rules").get(
"det_and_pron_general", []
):
possible_lemma = re.sub(old + "$", new, word)
possible_lemmas.append(possible_lemma)
possible_lemmas.append(word)
if len(possible_lemmas) == 1:
return possible_lemmas
elif len(possible_lemmas) > 1:
for lemma in possible_lemmas:
if lemma in index:
selected_lemmas.append(lemma)
if len(selected_lemmas) >= 1:
return selected_lemmas
else:
return possible_lemmas
else:
return []
def lemmatize_noun(
self, word: str, features: List[str], rule: str, index: List[str]
) -> List[str]:
"""
Lemmatize a noun.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Initialize empty lists for the generated lemmas
possible_lemmas = []
selected_lemmas = []
# Apply lemmatization rules
for old, new in self.lookups.get_table("lemma_rules").get(rule, []):
possible_lemma = re.sub(old + "$", new, word)
if possible_lemma != word:
possible_lemmas.append(possible_lemma)
# Additional rule for plurals that go from esdrújula to grave and end in
# 'n' or 's', e.g., órdenes -> orden, exámenes -> examen
additional_lemmas = []
if "Number=Plur" in features:
for possible_lemma in possible_lemmas:
if possible_lemma.endswith("n") or possible_lemma.endswith("s"):
for old, new in self.lookups.get_table("lemma_rules").get(
"accents", []
):
additional_lemmas.append(re.sub(old, new, possible_lemma))
possible_lemmas.extend(additional_lemmas)
for lemma in possible_lemmas:
if lemma in index:
selected_lemmas.append(lemma)
# If one or more of the created possible lemmas are in the lookup list,
# return all of them
if len(selected_lemmas) > 0:
return selected_lemmas
elif len(possible_lemmas) > 0:
return possible_lemmas
else:
return [word]
def lemmatize_num(
self, word: str, features: List[str], rule: str, index: List[str]
) -> List[str]:
"""
Lemmatize a numeral.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# If the word is in the list of rules for numerals, return the
# corresponding lemma
for old, new in self.lookups.get_table("lemma_rules").get("num", []):
if word == old:
return [new]
# Normalize punctuation
splitted_word = word.split(",")
if re.search(r"(\.)([0-9]{3})$", splitted_word[0]):
word = re.sub(r"\.", r"", word)
word = re.sub(r",", r".", word)
return [word]
def lemmatize_pron(
self, word: str, features: List[str], rule: Optional[str], index: List[str]
) -> List[str]:
"""
Lemmatize a pronoun.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Initialize empty lists for the generated lemmas
possible_lemmas = []
selected_lemmas = []
# First, search in rules specific to pronouns
for old, new in self.lookups.get_table("lemma_rules").get("pron", []):
if word == old:
return [new]
# If none of the specific rules apply, search in the common rules for
# determiners and pronouns that follow a unique pattern for
# lemmatization. If the word is in the list, return the corresponding
# lemma.
for old, new in self.lookups.get_table("lemma_rules").get(
"det_and_pron_fixed", []
):
if word == old:
return [new]
# If the word is not in the list of unique determiners and pronouns,
# apply general rules of lemmatization. Include the original word in the
# list of possible lemmas.
for old, new in self.lookups.get_table("lemma_rules").get(
"det_and_pron_general", []
):
possible_lemma = re.sub(old + "$", new, word)
if possible_lemma != word:
possible_lemmas.append(possible_lemma)
possible_lemmas.append(word)
if len(possible_lemmas) == 1:
return possible_lemmas
elif len(possible_lemmas) > 1:
for lemma in possible_lemmas:
if lemma in index:
selected_lemmas.append(lemma)
if len(selected_lemmas) >= 1:
return selected_lemmas
else:
return possible_lemmas
else:
return []
def lemmatize_verb(
self, word: str, features: List[str], rule: Optional[str], index: List[str]
) -> List[str]:
"""
Lemmatize a verb.
word (str): The word to lemmatize.
features (List[str]): The morphological features as a list of Feat=Val
pairs.
index (List[str]): The POS-specific lookup list.
RETURNS (List[str]): The list of lemmas.
"""
# Exceptions for verb+pronoun(s)
if "PronType=Prs" in features:
return self.lemmatize_verb_pron(word, features, rule, index)
# Initialize empty lists for the generated lemmas
possible_lemmas = []
selected_lemmas = []
# Apply lemmatization rules
rule = str(rule or "")
for old, new in self.lookups.get_table("lemma_rules").get(rule, []):
possible_lemma = re.sub(old + "$", new, word)
if possible_lemma != word:
possible_lemmas.append(possible_lemma)
for lemma in possible_lemmas:
if lemma in index:
selected_lemmas.append(lemma)
if len(selected_lemmas) == 0:
# If none of the possible lemmas are in the lookup list,
# apply vocalic alternation rules and search in the lookup list
# again
for lemma in possible_lemmas:
for old, new in self.lookups.get_table("lemma_rules").get(
"voc_alt_1", []
):
if old in lemma:
for i, char in enumerate(lemma):
if char == old:
voc_alt_lemma = lemma[:i] + new + lemma[i + 1 :]
if voc_alt_lemma in index:
selected_lemmas.append(voc_alt_lemma)
for old, new in self.lookups.get_table("lemma_rules").get(
"voc_alt_2", []
):
if old in lemma:
voc_alt_lemma = lemma.replace(old, new, 1)
if voc_alt_lemma in index:
selected_lemmas.append(voc_alt_lemma)
# Additional rule for verbs that lose the accent mark when lemmatized,
# e.g., amplían -> ampliar
additional_lemmas = []
for possible_lemma in possible_lemmas:
for old, new in self.lookups.get_table("lemma_rules").get("accents", []):
additional_lemmas.append(re.sub(old, new, possible_lemma))
possible_lemmas.extend(additional_lemmas)
# If one or more of the created possible lemmas are in the lookup list,
# return all of them
if len(selected_lemmas) > 0:
return selected_lemmas
elif len(possible_lemmas) > 0:
return possible_lemmas
else:
return [word]
def lemmatize_verb_pron(
self, word: str, features: List[str], rule: Optional[str], index: List[str]
) -> List[str]:
# Strip and collect pronouns
pron_patt = "^(.*?)([mts]e|l[aeo]s?|n?os)$"
prons: List[str] = []
verb = word
m = re.search(pron_patt, verb)
while m is not None and len(prons) <= 3:
verb = re.sub(m.group(2) + "$", "", verb)
prons = [m.group(2)] + prons
m = re.search(pron_patt, verb)
# Strip accents from verb form
for old, new in self.lookups.get_table("lemma_rules").get("accents", []):
verb = re.sub(old, new, verb)
# Lemmatize the verb and pronouns, checking for exceptions
exc = self.lookups.get_table("lemma_exc").get("verb", {}).get(verb)
if exc is not None:
verb_lemma = exc[0]
else:
rule = self.select_rule("verb", features)
verb_lemma = self.lemmatize_verb(
verb, features - {"PronType=Prs"}, rule, index # type: ignore[operator]
)[0]
pron_lemmas = []
for pron in prons:
exc = self.lookups.get_table("lemma_exc").get("pron", {}).get(pron)
if exc is not None:
pron_lemmas.append(exc[0])
else:
rule = self.select_rule("pron", features)
pron_lemmas.append(self.lemmatize_pron(pron, features, rule, index)[0])
return [verb_lemma + " " + " ".join(pron_lemmas)]
| SpanishLemmatizer |
python | django__django | tests/db_functions/models.py | {
"start": 1931,
"end": 2063
} | class ____(models.Model):
f1 = models.FloatField(null=True, blank=True)
f2 = models.FloatField(null=True, blank=True)
| FloatModel |
python | huggingface__transformers | tests/models/starcoder2/test_modeling_starcoder2.py | {
"start": 1484,
"end": 6772
} | class ____(unittest.TestCase):
def test_starcoder2_batched_generation_sdpa(self):
EXPECTED_TEXT = [
"Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
"def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
]
model_id = "bigcode/starcoder2-7b"
model = Starcoder2ForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="sdpa"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
text = ["Hello my name is Younes and", "def hello_world():"]
inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
def test_starcoder2_batched_generation_eager(self):
EXPECTED_TEXT = [
"Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
"def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
]
model_id = "bigcode/starcoder2-7b"
model = Starcoder2ForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="eager"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
text = ["Hello my name is Younes and", "def hello_world():"]
inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
@require_flash_attn
@pytest.mark.flash_attn_test
def test_starcoder2_batched_generation_fa2(self):
EXPECTED_TEXT = [
"Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on",
"def hello_world():\n\treturn 'Hello World!'\n\n@app.route('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app",
]
model_id = "bigcode/starcoder2-7b"
model = Starcoder2ForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
text = ["Hello my name is Younes and", "def hello_world():"]
inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
@require_bitsandbytes
def test_starcoder2_batched_generation_4bit(self):
expectations = Expectations(
{
(None, None): [
'Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is related to the topic of "How to make a game". I am currently working on a project',
'def hello_world():\n\treturn "Hello World"\n\n@app.route(\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\n@app.route',
],
("cuda", 8): [
"Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is aimed at creating a new way of learning. I am hoping to create a new way of",
'def hello_world():\n\treturn "Hello World"\n\n@app.route(\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\n@app.route',
],
}
)
EXPECTED_TEXT = expectations.get_expectation()
model_id = "bigcode/starcoder2-7b"
model = Starcoder2ForCausalLM.from_pretrained(
model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
text = ["Hello my name is Younes and", "def hello_world():"]
inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=40, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
| Starcoder2IntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/base_layer_utils.py | {
"start": 27960,
"end": 31329
} | class ____(object):
"""Keras wrapper for handling tracking.Trackable object saving and restoring.
This class handles Trackables in both V1 and V2 modes, ensuring that they can
be saved and restored with the correct data and without adding additional ops
on every save.
Attributes:
trackable: The trackable to wrap.
num_tensors: The number of tensors that this trackable requires for saving.
"""
def __init__(self, trackable):
if not isinstance(trackable, tracking.Trackable):
raise ValueError('%s is not a Trackable object.' % (trackable,))
self._trackable = trackable
self._distribute_strategy = distribute_lib.get_strategy()
saveables = saveable_object_util.saveable_objects_from_trackable(
trackable).values()
# 'Saveables' won't exist when we're passed a legacy TF1 table like
# a StaticHashTable.
if not saveables:
self._num_tensors = 0
self._setter = lambda weights: None
self._getter = lambda: []
elif len(saveables) == 1:
saveable = list(saveables)[0]
if ops.executing_eagerly_outside_functions():
# If we're in eager mode, we need to defer calling the Trackable's
# saveable() callable until data export time.
# However, it is safe to call the saveable as many times as we want, so
# we will call it now to figure out how many tensors this Trackable will
# produce.
self._saveable = saveable
self._num_tensors = len(self._saveable().specs)
self._setter = lambda weights: self._saveable().restore(weights, None)
self._getter = lambda: [spec.tensor for spec in self._saveable().specs]
else:
# If we're in Graph mode, we need to evaluate the Saveable only once and
# cache the resulting restore graph. Failing to do this will result in
# new assignment ops being added to the graph each time set_weights() is
# called.
self._placeholder_tensors = []
self._saveable = saveable()
self._num_tensors = len(self._saveable.specs)
for spec in self._saveable.specs:
tensor = spec.tensor
self._placeholder_tensors.append(
array_ops.placeholder(tensor.dtype, tensor.shape))
self._assign_op = self._saveable.restore(self._placeholder_tensors,
None)
self._setter = self._set_weights_v1
self._getter = lambda: [spec.tensor for spec in self._saveable.specs]
else:
raise ValueError('Only Trackables with one Saveable are supported. '
'The Trackable %s has %d Saveables.' %
(trackable, len(saveables)))
@property
def num_tensors(self):
return self._num_tensors
def set_weights(self, weights):
if len(weights) != self._num_tensors:
raise ValueError(
('Weight handler for trackable %s received the wrong number of ' +
'weights: expected %s, got %s.') %
(self._trackable, self._num_tensors, len(weights)))
self._setter(weights)
def get_tensors(self):
return self._getter()
def _set_weights_v1(self, weights):
feed_dict = {}
for idx, tensor in enumerate(weights):
feed_dict[self._placeholder_tensors[idx]] = tensor
backend.get_session().run(self._assign_op, feed_dict)
| TrackableWeightHandler |
python | optuna__optuna | optuna/storages/_rdb/models.py | {
"start": 11996,
"end": 13517
} | class ____(BaseModel):
__tablename__ = "trial_params"
__table_args__: Any = (UniqueConstraint("trial_id", "param_name"),)
param_id = _Column(Integer, primary_key=True)
trial_id = _Column(Integer, ForeignKey("trials.trial_id"))
param_name = _Column(String(MAX_INDEXED_STRING_LENGTH))
param_value = _Column(Float(precision=FLOAT_PRECISION))
distribution_json = _Column(Text())
trial = orm.relationship(
TrialModel, backref=orm.backref("params", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_param_name(
cls, trial: TrialModel, param_name: str, session: orm.Session
) -> "TrialParamModel" | None:
param_distribution = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.param_name == param_name)
.one_or_none()
)
return param_distribution
@classmethod
def find_or_raise_by_trial_and_param_name(
cls, trial: TrialModel, param_name: str, session: orm.Session
) -> "TrialParamModel":
param_distribution = cls.find_by_trial_and_param_name(trial, param_name, session)
if param_distribution is None:
raise KeyError(NOT_FOUND_MSG)
return param_distribution
@classmethod
def where_trial_id(cls, trial_id: int, session: orm.Session) -> list["TrialParamModel"]:
trial_params = session.query(cls).filter(cls.trial_id == trial_id).all()
return trial_params
| TrialParamModel |
python | getsentry__sentry | src/sentry/integrations/jira/webhooks/uninstalled.py | {
"start": 583,
"end": 1490
} | class ____(JiraWebhookBase):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
"""
Webhook hit by Jira whenever someone uninstalls the Sentry integration from their Jira instance.
"""
def post(self, request: Request, *args, **kwargs) -> Response:
token = self.get_token(request)
rpc_integration = get_integration_from_jwt(
token=token,
path=request.path,
provider=self.provider,
query_params=request.GET,
method="POST",
)
integration = Integration.objects.get(id=rpc_integration.id)
bind_org_context_from_integration(integration.id, {"webhook": "uninstalled"})
sentry_sdk.set_tag("integration_id", integration.id)
integration.update(status=ObjectStatus.DISABLED)
return self.respond()
| JiraSentryUninstalledWebhook |
python | getsentry__sentry | src/sentry/snuba/sessions_v2.py | {
"start": 8068,
"end": 14580
} | class ____:
"""
This is the definition of the query the user wants to execute.
This is constructed out of the request params, and also contains a list of
`fields` and `groupby` definitions as [`ColumnDefinition`] objects.
"""
def __init__(
self,
query,
params,
query_config: SessionsQueryConfig,
limit: int | None = 0,
offset: int | None = 0,
):
self.query = query.get("query", "")
self.raw_fields = raw_fields = query.getlist("field", [])
self.raw_groupby = raw_groupby = query.getlist("groupBy", [])
self.raw_orderby = query.getlist("orderBy") # only respected by metrics implementation
self.limit = limit
self.offset = offset
self._query_config = query_config
if len(raw_fields) == 0:
raise InvalidField('Request is missing a "field"')
self.fields = {}
for key in raw_fields:
if key not in COLUMN_MAP:
from sentry.release_health.metrics_sessions_v2 import FIELD_MAP
if key in FIELD_MAP:
# HACK : Do not raise an error for metrics-only fields,
# Simply ignore them instead.
#
# It is important to note that this ignore can lead to the
# self.primary_column not being initialized.
continue
raise InvalidField(f'Invalid field: "{key}"')
self.fields[key] = COLUMN_MAP[key]
self.groupby = []
for key in raw_groupby:
if key not in GROUPBY_MAP:
raise InvalidField(f'Invalid groupBy: "{key}"')
self.groupby.append(GROUPBY_MAP[key])
start, end, rollup = get_constrained_date_range(
query,
allowed_resolution=query_config.allowed_resolution,
restrict_date_range=query_config.restrict_date_range,
)
self.rollup = rollup
self.start = start
self.end = end
self.params = params
query_columns = set()
for i, (field_name, field) in enumerate(self.fields.items()):
columns = field.get_snuba_columns(raw_groupby)
if i == 0 or field_name == "sum(session)": # Prefer first, but sum(session) always wins
self.primary_column = columns[0] # Will be used in order by
query_columns.update(columns)
for groupby in self.groupby:
query_columns.update(groupby.get_snuba_columns())
self.query_columns = list(query_columns)
query_groupby = set()
for groupby in self.groupby:
query_groupby.update(groupby.get_snuba_groupby())
self.query_groupby = list(query_groupby)
def to_query_builder_dict(self, orderby=None):
num_intervals = len(get_timestamps(self))
if num_intervals == 0:
raise ZeroIntervalsException
max_groups = SNUBA_LIMIT // num_intervals
query_builder_dict = {
"dataset": Dataset.Sessions,
"params": {
**self.params,
"start": self.start,
"end": self.end,
},
"selected_columns": self.query_columns,
"groupby_columns": self.query_groupby,
"query": self.query,
"orderby": orderby,
"limit": max_groups,
"granularity": self.rollup,
"config": QueryBuilderConfig(auto_aggregations=True),
}
if self._query_config.allow_session_status_query:
query_builder_dict.update({"extra_filter_allowlist_fields": ["session.status"]})
return query_builder_dict
def get_filter_conditions(self):
"""
Returns filter conditions for the query to be used for metrics queries, and hence excluding timestamp and
organization id condition that are later added by the metrics layer.
"""
conditions = SessionsV2QueryBuilder(**self.to_query_builder_dict()).where
filter_conditions = []
for condition in conditions:
self._check_supported_condition(condition)
# Exclude sessions "started" timestamp condition and org_id condition, as it is not needed for metrics
# queries.
if (
isinstance(condition, Condition)
and isinstance(condition.lhs, Column)
and condition.lhs.name in ["started", "org_id"]
):
continue
filter_conditions.append(condition)
return filter_conditions
@classmethod
def _check_supported_condition(cls, condition):
if isinstance(condition, BooleanCondition):
for nested_condition in condition.conditions:
cls._check_supported_condition(nested_condition)
elif isinstance(condition, Condition):
if isinstance(condition.lhs, Function):
# Since we moved to metrics backed sessions, we don't allow wildcard search anymore. The reason for this
# is that we don't store tag values as strings in the database, this makes wildcard match on the
# db impossible. The solution would be to lift it out at the application level, but it will impact
# performance.
if condition.lhs.function == "match":
raise InvalidField("Invalid condition: wildcard search is not supported")
def __repr__(self) -> str:
return f"{self.__class__.__name__}({repr(self.__dict__)})"
MAX_POINTS = 1000 # max. points in time
ONE_DAY = timedelta(days=1).total_seconds()
ONE_HOUR = timedelta(hours=1).total_seconds()
ONE_MINUTE = timedelta(minutes=1).total_seconds()
#: We know that a limit of 1000 is too low for some UI use cases, e.g.
#: https://sentry.io/organizations/sentry/projects/sentry/?project=1&statsPeriod=14d
#: (2 * 14d * 24h * 4 statuses = 2688 groups).
#: At the same time, there is no justification from UI perspective to increase
#: the limit to the absolute maximum of 10000 (see https://github.com/getsentry/snuba/blob/69862db3ad224b48810ac1bb3001e4c446bf0aff/snuba/query/snql/parser.py#L908-L909).
#: -> Let's go with 5000, so we can still serve the 50 releases over 90d that are used here:
#: https://github.com/getsentry/sentry/blob/d6ed7c12844b70edb6a93b4f33d3e60e8516105a/static/app/views/releases/list/releasesAdoptionChart.tsx#L91-L96
SNUBA_LIMIT = 5000
| QueryDefinition |
python | google__pytype | pytype/errors/error_types.py | {
"start": 1185,
"end": 1342
} | class ____(FailedFunctionCall):
"""For objects that don't have __call__."""
def __init__(self, obj):
super().__init__()
self.obj = obj
| NotCallable |
python | doocs__leetcode | solution/0000-0099/0086.Partition List/Solution.py | {
"start": 151,
"end": 602
} | class ____:
def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:
l = ListNode()
r = ListNode()
tl, tr = l, r
while head:
if head.val < x:
tl.next = head
tl = tl.next
else:
tr.next = head
tr = tr.next
head = head.next
tr.next = None
tl.next = r.next
return l.next
| Solution |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 212,
"end": 350
} | class ____(Place):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
| Restaurant |
python | davidhalter__jedi | jedi/inference/value/dynamic_arrays.py | {
"start": 6300,
"end": 6991
} | class ____(ValueWrapper):
def __init__(self, wrapped_value, assigned_values, contextualized_key):
super().__init__(wrapped_value)
self._assigned_values = assigned_values
self._contextualized_key = contextualized_key
def py__getitem__(self, *args, **kwargs):
return self._wrapped_value.py__getitem__(*args, **kwargs) | self._assigned_values
def py__simple_getitem__(self, index):
actual = [
v.get_safe_value(_sentinel)
for v in self._contextualized_key.infer()
]
if index in actual:
return self._assigned_values
return self._wrapped_value.py__simple_getitem__(index)
| _Modification |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 11619,
"end": 13337
} | class ____(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
# Create source file (1.h5)
with h5.File(osp.join(self.tmpdir, '1.h5'), 'w') as f:
d = f.create_dataset('data', (10,), 'i4')
d[:] = np.arange(10)*10
def test_index_layout(self):
# Assemble virtual dataset (indexing target)
layout = h5.VirtualLayout((100,), 'i4')
inds = [3,6,20,25,33,47,70,75,96,98]
filename = osp.join(self.tmpdir, "1.h5")
vsource = h5.VirtualSource(filename, 'data', shape=(10,))
layout[inds] = vsource
outfile = osp.join(self.tmpdir, make_name('VDS{}.h5'))
# Assembly virtual dataset (indexing source)
layout2 = h5.VirtualLayout((6,), 'i4')
inds2 = [0,1,4,5,8]
layout2[1:] = vsource[inds2]
# Add virtual datasets to output file and close
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('/data', layout, fillvalue=-5)
f.create_virtual_dataset(b'/data2', layout2, fillvalue=-3)
# Read data from virtual datasets
with h5.File(outfile, 'r') as f:
data = f['/data'][()]
data2 = f['/data2'][()]
# Verify
assert_array_equal(data[inds], np.arange(10)*10)
assert_array_equal(data2[1:], [0,10,40,50,80])
mask = np.zeros(100)
mask[inds] = 1
self.assertEqual(data[mask == 0].min(), -5)
self.assertEqual(data[mask == 0].max(), -5)
self.assertEqual(data2[0], -3)
def tearDown(self):
shutil.rmtree(self.tmpdir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
| IndexingTestCase |
python | Netflix__metaflow | metaflow/_vendor/click/parser.py | {
"start": 6075,
"end": 15691
} | class ____(object):
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
:param ctx: optionally the :class:`~click.Context` where this parser
should go with.
"""
def __init__(self, ctx=None):
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt = {}
self._long_opt = {}
self._opt_prefixes = {"-", "--"}
self._args = []
def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
provided. Action can be any of ``store``, ``store_const``,
``append``, ``appnd_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(self, dest, nargs=1, obj=None):
"""Adds a positional argument named `dest` to the parser.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
def parse_args(self, args):
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state):
pargs, args = _unpack_args(
state.largs + state.rargs, [x.nargs for x in self._args]
)
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state):
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == "--":
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt, explicit_value, state):
if opt not in self._long_opt:
possibilities = [word for word in self._long_opt if word.startswith(opt)]
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
elif explicit_value is not None:
raise BadOptionUsage(opt, "{} option does not take a value".format(opt))
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg, state):
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(prefix + ch, self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt, ctx=self.ctx)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
else:
value = None
option.process(value, state)
if stop:
break
# If we got any unknown options we re-combinate the string of the
# remaining options and re-attach the prefix, then report that
# to the state as new larg. This way there is basic combinatorics
# that can be achieved while still ignoring unknown arguments.
if self.ignore_unknown_options and unknown_options:
state.largs.append("{}{}".format(prefix, "".join(unknown_options)))
def _process_opts(self, arg, state):
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if "=" in arg:
long_opt, explicit_value = arg.split("=", 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
return self._match_short_opt(arg, state)
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
| OptionParser |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 231917,
"end": 236953
} | class ____(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBatchNumpyInputs(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
with self.cached_session():
img1 = self.evaluate(constant_op.constant(img1))
img2 = self.evaluate(constant_op.constant(img2))
ssim = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertLess(self.evaluate(ssim), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testWithIndexMap(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
ssim_locals = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
return_index_map=True)
self.assertEqual(ssim_locals.shape, (1, 6, 6))
ssim_global = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
axes = constant_op.constant([-2, -1], dtype=dtypes.int32)
self.assertAllClose(ssim_global, math_ops.reduce_mean(ssim_locals, axes))
| SSIMTest |
python | huggingface__transformers | src/transformers/models/maskformer/image_processing_maskformer.py | {
"start": 1703,
"end": 13293
} | class ____(ImagesKwargs, total=False):
r"""
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
"""
size_divisor: int
ignore_index: Optional[int]
do_reduce_labels: bool
num_labels: Optional[int]
def max_across_indices(values: Iterable[Any]) -> list[Any]:
"""
Return the maximum value across all indices of an iterable of values.
"""
return [max(values_i) for values_i in zip(*values)]
# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
def get_max_height_width(
images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> list[int]:
"""
Get the maximum height and width across all images in a batch.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if input_data_format == ChannelDimension.FIRST:
_, max_height, max_width = max_across_indices([img.shape for img in images])
elif input_data_format == ChannelDimension.LAST:
max_height, max_width, _ = max_across_indices([img.shape for img in images])
else:
raise ValueError(f"Invalid channel dimension format: {input_data_format}")
return (max_height, max_width)
# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
def make_pixel_mask(
image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
"""
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`np.ndarray`):
Image to make the pixel mask for.
output_size (`tuple[int, int]`):
Output size of the mask.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
mask = np.zeros(output_size, dtype=np.int64)
mask[:input_height, :input_width] = 1
return mask
# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
def binary_mask_to_rle(mask):
    """
    Convert a binary mask of shape `(height, width)` into run-length encoding (RLE).

    Args:
        mask (`torch.Tensor` or `numpy.array`):
            A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
            segment_id or class_id.

    Returns:
        `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
        format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()

    # Pad with a zero on each side so runs touching the borders are detected too.
    flat = np.concatenate([[0], mask.flatten(), [0]])
    # Each value change marks the boundary of a run; offsets are 1-based.
    runs = np.where(flat[1:] != flat[:-1])[0] + 1
    # Turn absolute end positions into run lengths.
    runs[1::2] -= runs[::2]
    return list(runs)
# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
def convert_segmentation_to_rle(segmentation):
    """
    Convert a segmentation map of shape `(height, width)` into per-segment run-length encodings (RLE).

    Args:
        segmentation (`torch.Tensor` or `numpy.array`):
            A segmentation map of shape `(height, width)` where each value denotes a segment or class id.

    Returns:
        `list[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
    """
    encodings = []
    # One binary mask (and hence one RLE) per distinct segment id.
    for segment_id in torch.unique(segmentation):
        binary_mask = torch.where(segmentation == segment_id, 1, 0)
        encodings.append(binary_mask_to_rle(binary_mask))
    return encodings
# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Filter out queries that are either low-confidence or predict the "no object" class, returning the
    surviving `masks`, `scores` and `labels`.

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            A number between 0 and 1 used to binarize the masks.
        num_labels (`int`):
            Label id reserved for the "no object" class.

    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.

    Returns:
        `tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
        < `object_mask_threshold`.
    """
    if masks.shape[0] != scores.shape[0] or scores.shape[0] != labels.shape[0]:
        raise ValueError("mask, scores and labels must have the same shape!")

    # Keep a query only if it is not the "no object" class and clears the score threshold.
    keep = (labels != num_labels) & (scores > object_mask_threshold)
    return masks[keep], scores[keep], labels[keep]
# Copied from transformers.models.detr.image_processing_detr.check_segment_validity
def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    """
    Decide whether query `k` yields a valid segment: it must own at least one pixel in the
    argmax assignment, have a non-empty thresholded mask, and the two areas must overlap enough.
    Returns `(mask_exists, mask_k)` where `mask_k` is the boolean assignment mask for query `k`.
    """
    # Pixels assigned to query k by the per-pixel argmax over all queries.
    mask_k = mask_labels == k
    assigned_area = mask_k.sum()

    # Pixels where query k's own probability clears the binarization threshold.
    thresholded_area = (mask_probs[k] >= mask_threshold).sum()

    mask_exists = assigned_area > 0 and thresholded_area > 0
    if mask_exists:
        # Discard disconnected tiny fragments: the assigned area must cover a large
        # enough fraction of the thresholded area.
        if not (assigned_area / thresholded_area).item() > overlap_mask_area_threshold:
            mask_exists = False
    return mask_exists, mask_k
# Copied from transformers.models.detr.image_processing_detr.compute_segments
def compute_segments(
    mask_probs,
    pred_scores,
    pred_labels,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    label_ids_to_fuse: Optional[set[int]] = None,
    target_size: Optional[tuple[int, int]] = None,
):
    """
    Fuse per-query mask probabilities and class predictions into one segmentation map plus
    per-segment metadata.

    Args:
        mask_probs (`torch.Tensor`):
            Tensor of shape `(num_queries, height, width)` with per-query mask probabilities.
            Modified in place (weighted by `pred_scores`).
        pred_scores (`torch.Tensor`):
            Tensor of shape `(num_queries,)` with each query's confidence score.
        pred_labels (`torch.Tensor`):
            Tensor of shape `(num_queries,)` with each query's predicted class id.
        mask_threshold (`float`, *optional*, defaults to 0.5):
            Threshold used when binarizing each query's mask.
        overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
            Minimum ratio of assigned to thresholded area for a segment to be kept.
        label_ids_to_fuse (`set[int]`, *optional*):
            Class ids whose instances are merged into a single segment. Defaults to fusing nothing.
        target_size (`tuple[int, int]`, *optional*):
            If given, masks are bilinearly resized to `(height, width)` before aggregation.

    Returns:
        `tuple[torch.Tensor, list[dict]]`: The `(height, width)` int32 segmentation map
        (0 means "no segment") and one dict per segment with keys `id`, `label_id`,
        `was_fused` and `score`.
    """
    # Fix: with the default `label_ids_to_fuse=None`, `pred_class in label_ids_to_fuse`
    # below raised `TypeError: argument of type 'NoneType' is not iterable`.
    if label_ids_to_fuse is None:
        label_ids_to_fuse = set()

    height = mask_probs.shape[1] if target_size is None else target_size[0]
    width = mask_probs.shape[2] if target_size is None else target_size[1]

    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: list[dict] = []

    if target_size is not None:
        mask_probs = nn.functional.interpolate(
            mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
        )[0]

    current_segment_id = 0

    # Weigh each mask by its prediction score
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)  # [height, width]

    # Keep track of instances of each class
    stuff_memory_list: dict[str, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse

        # Check if mask exists and large enough to be a segment
        mask_exists, mask_k = check_segment_validity(
            mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
        )

        if mask_exists:
            # Fused classes reuse the segment id of an earlier instance of the same class.
            if pred_class in stuff_memory_list:
                current_segment_id = stuff_memory_list[pred_class]
            else:
                current_segment_id += 1

            # Add current object segment to final segmentation map
            segmentation[mask_k] = current_segment_id
            segment_score = round(pred_scores[k].item(), 6)
            segments.append(
                {
                    "id": current_segment_id,
                    "label_id": pred_class,
                    "was_fused": should_fuse,
                    "score": segment_score,
                }
            )
            if should_fuse:
                stuff_memory_list[pred_class] = current_segment_id

    return segmentation, segments
# TODO: (Amy) Move to image_transforms
def convert_segmentation_map_to_binary_masks(
    segmentation_map: np.ndarray,
    instance_id_to_semantic_id: Optional[dict[int, int]] = None,
    ignore_index: Optional[int] = None,
    do_reduce_labels: bool = False,
):
    """
    Split an integer segmentation map into one binary mask per id, plus the matching label array.

    Args:
        segmentation_map (`np.ndarray`):
            Segmentation map of shape `(height, width)` with class or instance ids.
        instance_id_to_semantic_id (`dict[int, int]`, *optional*):
            If given, maps each instance id found in the map to its semantic class id.
        ignore_index (`int`, *optional*):
            Id treated as background and excluded from the returned masks.
        do_reduce_labels (`bool`, *optional*, defaults to `False`):
            If `True`, decrement all ids by 1 and replace the former background id 0
            with `ignore_index` (which must then be provided).

    Returns:
        `tuple[np.ndarray, np.ndarray]`: float32 masks of shape `(num_ids, height, width)`
        and int64 labels of shape `(num_ids,)`.
    """
    if do_reduce_labels and ignore_index is None:
        raise ValueError("If `do_reduce_labels` is True, `ignore_index` must be provided.")

    if do_reduce_labels:
        # Shift every id down by one; the old background (0) becomes `ignore_index`.
        segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1)

    # Unique ids (class or instance ids based on input), minus the background id if set.
    unique_ids = np.unique(segmentation_map)
    if ignore_index is not None:
        unique_ids = unique_ids[unique_ids != ignore_index]

    # One binary mask per remaining id, stacked along a new leading axis.
    if len(unique_ids) > 0:
        binary_masks = np.stack([segmentation_map == uid for uid in unique_ids], axis=0)
    else:
        binary_masks = np.zeros((0, *segmentation_map.shape))

    if instance_id_to_semantic_id is not None:
        # Translate instance ids into class ids, undoing the reduce shift for the lookup.
        labels = np.zeros(unique_ids.shape[0])
        for uid in unique_ids:
            lookup_key = uid + 1 if do_reduce_labels else uid
            class_id = instance_id_to_semantic_id[lookup_key]
            labels[unique_ids == uid] = class_id - 1 if do_reduce_labels else class_id
    else:
        labels = unique_ids

    return binary_masks.astype(np.float32), labels.astype(np.int64)
def get_maskformer_resize_output_image_size(
    image: np.ndarray,
    size: Union[int, tuple[int, int], list[int], tuple[int]],
    max_size: Optional[int] = None,
    size_divisor: int = 0,
    default_to_square: bool = True,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
    """
    Compute the output size for resizing, optionally rounding each side up to a multiple
    of `size_divisor`.

    Args:
        image (`np.ndarray`):
            The input image.
        size (`int` or `tuple[int, int]` or `list[int]` or `tuple[int]`):
            The size of the output image.
        max_size (`int`, *optional*):
            The maximum size of the output image.
        size_divisor (`int`, *optional*, defaults to 0):
            If `size_divisor` is given, the output image size will be divisible by the number.
        default_to_square (`bool`, *optional*, defaults to `True`):
            Whether to default to square if no size is provided.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `tuple[int, int]`: The output size.
    """
    height, width = get_resize_output_image_size(
        input_image=image,
        size=size,
        default_to_square=default_to_square,
        max_size=max_size,
        input_data_format=input_data_format,
    )

    if size_divisor > 0:
        # Round each side up so both dimensions are multiples of `size_divisor`.
        height = int(math.ceil(height / size_divisor) * size_divisor)
        width = int(math.ceil(width / size_divisor) * size_divisor)

    return (height, width)
@requires(backends=("vision",))
| MaskFormerImageProcessorKwargs |
python | lepture__authlib | authlib/oauth2/rfc7662/token_validator.py | {
"start": 124,
"end": 1378
} | class ____(TokenValidator):
TOKEN_TYPE = "bearer"
def introspect_token(self, token_string):
"""Request introspection token endpoint with the given token string,
authorization server will return token information in JSON format.
Developers MUST implement this method before using it::
def introspect_token(self, token_string):
# for example, introspection token endpoint has limited
# internal IPs to access, so there is no need to add
# authentication.
url = "https://example.com/oauth/introspect"
resp = requests.post(url, data={"token": token_string})
resp.raise_for_status()
return resp.json()
"""
raise NotImplementedError()
def authenticate_token(self, token_string):
return self.introspect_token(token_string)
def validate_token(self, token, scopes, request):
if not token or not token["active"]:
raise InvalidTokenError(
realm=self.realm, extra_attributes=self.extra_attributes
)
if self.scope_insufficient(token.get("scope"), scopes):
raise InsufficientScopeError()
| IntrospectTokenValidator |
python | getsentry__sentry | tests/sentry/event_manager/test_event_manager.py | {
"start": 4246,
"end": 4511
} | class ____:
def make_release_event(self, release_name: str, project_id: int) -> Event:
manager = EventManager(make_event(release=release_name))
manager.normalize()
event = manager.save(project_id)
return event
| EventManagerTestMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.