language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/_stream.py | {
"start": 233,
"end": 3541
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet"
_path_str = "scattercarpet.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | pydata__xarray | xarray/tests/test_utils.py | {
"start": 6419,
"end": 7018
} | class ____:
def test_sorted_uniform(self):
assert utils.is_uniform_spaced(np.arange(5))
def test_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([-2, 1, 89])
def test_not_sorted_uniform(self):
assert not utils.is_uniform_spaced([1, -1, 3])
def test_not_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([4, 1, 89])
def test_two_numbers(self):
assert utils.is_uniform_spaced([0, 1.7])
def test_relative_tolerance(self):
assert utils.is_uniform_spaced([0, 0.97, 2], rtol=0.1)
| Test_is_uniform_and_sorted |
python | neetcode-gh__leetcode | python/2482-difference-between-ones-and-zeros-in-row-and-column.py | {
"start": 0,
"end": 787
} | class ____:
def onesMinusZeros(self, grid: List[List[int]]) -> List[List[int]]:
m , n = len(grid), len(grid[0])
rowCount = [[0, 0] for _ in range(m)] # (zeros, ones)
colCount = [[0, 0] for _ in range(n)]
res = []
for r in range(m):
for c in range(n):
if grid[r][c] == 1:
rowCount[r][1] += 1
colCount[c][1] += 1
else:
rowCount[r][0] += 1
colCount[c][0] += 1
for r in range(m):
row =[]
for c in range(n):
row.append(rowCount[r][1] + colCount[c][1] -
rowCount[r][0] - colCount[c][0])
res.append(row)
return res
| Solution |
python | TheAlgorithms__Python | geometry/geometry.py | {
"start": 6346,
"end": 7588
} | class ____(Polygon):
"""
A geometric rectangle on a 2D surface.
>>> rectangle_one = Rectangle(5, 10)
>>> rectangle_one.perimeter()
30
>>> rectangle_one.area()
50
>>> Rectangle(-5, 10)
Traceback (most recent call last):
...
TypeError: length must be a positive numeric value.
"""
def __init__(self, short_side_length: float, long_side_length: float) -> None:
super().__init__()
self.short_side_length = short_side_length
self.long_side_length = long_side_length
self.post_init()
def post_init(self) -> None:
"""
>>> Rectangle(5, 10) # doctest: +NORMALIZE_WHITESPACE
Rectangle(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None),
Side(length=10, angle=Angle(degrees=90), next_side=None)])
"""
self.short_side = Side(self.short_side_length)
self.long_side = Side(self.long_side_length)
super().add_side(self.short_side)
super().add_side(self.long_side)
def perimeter(self) -> float:
return (self.short_side.length + self.long_side.length) * 2
def area(self) -> float:
return self.short_side.length * self.long_side.length
@dataclass
| Rectangle |
python | mlflow__mlflow | .claude/hooks/lint.py | {
"start": 3476,
"end": 6110
} | class ____:
tool_name: Literal["Edit", "Write"]
file_path: Path
@classmethod
def parse(cls) -> "HookInput | None":
# https://code.claude.com/docs/en/hooks#posttooluse-input
data = json.loads(sys.stdin.read())
tool_name = data.get("tool_name")
tool_input = data.get("tool_input")
if tool_name not in ("Edit", "Write"):
return None
file_path_str = tool_input.get("file_path")
if not file_path_str:
return None
file_path = Path(file_path_str)
if project_dir := os.environ.get("CLAUDE_PROJECT_DIR"):
file_path = file_path.relative_to(project_dir)
return cls(
tool_name=tool_name,
file_path=file_path,
)
def is_tracked(file_path: Path) -> bool:
result = subprocess.run(["git", "ls-files", "--error-unmatch", file_path], capture_output=True)
return result.returncode == 0
def get_source_and_diff_ranges(hook_input: HookInput) -> tuple[str, list[DiffRange]]:
if hook_input.tool_name == "Edit" and is_tracked(hook_input.file_path):
# For Edit on tracked files, use git diff to get only changed lines
diff_output = subprocess.check_output(
["git", "--no-pager", "diff", "-U0", "HEAD", "--", hook_input.file_path],
text=True,
)
diff_ranges = parse_diff_ranges(diff_output)
else:
# For Write or Edit on untracked files, lint the whole file
diff_ranges = [DiffRange(start=1, end=sys.maxsize)]
source = hook_input.file_path.read_text()
return source, diff_ranges
def main() -> int:
# Kill switch: disable hook if environment variable is set
if os.environ.get(KILL_SWITCH_ENV_VAR):
return 0
hook_input = HookInput.parse()
if not hook_input:
return 0
# Ignore non-Python files
if hook_input.file_path.suffix != ".py":
return 0
# Ignore non-test files
if not is_test_file(hook_input.file_path):
return 0
source, diff_ranges = get_source_and_diff_ranges(hook_input)
if errors := lint(hook_input.file_path, source, diff_ranges):
error_details = "\n".join(f" - {error}" for error in errors)
reason = (
f"Lint errors found:\n{error_details}\n\n"
f"To disable this hook, set {KILL_SWITCH_ENV_VAR}=1"
)
# Exit code 2 = blocking error. stderr is fed back to Claude.
# See: https://code.claude.com/docs/en/hooks#hook-output
sys.stderr.write(reason + "\n")
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
| HookInput |
python | PyCQA__pylint | tests/functional/d/dataclass/dataclass_with_field.py | {
"start": 286,
"end": 755
} | class ____:
"""Case class (group Item)"""
name: str
irr: float = 0
items: List[Item] = field(default_factory=lambda: [])
def add_item(self, item: Item) -> None:
"""Add an item to the item list."""
self.items.append(item)
def find_item(self, description: str) -> Item:
"""Find an item by description"""
return next(
(item for item in self.items if item.description == description), None
)
| Case |
python | numba__numba | numba/core/types/containers.py | {
"start": 6991,
"end": 7536
} | class ____(BaseTuple):
def __getitem__(self, i):
"""
Return element at position i
"""
return self.types[i]
def __len__(self):
# Beware: this makes Tuple(()) false-ish
return len(self.types)
def __iter__(self):
return iter(self.types)
@staticmethod
def is_types_iterable(types):
# issue 4463 - check if argument 'types' is iterable
if not isinstance(types, Iterable):
raise TypingError("Argument 'types' is not iterable")
| _HeterogeneousTuple |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/test_utils.py | {
"start": 11540,
"end": 13271
} | class ____(RunLauncher, ConfigurableClass):
def __init__(
self,
inst_data: Optional[ConfigurableClassData] = None,
bad_run_ids=None,
bad_user_code_run_ids=None,
):
self._inst_data = inst_data
self._queue = []
self._launched_run_ids = set()
self.bad_run_ids = bad_run_ids or set()
self.bad_user_code_run_ids = bad_user_code_run_ids or set()
super().__init__()
def launch_run(self, context): # pyright: ignore[reportIncompatibleMethodOverride]
run = context.dagster_run
check.inst_param(run, "run", DagsterRun)
check.invariant(run.status == DagsterRunStatus.STARTING)
if run.run_id in self.bad_run_ids:
raise Exception(f"Bad run {run.run_id}")
if run.run_id in self.bad_user_code_run_ids:
raise DagsterUserCodeUnreachableError(f"User code error launching run {run.run_id}")
self._queue.append(run)
self._launched_run_ids.add(run.run_id)
return run
def queue(self):
return self._queue
def did_run_launch(self, run_id):
return run_id in self._launched_run_ids
@classmethod
def config_type(cls):
return Shape(
{
"bad_run_ids": Field(Array(str), is_required=False),
"bad_user_code_run_ids": Field(Array(str), is_required=False),
}
)
@classmethod
def from_config_value(cls, inst_data, config_value):
return cls(inst_data=inst_data, **config_value)
@property
def inst_data(self):
return self._inst_data
def terminate(self, run_id):
check.not_implemented("Termintation not supported")
| MockedRunLauncher |
python | getsentry__sentry | tests/sentry/tasks/test_check_auth.py | {
"start": 694,
"end": 2820
} | class ____(TestCase):
@patch("sentry.tasks.auth.check_auth.check_auth_identities")
def test_simple(self, mock_check_auth_identities: MagicMock) -> None:
organization = self.create_organization(name="Test")
user = self.create_user(email="bar@example.com")
auth_provider = AuthProvider.objects.create(
organization_id=organization.id, provider="dummy"
)
self.create_member(
user_id=user.id, organization=organization, flags=OrganizationMember.flags["sso:linked"]
)
ai = AuthIdentity.objects.create(
auth_provider=auth_provider, user=user, last_synced=timezone.now() - timedelta(days=1)
)
check_auth()
updated_ai = AuthIdentity.objects.get(id=ai.id)
assert updated_ai.last_synced != ai.last_synced
assert updated_ai.last_verified == ai.last_verified
mock_check_auth_identities.apply_async.assert_called_once_with(
kwargs={"auth_identity_ids": [ai.id], "chunk_size": 100},
expires=AUTH_CHECK_INTERVAL - AUTH_CHECK_SKEW,
)
def test_processes_recursively(self) -> None:
organization = self.create_organization(name="Test")
auth_provider = AuthProvider.objects.create(
organization_id=organization.id, provider="dummy"
)
orig_timing = timezone.now() - timedelta(days=1)
ais = [
AuthIdentity.objects.create(
auth_provider=auth_provider,
user=self.create_user(),
ident=f"user_{i}",
last_synced=orig_timing,
last_verified=orig_timing,
)
for i in range(10)
]
for ai in ais:
self.create_member(
user_id=ai.user_id,
organization=organization,
flags=OrganizationMember.flags["sso:linked"],
)
with self.tasks():
check_auth(chunk_size=3)
for ai in ais:
ai.refresh_from_db()
assert ai.last_verified > orig_timing
@control_silo_test
| CheckAuthTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super2.py | {
"start": 184,
"end": 411
} | class ____:
def __init__(self, **kw: object) -> None:
pass
@classmethod
def factoryA(cls: type[T]) -> T:
return cls()
@classmethod
def get(cls: type[T], key: str) -> T:
return cls()
| A |
python | jazzband__django-formtools | tests/forms.py | {
"start": 426,
"end": 544
} | class ____(forms.Form):
name = forms.CharField()
attachment = forms.FileField(required=False)
| HashTestFormWithFile |
python | apache__airflow | providers/standard/src/airflow/providers/standard/exceptions.py | {
"start": 1046,
"end": 1171
} | class ____(AirflowExternalTaskSensorException):
"""Raised when the external DAG does not exist."""
| ExternalDagNotFoundError |
python | pytorch__pytorch | torch/_inductor/runtime/hints.py | {
"start": 6336,
"end": 7041
} | class ____(typing.NamedTuple):
argtypes: list[HalideInputSpec]
target: str
scheduler: str | None = None
scheduler_flags: dict[str, int | str] | None = None
cuda_device: int | None = None
def args(self) -> list[str]:
"""Command line args to pass to halide generator"""
args = [f"target={self.target}"]
if self.scheduler:
args.append(f"autoscheduler={self.scheduler}")
if self.scheduler_flags:
assert self.scheduler
for k, v in self.scheduler_flags.items():
args.append(f"autoscheduler.{k}={v}")
return args
def is_cuda(self) -> bool:
return self.cuda_device is not None
| HalideMeta |
python | kamyu104__LeetCode-Solutions | Python/apply-operations-to-maximize-score.py | {
"start": 369,
"end": 2753
} | class ____(object):
def maximumScore(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
def linear_sieve_of_eratosthenes(n): # Time: O(n), Space: O(n)
primes = []
spf = [-1]*(n+1) # the smallest prime factor
for i in xrange(2, n+1):
if spf[i] == -1:
spf[i] = i
primes.append(i)
for p in primes:
if i*p > n or p > spf[i]:
break
spf[i*p] = p
return primes # len(primes) = O(n/(logn-1)), reference: https://math.stackexchange.com/questions/264544/how-to-find-number-of-prime-numbers-up-to-to-n
lookup = {}
def count_of_distinct_prime_factors(x):
y = x
if y not in lookup:
cnt = 0
for p in primes:
if p*p > x:
break
if x%p != 0:
continue
cnt += 1
while x%p == 0:
x //= p
if x != 1:
cnt += 1
lookup[y] = cnt
return lookup[y]
primes = linear_sieve_of_eratosthenes(int(max(nums)**0.5))
scores = [count_of_distinct_prime_factors(x) for x in nums]
left = [-1]*len(scores)
stk = [-1]
for i in xrange(len(scores)):
while stk[-1] != -1 and scores[stk[-1]] < scores[i]: # if multiple such elements exist, choose the one with the smallest index
stk.pop()
left[i] = stk[-1]
stk.append(i)
right = [-1]*len(scores)
stk = [len(scores)]
for i in reversed(xrange(len(scores))):
while stk[-1] != len(scores) and scores[stk[-1]] <= scores[i]:
stk.pop()
right[i] = stk[-1]
stk.append(i)
result = 1
max_heap = [(-x, i) for i, x in enumerate(nums)]
heapq.heapify(max_heap)
while max_heap:
_, i = heapq.heappop(max_heap)
c = min((i-left[i])*(right[i]-i), k)
result = (result*pow(nums[i], c, MOD))%MOD
k -= c
if not k:
break
return result
| Solution |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 14011,
"end": 14214
} | class ____:
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
raise NotImplementedError(f"FX codegen not yet supported for type {type(self)}")
@dataclasses.dataclass
| WrapperLine |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/polling.py | {
"start": 1585,
"end": 3941
} | class ____(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat, listdir=os.listdir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def on_thread_start(self):
self._snapshot = self._take_snapshot()
def queue_events(self, timeout):
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
try:
new_snapshot = self._take_snapshot()
except OSError:
self.queue_event(DirDeletedEvent(self.watch.path))
self.stop()
return
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
| PollingEmitter |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/sql/operators.py | {
"start": 675,
"end": 5496
} | class ____(Base):
__tablename__ = "a"
id: Mapped[int]
string: Mapped[str]
arr: Mapped[List[int]] = mapped_column(ARRAY(Integer))
lt1: "ColumnElement[bool]" = A.id > A.id
lt2: "ColumnElement[bool]" = A.id > 1
lt3: "ColumnElement[bool]" = 1 < A.id
le1: "ColumnElement[bool]" = A.id >= A.id
le2: "ColumnElement[bool]" = A.id >= 1
le3: "ColumnElement[bool]" = 1 <= A.id
eq1: "ColumnElement[bool]" = A.id == A.id
eq2: "ColumnElement[bool]" = A.id == 1
# eq3: "ColumnElement[bool]" = 1 == A.id
ne1: "ColumnElement[bool]" = A.id != A.id
ne2: "ColumnElement[bool]" = A.id != 1
# ne3: "ColumnElement[bool]" = 1 != A.id
gt1: "ColumnElement[bool]" = A.id < A.id
gt2: "ColumnElement[bool]" = A.id < 1
gt3: "ColumnElement[bool]" = 1 > A.id
ge1: "ColumnElement[bool]" = A.id <= A.id
ge2: "ColumnElement[bool]" = A.id <= 1
ge3: "ColumnElement[bool]" = 1 >= A.id
# TODO "in" doesn't seem to pick up the typing of __contains__?
# EXPECTED_MYPY: Incompatible types in assignment (expression has type "bool", variable has type "ColumnElement[bool]") # noqa: E501
contains1: "ColumnElement[bool]" = A.id in A.arr
# EXPECTED_MYPY: Incompatible types in assignment (expression has type "bool", variable has type "ColumnElement[bool]") # noqa: E501
contains2: "ColumnElement[bool]" = A.id in A.string
lshift1: "ColumnElement[int]" = A.id << A.id
lshift2: "ColumnElement[int]" = A.id << 1
lshift3: "ColumnElement[Any]" = A.string << 1
rshift1: "ColumnElement[int]" = A.id >> A.id
rshift2: "ColumnElement[int]" = A.id >> 1
rshift3: "ColumnElement[Any]" = A.string >> 1
concat1: "ColumnElement[str]" = A.string.concat(A.string)
concat2: "ColumnElement[str]" = A.string.concat(1)
concat3: "ColumnElement[str]" = A.string.concat("a")
like1: "ColumnElement[bool]" = A.string.like("test")
like2: "ColumnElement[bool]" = A.string.like("test", escape="/")
ilike1: "ColumnElement[bool]" = A.string.ilike("test")
ilike2: "ColumnElement[bool]" = A.string.ilike("test", escape="/")
in_: "ColumnElement[bool]" = A.id.in_([1, 2])
not_in: "ColumnElement[bool]" = A.id.not_in([1, 2])
not_like1: "ColumnElement[bool]" = A.string.not_like("test")
not_like2: "ColumnElement[bool]" = A.string.not_like("test", escape="/")
not_ilike1: "ColumnElement[bool]" = A.string.not_ilike("test")
not_ilike2: "ColumnElement[bool]" = A.string.not_ilike("test", escape="/")
is_: "ColumnElement[bool]" = A.string.is_("test")
is_not: "ColumnElement[bool]" = A.string.is_not("test")
startswith: "ColumnElement[bool]" = A.string.startswith("test")
endswith: "ColumnElement[bool]" = A.string.endswith("test")
contains: "ColumnElement[bool]" = A.string.contains("test")
match: "ColumnElement[bool]" = A.string.match("test")
regexp_match: "ColumnElement[bool]" = A.string.regexp_match("test")
regexp_replace: "ColumnElement[str]" = A.string.regexp_replace(
"pattern", "replacement"
)
between: "ColumnElement[bool]" = A.string.between("a", "b")
adds: "ColumnElement[str]" = A.string + A.string
add1: "ColumnElement[int]" = A.id + A.id
add2: "ColumnElement[int]" = A.id + 1
add3: "ColumnElement[int]" = 1 + A.id
add_date: "ColumnElement[dt.date]" = func.current_date() + dt.timedelta(days=1)
add_datetime: "ColumnElement[dt.datetime]" = (
func.current_timestamp() + dt.timedelta(seconds=1)
)
sub1: "ColumnElement[int]" = A.id - A.id
sub2: "ColumnElement[int]" = A.id - 1
sub3: "ColumnElement[int]" = 1 - A.id
mul1: "ColumnElement[int]" = A.id * A.id
mul2: "ColumnElement[int]" = A.id * 1
mul3: "ColumnElement[int]" = 1 * A.id
div1: "ColumnElement[float|Decimal]" = A.id / A.id
div2: "ColumnElement[float|Decimal]" = A.id / 1
div3: "ColumnElement[float|Decimal]" = 1 / A.id
mod1: "ColumnElement[int]" = A.id % A.id
mod2: "ColumnElement[int]" = A.id % 1
mod3: "ColumnElement[int]" = 1 % A.id
# unary
neg: "ColumnElement[int]" = -A.id
desc: "ColumnElement[int]" = A.id.desc()
asc: "ColumnElement[int]" = A.id.asc()
any_: "ColumnElement[bool]" = A.id.any_()
all_: "ColumnElement[bool]" = A.id.all_()
nulls_first: "ColumnElement[int]" = A.id.nulls_first()
nulls_last: "ColumnElement[int]" = A.id.nulls_last()
collate: "ColumnElement[str]" = A.string.collate("somelang")
distinct: "ColumnElement[int]" = A.id.distinct()
# custom ops
col = column("flags", Integer)
op_a: "ColumnElement[Any]" = col.op("&")(1)
op_b: "ColumnElement[int]" = col.op("&", return_type=Integer)(1)
op_c: "ColumnElement[str]" = col.op("&", return_type=String)("1")
op_d: "ColumnElement[int]" = col.op("&", return_type=BigInteger)("1")
op_e: "ColumnElement[bool]" = col.bool_op("&")("1")
op_a1 = col.op("&")(1)
assert_type(op_a1, BinaryExpression[Any])
# op functions
t1 = operators.eq(A.id, 1)
select().where(t1)
assert_type(col.op("->>")("field"), BinaryExpression[Any])
assert_type(
col.op("->>")("field").self_group(), BinaryExpression[Any] | Grouping[Any]
)
| A |
python | pandas-dev__pandas | pandas/tests/indexes/object/test_indexing.py | {
"start": 2520,
"end": 6392
} | class ____:
def test_get_indexer_non_unique_nas(self, nulls_fixture):
# even though this isn't non-unique, this should still work
index = Index(["a", "b", nulls_fixture], dtype=object)
indexer, missing = index.get_indexer_non_unique([nulls_fixture])
expected_indexer = np.array([2], dtype=np.intp)
expected_missing = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
# actually non-unique
index = Index(["a", nulls_fixture, "b", nulls_fixture], dtype=object)
indexer, missing = index.get_indexer_non_unique([nulls_fixture])
expected_indexer = np.array([1, 3], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
# matching-but-not-identical nans
if is_matching_na(nulls_fixture, float("NaN")):
index = Index(["a", float("NaN"), "b", float("NaN")], dtype=object)
match_but_not_identical = True
elif is_matching_na(nulls_fixture, Decimal("NaN")):
index = Index(["a", Decimal("NaN"), "b", Decimal("NaN")], dtype=object)
match_but_not_identical = True
else:
match_but_not_identical = False
if match_but_not_identical:
indexer, missing = index.get_indexer_non_unique([nulls_fixture])
expected_indexer = np.array([1, 3], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
def test_get_indexer_non_unique_np_nats(self, np_nat_fixture, np_nat_fixture2):
expected_missing = np.array([], dtype=np.intp)
# matching-but-not-identical nats
if is_matching_na(np_nat_fixture, np_nat_fixture2):
# ensure nats are different objects
index = Index(
np.array(
["2021-10-02", np_nat_fixture.copy(), np_nat_fixture2.copy()],
dtype=object,
),
dtype=object,
)
# pass as index to prevent target from being casted to DatetimeIndex
indexer, missing = index.get_indexer_non_unique(
Index([np_nat_fixture], dtype=object)
)
expected_indexer = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
# dt64nat vs td64nat
else:
try:
np_nat_fixture == np_nat_fixture2
except (TypeError, OverflowError):
# Numpy will raise on uncomparable types, like
# np.datetime64('NaT', 'Y') and np.datetime64('NaT', 'ps')
# https://github.com/numpy/numpy/issues/22762
return
index = Index(
np.array(
[
"2021-10-02",
np_nat_fixture,
np_nat_fixture2,
np_nat_fixture,
np_nat_fixture2,
],
dtype=object,
),
dtype=object,
)
# pass as index to prevent target from being casted to DatetimeIndex
indexer, missing = index.get_indexer_non_unique(
Index([np_nat_fixture], dtype=object)
)
expected_indexer = np.array([1, 3], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
| TestGetIndexerNonUnique |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 93748,
"end": 94901
} | class ____(DataDocumenterMixinBase):
"""Mixin for AttributeDocumenter to provide the feature for supporting non
data-descriptors.
.. note:: This mix-in must be inherited after other mix-ins. Otherwise, docstring
and :value: header will be suppressed unexpectedly.
"""
def import_object(self, raiseerror: bool = False) -> bool:
ret = super().import_object(raiseerror) # type: ignore[misc]
if ret and not inspect.isattributedescriptor(self.object):
self.non_data_descriptor = True
else:
self.non_data_descriptor = False
return ret
def should_suppress_value_header(self) -> bool:
return (
not getattr(self, 'non_data_descriptor', False)
or super().should_suppress_directive_header()
)
def get_doc(self) -> list[list[str]] | None:
if getattr(self, 'non_data_descriptor', False):
# the docstring of non datadescriptor is very probably the wrong thing
# to display
return None
else:
return super().get_doc() # type: ignore[misc]
| NonDataDescriptorMixin |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 133419,
"end": 134140
} | class ____(Operation):
def call(self, x):
return backend.numpy.log10(x)
def compute_output_spec(self, x):
dtype = (
backend.floatx()
if backend.standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.log10", "keras.ops.numpy.log10"])
def log10(x):
"""Return the base 10 logarithm of the input tensor, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise base 10 logarithm of `x`.
"""
if any_symbolic_tensors((x,)):
return Log10().symbolic_call(x)
return backend.numpy.log10(x)
| Log10 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 33126,
"end": 33345
} | class ____:
inside_string: bool = False
escape_next_character: bool = False
collected_string_chars: List[str] = field(default_factory=list)
last_parsed_key: Optional[str] = None
@dataclass
| StringParseState |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 15431,
"end": 15690
} | class ____:
def setup(self):
N = 500_000
cols = 500
self.df = DataFrame(np.random.rand(N, cols))
def time_setitem(self):
self.df[100] = 100
def time_setitem_list(self):
self.df[[100, 200, 300]] = 100
| Setitem |
python | numba__numba | numba/cuda/tests/cudadrv/test_emm_plugins.py | {
"start": 3845,
"end": 6683
} | class ____(CUDATestCase):
"""
Tests that the API of an EMM Plugin that implements device allocations
only is used correctly by Numba.
"""
def setUp(self):
super().setUp()
# Always start afresh with a new context and memory manager
cuda.close()
cuda.set_memory_manager(DeviceOnlyEMMPlugin)
def tearDown(self):
super().tearDown()
# Unset the memory manager for subsequent tests
cuda.close()
cuda.cudadrv.driver._memory_manager = None
def test_memalloc(self):
mgr = cuda.current_context().memory_manager
# Allocate an array and check that memalloc was called with the correct
# size.
arr_1 = np.arange(10)
d_arr_1 = cuda.device_array_like(arr_1)
self.assertTrue(mgr.memalloc_called)
self.assertEqual(mgr.count, 1)
self.assertEqual(mgr.allocations[1], arr_1.nbytes)
# Allocate again, with a different size, and check that it is also
# correct.
arr_2 = np.arange(5)
d_arr_2 = cuda.device_array_like(arr_2)
self.assertEqual(mgr.count, 2)
self.assertEqual(mgr.allocations[2], arr_2.nbytes)
# Remove the first array, and check that our finalizer was called for
# the first array only.
del d_arr_1
self.assertNotIn(1, mgr.allocations)
self.assertIn(2, mgr.allocations)
# Remove the second array and check that its finalizer was also
# called.
del d_arr_2
self.assertNotIn(2, mgr.allocations)
def test_initialized_in_context(self):
# If we have a CUDA context, it should already have initialized its
# memory manager.
self.assertTrue(cuda.current_context().memory_manager.initialized)
def test_reset(self):
ctx = cuda.current_context()
ctx.reset()
self.assertTrue(ctx.memory_manager.reset_called)
def test_get_memory_info(self):
ctx = cuda.current_context()
meminfo = ctx.get_memory_info()
self.assertTrue(ctx.memory_manager.get_memory_info_called)
self.assertEqual(meminfo.free, 32)
self.assertEqual(meminfo.total, 64)
@linux_only
def test_get_ipc_handle(self):
# We don't attempt to close the IPC handle in this test because Numba
# will be expecting a real IpcHandle object to have been returned from
# get_ipc_handle, and it would cause problems to do so.
arr = np.arange(2)
d_arr = cuda.device_array_like(arr)
ipch = d_arr.get_ipc_handle()
ctx = cuda.current_context()
self.assertTrue(ctx.memory_manager.get_ipc_handle_called)
self.assertIn("Dummy IPC handle for alloc 1", ipch._ipc_handle)
@skip_on_cudasim('EMM Plugins not supported on CUDA simulator')
| TestDeviceOnlyEMMPlugin |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/axis3d.py | {
"start": 29058,
"end": 29327
} | class ____(Axis):
axis_name = "z"
get_view_interval, set_view_interval = maxis._make_getset_interval(
"view", "zz_viewLim", "intervalx")
get_data_interval, set_data_interval = maxis._make_getset_interval(
"data", "zz_dataLim", "intervalx")
| ZAxis |
python | pytorch__pytorch | test/test_typing.py | {
"start": 4597,
"end": 7678
} | class ____(TestCase):
_lock = Lock()
_cached_output: Optional[dict[str, list[str]]] = None
@classmethod
def get_mypy_output(cls) -> dict[str, list[str]]:
with cls._lock:
if cls._cached_output is None:
cls._cached_output = _run_mypy()
return cls._cached_output
@parametrize(
"path",
get_test_cases(PASS_DIR),
name_fn=lambda b: os.path.relpath(b, start=PASS_DIR),
)
def test_success(self, path) -> None:
output_mypy = self.get_mypy_output()
if path in output_mypy:
msg = "Unexpected mypy output\n\n"
msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
raise AssertionError(msg)
@parametrize(
"path",
get_test_cases(FAIL_DIR),
name_fn=lambda b: os.path.relpath(b, start=FAIL_DIR),
)
def test_fail(self, path):
__tracebackhide__ = True # noqa: F841
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(str)
output_mypy = self.get_mypy_output()
self.assertIn(path, output_mypy)
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+):(?P<colno>\d+): (error|note): .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected error line format: {error_line}")
lineno = int(match.group("lineno"))
errors[lineno] += f"{error_line}\n"
for i, line in enumerate(lines):
lineno = i + 1
if line.startswith("#") or (" E:" not in line and lineno not in errors):
continue
target_line = lines[lineno - 1]
self.assertIn(
"# E:", target_line, f"Unexpected mypy output\n\n{errors[lineno]}"
)
marker = target_line.split("# E:")[-1].strip()
expected_error = errors.get(lineno)
_test_fail(path, marker, expected_error, lineno)
@parametrize(
"path",
get_test_cases(REVEAL_DIR),
name_fn=lambda b: os.path.relpath(b, start=REVEAL_DIR),
)
def test_reveal(self, path):
__tracebackhide__ = True # noqa: F841
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = self.get_mypy_output()
assert path in output_mypy
for error_line in output_mypy[path]:
match = re.match(
r"^.+\.py:(?P<lineno>\d+):(?P<colno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group("lineno")) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
instantiate_parametrized_tests(TestTyping)
if __name__ == "__main__":
run_tests()
| TestTyping |
python | huggingface__transformers | tests/models/opt/test_modeling_opt.py | {
"start": 14632,
"end": 16191
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.path_model = "facebook/opt-350m"
def test_load_model(self):
try:
_ = OPTForCausalLM.from_pretrained(self.path_model)
except BaseException:
self.fail("Failed loading model")
def test_logits(self):
model = OPTForCausalLM.from_pretrained(self.path_model)
model = model.eval()
tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
prompts = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
inputs = tokenizer(prompts, return_tensors="pt", padding=True, add_special_tokens=False)
logits = model(inputs.input_ids, attention_mask=inputs.attention_mask)[0].mean(dim=-1)
logits_meta = torch.Tensor(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
]
)
assert torch.allclose(logits, logits_meta, atol=1e-4)
@slow
| OPTEmbeddingsTest |
python | apache__airflow | airflow-core/src/airflow/metrics/validators.py | {
"start": 10837,
"end": 11148
} | class ____(ListValidator):
"""Only allow names that do not match the blocked strings."""
def test(self, name: str) -> bool:
if self.validate_list is not None:
return not super()._has_pattern_match(name)
return True # default is all metrics are allowed
| PatternBlockListValidator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/integration/streams/test_credit_adjustments.py | {
"start": 513,
"end": 1848
} | class ____(StreamTestCase):
_STREAM_NAME = "credit_adjustments"
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
output = read_full_refresh(self._config, _STREAM_NAME)
assert len(output.records) == 1
@HttpMocker()
def test_given_multiple_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
self.stream_request().with_limit(250).with_next_page_token(NEXT_PAGE_TOKEN).build(),
get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME).with_pagination().with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
output = read_full_refresh(self._config, _STREAM_NAME)
assert len(output.records) == 2
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | ZoranPandovski__al-go-rithms | cryptography/porta_cipher/python/porta.py | {
"start": 119,
"end": 2732
} | class ____(Cipher):
"""The Porta Cipher is a polyalphabetic substitution cipher, and has a key consisting of a word e.g. 'FORTIFICATION'.
    :param key: The keyword, any word or phrase will do. Must consist of alphabetical characters only, no punctuation or numbers.
"""
def __init__(self,key='FORTIFICATION'):
self.key = [k.upper() for k in key]
def encipher(self,string):
"""Encipher string using Porta cipher according to initialised key. Punctuation and whitespace
are removed from the input.
Example::
ciphertext = Porta('HELLO').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string.
"""
string = self.remove_punctuation(string)
ret = ''
for (i,c) in enumerate(string):
i = i%len(self.key)
if self.key[i] in 'AB': ret += 'NOPQRSTUVWXYZABCDEFGHIJKLM'[self.a2i(c)]
elif self.key[i] in 'YZ': ret += 'ZNOPQRSTUVWXYBCDEFGHIJKLMA'[self.a2i(c)]
elif self.key[i] in 'WX': ret += 'YZNOPQRSTUVWXCDEFGHIJKLMAB'[self.a2i(c)]
elif self.key[i] in 'UV': ret += 'XYZNOPQRSTUVWDEFGHIJKLMABC'[self.a2i(c)]
elif self.key[i] in 'ST': ret += 'WXYZNOPQRSTUVEFGHIJKLMABCD'[self.a2i(c)]
elif self.key[i] in 'QR': ret += 'VWXYZNOPQRSTUFGHIJKLMABCDE'[self.a2i(c)]
elif self.key[i] in 'OP': ret += 'UVWXYZNOPQRSTGHIJKLMABCDEF'[self.a2i(c)]
elif self.key[i] in 'MN': ret += 'TUVWXYZNOPQRSHIJKLMABCDEFG'[self.a2i(c)]
elif self.key[i] in 'KL': ret += 'STUVWXYZNOPQRIJKLMABCDEFGH'[self.a2i(c)]
elif self.key[i] in 'IJ': ret += 'RSTUVWXYZNOPQJKLMABCDEFGHI'[self.a2i(c)]
elif self.key[i] in 'GH': ret += 'QRSTUVWXYZNOPKLMABCDEFGHIJ'[self.a2i(c)]
elif self.key[i] in 'EF': ret += 'PQRSTUVWXYZNOLMABCDEFGHIJK'[self.a2i(c)]
elif self.key[i] in 'CD': ret += 'OPQRSTUVWXYZNMABCDEFGHIJKL'[self.a2i(c)]
return ret
def decipher(self,string):
"""Decipher string using Porta cipher according to initialised key. Punctuation and whitespace
are removed from the input. For the Porta cipher, enciphering and deciphering are the same operation.
Example::
plaintext = Porta('HELLO').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string.
"""
return self.encipher(string)
if __name__ == '__main__':
print('use "import pycipher" to access functions')
| Porta |
python | PrefectHQ__prefect | src/prefect/server/database/orm_models.py | {
"start": 43504,
"end": 44441
} | class ____(Base):
__table_args__: Any = (
sa.Index(
"uq_automation_bucket__automation_id__trigger_id__bucketing_key",
"automation_id",
"trigger_id",
"bucketing_key",
unique=True,
),
sa.Index(
"ix_automation_bucket__automation_id__end",
"automation_id",
"end",
),
)
automation_id: Mapped[uuid.UUID] = mapped_column(
sa.ForeignKey("automation.id", ondelete="CASCADE")
)
trigger_id: Mapped[uuid.UUID]
bucketing_key: Mapped[list[str]] = mapped_column(
JSON, server_default="[]", default=list
)
last_event: Mapped[Optional[ReceivedEvent]] = mapped_column(Pydantic(ReceivedEvent))
start: Mapped[DateTime]
end: Mapped[DateTime]
count: Mapped[int]
last_operation: Mapped[Optional[str]]
triggered_at: Mapped[Optional[DateTime]]
| AutomationBucket |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_annotated.py | {
"start": 336,
"end": 911
} | class ____:
a: Annotated[Optional[float], Color.RED] = None
b: Annotated[Optional[float], Color.BLUE] = None
x: Annotated[Optional[float], "foo", Color.RED] = None
y: Annotated[Optional[float], Color.BLUE, Color.RED] = None
def test1_alarm1(c: Test1_C) -> None:
c.a = 1.01
c.b = 1.01
_test_sink(c.a)
def test1_alarm2(c: Test1_C) -> None:
c.a = 1.01
c.b = 1.01
_test_sink(c.b)
def test1_alarm3(c: Test1_C) -> None:
c.x = 1.01
_test_sink(c.x)
def test1_alarm4(c: Test1_C) -> None:
c.y = 1.01
_test_sink(c.y)
| Test1_C |
python | wandb__wandb | wandb/automations/events.py | {
"start": 10304,
"end": 10426
} | class ____(_BaseEventInput):
scope: ProjectScope
"""The scope of the event: must be a project."""
| _BaseRunEventInput |
python | scipy__scipy | scipy/linalg/tests/test_matfuncs.py | {
"start": 1446,
"end": 3106
} | class ____:
def test_nils(self):
a = array([[29.2, -24.2, 69.5, 49.8, 7.],
[-9.2, 5.2, -18., -16.8, -2.],
[-10., 6., -20., -18., -2.],
[-9.6, 9.6, -25.5, -15.4, -2.],
[9.8, -4.8, 18., 18.2, 2.]])
cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
[-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
[-4.08,0.56,-4.92,-7.6,0.56],
[-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
[4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
r = signm(a)
assert_array_almost_equal(r,cr)
def test_defective1(self):
a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
signm(a)
#XXX: what would be the correct result?
def test_defective2(self):
a = array((
[29.2,-24.2,69.5,49.8,7.0],
[-9.2,5.2,-18.0,-16.8,-2.0],
[-10.0,6.0,-20.0,-18.0,-2.0],
[-9.6,9.6,-25.5,-15.4,-2.0],
[9.8,-4.8,18.0,18.2,2.0]))
signm(a)
#XXX: what would be the correct result?
def test_defective3(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
signm(a)
#XXX: what would be the correct result?
| TestSignM |
python | getsentry__sentry | src/sentry/services/eventstore/processing/redis.py | {
"start": 308,
"end": 721
} | class ____(EventProcessingStore):
"""
Creates an instance of the processing store which uses a Redis Cluster
client as its backend.
"""
def __init__(self, **options: Any) -> None:
super().__init__(
KVStorageCodecWrapper(
RedisKVStorage(redis_clusters.get(options.pop("cluster", "default"))), JSONCodec()
)
)
| RedisClusterEventProcessingStore |
python | django__django | tests/migrations/test_migrations_squashed_extra/0001_squashed_0002.py | {
"start": 35,
"end": 176
} | class ____(migrations.Migration):
replaces = [
("migrations", "0001_initial"),
("migrations", "0002_second"),
]
| Migration |
python | pallets__werkzeug | examples/i18nurls/application.py | {
"start": 564,
"end": 1066
} | class ____(BaseRequest):
def __init__(self, environ, urls):
super().__init__(environ)
self.urls = urls
self.matched_url = None
def url_for(self, endpoint, **args):
if "lang_code" not in args:
args["lang_code"] = self.language
if endpoint == "this":
endpoint = self.matched_url[0]
tmp = self.matched_url[1].copy()
tmp.update(args)
args = tmp
return self.urls.build(endpoint, args)
| Request |
python | pyinstaller__pyinstaller | tests/functional/scripts/pyi_future.py | {
"start": 1714,
"end": 3362
} | class ____(list):
def append(self, item):
print('Adding an item')
super().append(item)
# Fix: this fails on 32-bit Python. The traceback::
#
# E:\pyinstaller>python tests\functional\scripts\pyi_future.py
# Traceback (most recent call last):
# File "tests\functional\scripts\pyi_future.py", line 66, in <module>
# for i in range(10**15)[:10]:
# File "C:\Users\user\python-2.7.10\lib\site-packages\future\types\newrange.py", line 122, in __getitem__
# return self.__getitem_slice(index)
# File "C:\Users\user\python-2.7.10\lib\site-packages\future\types\newrange.py", line 134, in __getitem_slice
# scaled_indices = (self._step * n for n in slce.indices(self._len))
# OverflowError: cannot fit 'long' into an index-sized integer
#
# So, pick a smaller (32-bit capable) range to iterate over.
#
# New iterable range object with slicing support
for i in range(2**30)[:10]:
pass
# Other iterators: map, zip, filter
my_iter = zip(range(3), ['a', 'b', 'c'])
assert my_iter != list(my_iter)
# The round() function behaves as it does in Python 3, using "Banker's Rounding" to the nearest even last digit:
assert round(0.1250, 2) == 0.12
# pow() supports fractional exponents of negative numbers like in Py3:
z = pow(-1, 0.5)
# Compatible output from isinstance() across Py2/3:
assert isinstance(2**64, int) # long integers
assert isinstance('blah', str)
assert isinstance('blah', str) # only if unicode_literals is in effect
# Py3-style iterators written as new-style classes (subclasses of future.types.newobject) are automatically
# backward-compatible with Py2:
| VerboseList |
python | huggingface__transformers | src/transformers/models/aimv2/modeling_aimv2.py | {
"start": 4647,
"end": 5348
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
| Aimv2MLP |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 11635,
"end": 12142
} | class ____(Predicate):
"""
Unit triangular matrix predicate.
Explanation
===========
A unit triangular matrix is a triangular matrix with 1s
on the diagonal.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.triangular(X), Q.unit_triangular(X))
True
"""
name = "unit_triangular"
    handler = Dispatcher("UnitTriangularHandler", doc="Predicate for key 'unit_triangular'.")
| UnitTriangularPredicate |
python | getsentry__sentry | tests/sentry/plugins/bases/test_issue.py | {
"start": 306,
"end": 1280
} | class ____(TestCase):
def _get_mock_user(self) -> mock.Mock:
user = mock.Mock(spec=User(id=1))
user.is_authenticated = False
return user
def test_requires_auth_provider(self) -> None:
user = self._get_mock_user()
p = IssueTrackingPlugin()
pytest.raises(AssertionError, p.get_auth_for_user, user)
def test_returns_none_on_missing_identity(self) -> None:
user = self._get_mock_user()
p = IssueTrackingPlugin()
p.auth_provider = "test"
self.assertEqual(p.get_auth_for_user(user), None)
def test_returns_identity(self) -> None:
user = User.objects.create(username="test", email="test@example.com")
auth = UserSocialAuth.objects.create(provider="test", user=user)
p = IssueTrackingPlugin()
p.auth_provider = "test"
got_auth = p.get_auth_for_user(user)
assert got_auth is not None
assert got_auth.id == auth.id
| GetAuthForUserTest |
python | bokeh__bokeh | src/bokeh/core/property_mixins.py | {
"start": 8792,
"end": 8994
} | class ____(HasProps):
''' Properties relevant to rendering images.
Mirrors the BokehJS ``properties.Image`` class.
'''
global_alpha = Alpha(help=_alpha_help % "images")
| ScalarImageProps |
python | bokeh__bokeh | src/bokeh/core/property/wrappers.py | {
"start": 10880,
"end": 18579
} | class ____(PropertyValueDict[Sequence[Any]]):
""" A property value container for ColumnData that supports change
notifications on mutating operations.
This property value container affords specialized code paths for
updating the .data dictionary for ColumnDataSource. When possible,
more efficient ColumnDataChangedEvent hints are generated to perform
the updates:
.. code-block:: python
x[i] = y
x.update
"""
# x[i] = y
# don't wrap with notify_owner --- notifies owners explicitly
def __setitem__(self, i, y):
return self.update([(i, y)])
def __copy__(self):
return PropertyValueColumnData(dict(self))
def __deepcopy__(self, memodict={}):
return PropertyValueColumnData(copy.deepcopy(dict(self), memodict))
# don't wrap with notify_owner --- notifies owners explicitly
def update(self, *args, **kwargs):
old = self._saved_copy()
# call dict.update directly, bypass wrapped version on base class
result = dict.update(self, *args, **kwargs)
from ...document.events import ColumnDataChangedEvent
# Grab keys to update according to Python docstring for update([E, ]**F)
#
# If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]
# If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v
# In either case, this is followed by: for k in F: D[k] = F[k]
cols = set(kwargs.keys())
if len(args) == 1:
E = args[0]
if hasattr(E, 'keys'):
cols |= set(E.keys())
else:
cols |= { x[0] for x in E }
# we must loop ourselves here instead of calling _notify_owners
# because the hint is customized for each owner separately
for (owner, descriptor) in self._owners:
hint = ColumnDataChangedEvent(owner.document, owner, "data", cols=list(cols))
descriptor._notify_mutated(owner, old, hint=hint)
return result
# don't wrap with notify_owner --- notifies owners explicitly
def _stream(self, doc: Document, source: ColumnarDataSource, new_data: dict[str, Sequence[Any] | npt.NDArray[Any]],
rollover: int | None = None, setter: Setter | None = None) -> None:
""" Internal implementation to handle special-casing stream events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.stream`` method on column data sources exists to provide a
more efficient way to perform streaming (i.e. append-only) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsStreamedEvent`` hint to the message containing
only the small streamed data that BokehJS needs in order to
efficiently synchronize.
.. warning::
This function assumes the integrity of ``new_data`` has already
been verified.
"""
old = self._saved_copy()
# TODO (bev) Currently this reports old differently for array vs list
# For arrays is reports the actual old value. For lists, the old value
# is actually the already updated value. This is because the method
# self._saved_copy() makes a shallow copy.
for k in new_data:
old_seq = self[k]
new_seq = new_data[k]
if isinstance(old_seq, np.ndarray) or isinstance(new_seq, np.ndarray):
# Special case for streaming with empty arrays, to allow this:
#
# data_source = ColumnDataSource(data={"DateTime": []})
# data_source.stream({"DateTime": np.array([np.datetime64("now")]))
#
# See https://github.com/bokeh/bokeh/issues/14004.
if len(old_seq) == 0:
seq = new_seq
elif len(new_seq) == 0:
seq = old_seq
else:
seq = np.append(old_seq, new_seq)
if rollover is not None and len(seq) > rollover:
seq = seq[len(seq) - rollover:]
# call dict.__setitem__ directly, bypass wrapped version on base class
dict.__setitem__(self, k, seq)
else:
def apply_rollover(seq: MutableSequence[Any]) -> None:
if rollover is not None and len(seq) > rollover:
del seq[:len(seq) - rollover]
if isinstance(old_seq, MutableSequence):
seq = old_seq
seq.extend(new_seq)
apply_rollover(seq)
else:
seq = [*old_seq, *new_seq]
apply_rollover(seq)
dict.__setitem__(self, k, seq)
from ...document.events import ColumnsStreamedEvent
self._notify_owners(old, hint=ColumnsStreamedEvent(doc, source, "data", new_data, rollover, setter))
# don't wrap with notify_owner --- notifies owners explicitly
def _patch(self, doc: Document, source: ColumnarDataSource, patches, setter: Setter | None = None) -> None:
""" Internal implementation to handle special-casing patch events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.patch`` method on column data sources exists to provide a
more efficient way to perform patching (i.e. random access) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsPatchedEvent`` hint to the message containing
only the small patched data that BokehJS needs in order to efficiently
synchronize.
.. warning::
This function assumes the integrity of ``patches`` has already
been verified.
"""
old = self._saved_copy()
for name, patch in patches.items():
for ind, value in patch:
if isinstance(ind, (int, slice)):
self[name][ind] = value
else:
shape = self[name][ind[0]][tuple(ind[1:])].shape
self[name][ind[0]][tuple(ind[1:])] = np.asarray(value).reshape(shape)
from ...document.events import ColumnsPatchedEvent
self._notify_owners(old, hint=ColumnsPatchedEvent(doc, source, "data", patches, setter))
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| PropertyValueColumnData |
python | doocs__leetcode | solution/0700-0799/0782.Transform to Chessboard/Solution.py | {
"start": 0,
"end": 1568
} | class ____:
def movesToChessboard(self, board: List[List[int]]) -> int:
def f(mask, cnt):
ones = mask.bit_count()
if n & 1:
if abs(n - 2 * ones) != 1 or abs(n - 2 * cnt) != 1:
return -1
if ones == n // 2:
return n // 2 - (mask & 0xAAAAAAAA).bit_count()
return (n + 1) // 2 - (mask & 0x55555555).bit_count()
else:
if ones != n // 2 or cnt != n // 2:
return -1
cnt0 = n // 2 - (mask & 0xAAAAAAAA).bit_count()
cnt1 = n // 2 - (mask & 0x55555555).bit_count()
return min(cnt0, cnt1)
n = len(board)
mask = (1 << n) - 1
rowMask = colMask = 0
for i in range(n):
rowMask |= board[0][i] << i
colMask |= board[i][0] << i
revRowMask = mask ^ rowMask
revColMask = mask ^ colMask
sameRow = sameCol = 0
for i in range(n):
curRowMask = curColMask = 0
for j in range(n):
curRowMask |= board[i][j] << j
curColMask |= board[j][i] << j
if curRowMask not in (rowMask, revRowMask) or curColMask not in (
colMask,
revColMask,
):
return -1
sameRow += curRowMask == rowMask
sameCol += curColMask == colMask
t1 = f(rowMask, sameRow)
t2 = f(colMask, sameCol)
return -1 if t1 == -1 or t2 == -1 else t1 + t2
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1393565,
"end": 1398487
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable, Reactable):
"""A release contains the content for a release."""
__schema__ = github_schema
__field_names__ = (
"author",
"created_at",
"description",
"description_html",
"is_draft",
"is_latest",
"is_prerelease",
"mentions",
"name",
"published_at",
"release_assets",
"repository",
"short_description_html",
"tag",
"tag_commit",
"tag_name",
"updated_at",
)
author = sgqlc.types.Field("User", graphql_name="author")
"""The author of the release"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The description of the release."""
description_html = sgqlc.types.Field(HTML, graphql_name="descriptionHTML")
"""The description of this release rendered to HTML."""
is_draft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDraft")
"""Whether or not the release is a draft"""
is_latest = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isLatest")
"""Whether or not the release is the latest releast"""
is_prerelease = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPrerelease")
"""Whether or not the release is a prerelease"""
mentions = sgqlc.types.Field(
UserConnection,
graphql_name="mentions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users mentioned in the release description
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
name = sgqlc.types.Field(String, graphql_name="name")
"""The title of the release."""
published_at = sgqlc.types.Field(DateTime, graphql_name="publishedAt")
"""Identifies the date and time when the release was created."""
release_assets = sgqlc.types.Field(
sgqlc.types.non_null(ReleaseAssetConnection),
graphql_name="releaseAssets",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("name", sgqlc.types.Arg(String, graphql_name="name", default=None)),
)
),
)
"""List of releases assets which are dependent on this release.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `name` (`String`): A list of names to filter the assets by.
"""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The repository that the release belongs to."""
short_description_html = sgqlc.types.Field(
HTML,
graphql_name="shortDescriptionHTML",
args=sgqlc.types.ArgDict((("limit", sgqlc.types.Arg(Int, graphql_name="limit", default=200)),)),
)
"""A description of the release, rendered to HTML without any links
in it.
Arguments:
* `limit` (`Int`): How many characters to return. (default: `200`)
"""
tag = sgqlc.types.Field(Ref, graphql_name="tag")
"""The Git tag the release points to"""
tag_commit = sgqlc.types.Field(Commit, graphql_name="tagCommit")
"""The tag commit for this release."""
tag_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="tagName")
"""The name of the release's Git tag"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| Release |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_captions.py | {
"start": 47076,
"end": 48282
} | class ____(util.MdCase):
"""Test Blocks caption cases with `auto` level."""
extension = ['pymdownx.blocks.caption']
extension_configs = {
'pymdownx.blocks.caption': {
'types': [
'caption',
{
'name': 'figure-caption',
'prefix': 'Figure <span class="caption-num">{}</span>.'
},
{
'name': 'table-caption',
'prefix': 'Table <span class="caption-num">{}</span>.'
}
]
}
}
def test_custom_prefix(self):
"""Test custom prefix."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption
This is the caption.
///
''',
R'''
<figure id="__figure-caption_1">
<p>A paragraph with a caption.</p>
<figcaption>
<p><span class="caption-prefix">Figure <span class="caption-num">1</span>.</span> This is the caption.</p>
</figcaption>
</figure>
''',
True
)
| TestBlocksCaptionCustomPrefix |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_task_runs.py | {
"start": 7429,
"end": 8524
} | class ____:
async def test_read_task_run(self, flow_run, task_run, client):
# make sure we we can read the task run correctly
response = await client.get(f"/task_runs/{task_run.id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(task_run.id)
assert response.json()["flow_run_id"] == str(flow_run.id)
async def test_read_flow_run_with_state(self, task_run, client, session):
state_id = uuid4()
(
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=states.State(id=state_id, type="RUNNING"),
)
).state
await client.get(f"/task_runs/{task_run.id}")
assert task_run.state.type.value == "RUNNING"
assert task_run.state.id == state_id
async def test_read_task_run_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/task_runs/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadTaskRun |
python | doocs__leetcode | solution/0400-0499/0410.Split Array Largest Sum/Solution.py | {
"start": 0,
"end": 404
} | class ____:
def splitArray(self, nums: List[int], k: int) -> int:
def check(mx):
s, cnt = inf, 0
for x in nums:
s += x
if s > mx:
s = x
cnt += 1
return cnt <= k
left, right = max(nums), sum(nums)
return left + bisect_left(range(left, right + 1), True, key=check)
| Solution |
python | openai__openai-python | src/openai/types/responses/tool_choice_allowed_param.py | {
"start": 256,
"end": 1107
} | class ____(TypedDict, total=False):
mode: Required[Literal["auto", "required"]]
"""Constrains the tools available to the model to a pre-defined set.
`auto` allows the model to pick from among the allowed tools and generate a
message.
`required` requires the model to call one or more of the allowed tools.
"""
tools: Required[Iterable[Dict[str, object]]]
"""A list of tool definitions that the model should be allowed to call.
For the Responses API, the list of tool definitions might look like:
```json
[
{ "type": "function", "name": "get_weather" },
{ "type": "mcp", "server_label": "deepwiki" },
{ "type": "image_generation" }
]
```
"""
type: Required[Literal["allowed_tools"]]
"""Allowed tool configuration type. Always `allowed_tools`."""
| ToolChoiceAllowedParam |
python | getsentry__sentry | src/sentry_plugins/slack/plugin.py | {
"start": 784,
"end": 10055
} | class ____(CorePluginMixin, notify.NotificationPlugin):
title = "Slack"
slug = "slack"
description = "Post notifications to a Slack channel."
conf_key = "slack"
required_field = "webhook"
feature_descriptions = [
FeatureDescription(
"""
Configure rule based Slack notifications to automatically be posted into a
specific channel. Want any error that's happening more than 100 times a
minute to be posted in `#critical-errors`? Setup a rule for it!
""",
IntegrationFeatures.ALERT_RULE,
)
]
def is_configured(self, project) -> bool:
return bool(self.get_option("webhook", project))
def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False):
return [
{
"name": "webhook",
"label": "Webhook URL",
"type": "url",
"placeholder": "e.g. https://hooks.slack.com/services/000000000/000000000/00000000000000000",
"required": True,
"help": "Your custom Slack webhook URL.",
},
{
"name": "username",
"label": "Bot Name",
"type": "string",
"placeholder": "e.g. Sentry",
"default": "Sentry",
"required": False,
"help": "The name used when publishing messages.",
},
{
"name": "icon_url",
"label": "Icon URL",
"type": "url",
"required": False,
"help": (
"The url of the icon to appear beside your bot (32px png), "
"leave empty for none.<br />You may use "
"http://myovchev.github.io/sentry-slack/images/logo32.png"
),
},
{
"name": "channel",
"label": "Destination",
"type": "string",
"placeholder": "e.g. #engineering",
"required": False,
"help": "Optional #channel name or @user",
},
{
"name": "custom_message",
"label": "Custom Message",
"type": "string",
"placeholder": "e.g. Hey <!everyone> there is something wrong",
"required": False,
"help": "Optional - Slack message formatting can be used",
},
{
"name": "include_tags",
"label": "Include Tags",
"type": "bool",
"required": False,
"help": "Include tags with notifications",
},
{
"name": "included_tag_keys",
"label": "Included Tags",
"type": "string",
"required": False,
"help": (
"Only include these tags (comma separated list). " "Leave empty to include all."
),
},
{
"name": "excluded_tag_keys",
"label": "Excluded Tags",
"type": "string",
"required": False,
"help": "Exclude these tags (comma separated list).",
},
{
"name": "include_rules",
"label": "Include Rules",
"type": "bool",
"required": False,
"help": "Include triggering rules with notifications.",
},
{
"name": "exclude_project",
"label": "Exclude Project Name",
"type": "bool",
"default": False,
"required": False,
"help": "Exclude project name with notifications.",
},
{
"name": "exclude_culprit",
"label": "Exclude Culprit",
"type": "bool",
"default": False,
"required": False,
"help": "Exclude culprit with notifications.",
},
]
def color_for_event(self, event):
return LEVEL_TO_COLOR.get(event.get_tag("level"), "error")
def _get_tags(self, event):
tag_list = event.tags
if not tag_list:
return ()
return (
(tagstore.backend.get_tag_key_label(k), tagstore.backend.get_tag_value_label(k, v))
for k, v in tag_list
)
def get_tag_list(self, name, project):
option = self.get_option(name, project)
if not option:
return None
return {tag.strip().lower() for tag in option.split(",")}
def notify(self, notification: Notification, raise_exception: bool = False) -> None:
event = notification.event
group = event.group
project = group.project
if not self.is_configured(project):
return
title = event.title[:SLACK_PLUGIN_TITLE_LENGTH_LIMIT].encode("utf-8")
# TODO(dcramer): we'd like this to be the event culprit, but Sentry
# does not currently retain it
if group.culprit:
culprit = group.culprit.encode("utf-8")
else:
culprit = None
project_name = project.get_full_name().encode("utf-8")
fields = []
# They can be the same if there is no culprit
# So we set culprit to an empty string instead of duplicating the text
if not self.get_option("exclude_culprit", project) and culprit and title != culprit:
fields.append({"title": "Culprit", "value": culprit, "short": False})
if not self.get_option("exclude_project", project):
fields.append({"title": "Project", "value": project_name, "short": True})
if self.get_option("custom_message", project):
fields.append(
{
"title": "Custom message",
"value": self.get_option("custom_message", project),
"short": False,
}
)
if self.get_option("include_rules", project):
rules = []
for rule in notification.rules:
rule_link = (
f"/{group.organization.slug}/{project.slug}/settings/alerts/rules/{rule.id}/"
)
# Make sure it's an absolute uri since we're sending this
# outside of Sentry into Slack
rule_link = absolute_uri(rule_link)
rules.append((rule_link, rule.label))
if rules:
value = ", ".join("<{} | {}>".format(*r) for r in rules)
fields.append(
{"title": "Triggered By", "value": value.encode("utf-8"), "short": False}
)
if self.get_option("include_tags", project):
included_tags = set(self.get_tag_list("included_tag_keys", project) or [])
excluded_tags = set(self.get_tag_list("excluded_tag_keys", project) or [])
for tag_key, tag_value in self._get_tags(event):
key = tag_key.lower()
std_key = tagstore.backend.get_standardized_key(key)
if included_tags and key not in included_tags and std_key not in included_tags:
continue
if excluded_tags and (key in excluded_tags or std_key in excluded_tags):
continue
fields.append(
{
"title": tag_key.encode("utf-8"),
"value": tag_value.encode("utf-8"),
"short": True,
}
)
payload = {
"attachments": [
{
"fallback": b"[%s] %s" % (project_name, title),
"title": title,
"title_link": group.get_absolute_url(params={"referrer": "slack"}),
"color": self.color_for_event(event),
"fields": fields,
}
]
}
client = self.get_client(project)
if client.username:
payload["username"] = client.username.encode("utf-8")
if client.channel:
payload["channel"] = client.channel
if client.icon_url:
payload["icon_url"] = client.icon_url
try:
client.request({"payload": json.dumps(payload)})
except ApiError as e:
# Ignore 404 and ignorable errors from slack webhooks.
if raise_exception or not (
e.text in IGNORABLE_SLACK_ERRORS or e.code in IGNORABLE_SLACK_ERROR_CODES
):
raise
def get_client(self, project):
webhook = self.get_option("webhook", project).strip()
# Apparently we've stored some bad data from before we used `URLField`.
username = (self.get_option("username", project) or "Sentry").strip()
icon_url = self.get_option("icon_url", project)
channel = (self.get_option("channel", project) or "").strip()
return SlackApiClient(webhook, username, icon_url, channel)
| SlackPlugin |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/decision_tree.py | {
"start": 9482,
"end": 10235
} | class ____(DecisionTree):
def _calculate_variance_reduction(self, y, y1, y2):
var_tot = calculate_variance(y)
var_1 = calculate_variance(y1)
var_2 = calculate_variance(y2)
frac_1 = len(y1) / len(y)
frac_2 = len(y2) / len(y)
# Calculate the variance reduction
variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)
return sum(variance_reduction)
def _mean_of_y(self, y):
value = np.mean(y, axis=0)
return value if len(value) > 1 else value[0]
def fit(self, X, y):
self._impurity_calculation = self._calculate_variance_reduction
self._leaf_value_calculation = self._mean_of_y
super(RegressionTree, self).fit(X, y)
| RegressionTree |
python | Textualize__textual | docs/examples/widgets/collapsible_nested.py | {
"start": 92,
"end": 355
} | class ____(App[None]):
def compose(self) -> ComposeResult:
with Collapsible(collapsed=False):
with Collapsible():
yield Label("Hello, world.")
if __name__ == "__main__":
app = CollapsibleApp()
app.run()
| CollapsibleApp |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/secrets.py | {
"start": 508,
"end": 796
} | class ____(str):
"""The use of this string subtype will prevent accidental prints of secret value to the console."""
@property
def _masked_value(self) -> str:
return "<SecretString: hidden>"
def __repr__(self) -> str:
return self._masked_value
| SecretString |
python | great-expectations__great_expectations | great_expectations/core/batch.py | {
"start": 22421,
"end": 27565
} | class ____(BatchRequestBase):
"""A RuntimeBatchRequest creates a Batch for a RuntimeDataConnector.
Instead of serving as a description of what data Great Expectations should
fetch, a RuntimeBatchRequest serves as a wrapper for data that is passed in
at runtime (as an in-memory dataframe, file/S3 path, or SQL query), with
user-provided identifiers for uniquely identifying the data.
---Documentation---
- https://docs.greatexpectations.io/docs/terms/batch_request/#runtimedataconnector-and-runtimebatchrequest
- https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_a_runtimedataconnector/
runtime_parameters will vary depending on the Datasource used with the data.
For a dataframe:
```python
{"batch_data": df}
```
For a path on a filesystem:
```python
{"path": "/path/to/data/file.csv"}
```
Args:
datasource_name: name of the Datasource used to connect to the data
data_connector_name: name of the DataConnector used to connect to the data
data_asset_name: name of the DataAsset used to connect to the data
runtime_parameters: a dictionary containing the data to process,
a path to the data, or a query, depending on the associated Datasource
batch_identifiers: a dictionary to serve as a persistent, unique
identifier for the data included in the Batch
batch_spec_passthrough: a dictionary of additional parameters that
the ExecutionEngine will use to obtain a specific set of data
Returns:
BatchRequest
"""
include_field_names: ClassVar[set[str]] = {
"datasource_name",
"data_connector_name",
"data_asset_name",
"runtime_parameters",
"batch_identifiers",
"batch_spec_passthrough",
}
def __init__( # noqa: PLR0913 # FIXME CoP
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
runtime_parameters: dict,
batch_identifiers: dict,
batch_spec_passthrough: dict | None = None,
) -> None:
self._validate_init_parameters(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
)
self._validate_runtime_batch_request_specific_init_parameters(
runtime_parameters=runtime_parameters,
batch_identifiers=batch_identifiers,
batch_spec_passthrough=batch_spec_passthrough,
)
super().__init__(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
runtime_parameters=runtime_parameters,
batch_identifiers=batch_identifiers,
batch_spec_passthrough=batch_spec_passthrough,
)
@staticmethod
def _validate_runtime_batch_request_specific_init_parameters(
runtime_parameters: dict | None,
batch_identifiers: dict | None,
batch_spec_passthrough: dict | None = None,
) -> None:
"""
We must have both or neither of runtime_parameters and batch_identifiers (but not either one of them).
This is strict equivalence ("if-and-only") condition ("exclusive NOR"); otherwise, ("exclusive OR") means error.
""" # noqa: E501 # FIXME CoP
if (not runtime_parameters and batch_identifiers) or (
runtime_parameters and not batch_identifiers
):
raise ValueError( # noqa: TRY003 # FIXME CoP
"It must be that either both runtime_parameters and batch_identifiers are present, or both are missing" # noqa: E501 # FIXME CoP
)
# if there is a value, make sure it is a dict
if runtime_parameters and not (isinstance(runtime_parameters, dict)):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The runtime_parameters must be a non-empty dict object.
The type given is "{type(runtime_parameters)!s}", which is an illegal type or an empty dictionary.""" # noqa: E501 # FIXME CoP
)
# if there is a value, make sure it is a dict
if batch_identifiers and not isinstance(batch_identifiers, dict):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type for batch_identifiers must be a dict object, with keys being identifiers defined in the
data connector configuration. The type given is "{type(batch_identifiers)!s}", which is illegal.""" # noqa: E501 # FIXME CoP
)
if batch_spec_passthrough and not (isinstance(batch_spec_passthrough, dict)):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type for batch_spec_passthrough must be a dict object. The type given is \
"{type(batch_spec_passthrough)!s}", which is illegal.
"""
)
# TODO: <Alex>The following class is to support the backward compatibility with the legacy design.</Alex> # noqa: E501 # FIXME CoP
| RuntimeBatchRequest |
python | google__pytype | pytype/tools/xref/testdata/subclass.py | {
"start": 101,
"end": 314
} | class ____:
#- @foo defines/binding FnFoo
#- FnFoo.node/kind function
def foo(self, x):
return 10
@staticmethod
#- @bar defines/binding FnBar
#- FnBar.node/kind function
def bar():
return 42
| A |
python | doocs__leetcode | solution/1800-1899/1810.Minimum Path Cost in a Hidden Grid/Solution.py | {
"start": 286,
"end": 1704
} | class ____(object):
def findShortestPath(self, master: 'GridMaster') -> int:
def dfs(i, j):
nonlocal target
if master.isTarget():
target = (i, j)
for dir, (a, b, ndir) in dirs.items():
x, y = i + a, j + b
if 0 <= x < N and 0 <= y < N and master.canMove(dir) and g[x][y] == -1:
g[x][y] = master.move(dir)
dfs(x, y)
master.move(ndir)
target = (-1, -1)
N = 200
INF = 0x3F3F3F3F
g = [[-1] * N for _ in range(N)]
dirs = {
'U': (-1, 0, 'D'),
'D': (1, 0, 'U'),
'L': (0, -1, 'R'),
'R': (0, 1, 'L'),
}
dfs(100, 100)
if target == (-1, -1):
return -1
q = [(0, 100, 100)]
dist = [[INF] * N for _ in range(N)]
dist[100][100] = 0
while q:
w, i, j = heappop(q)
if (i, j) == target:
return w
for a, b, _ in dirs.values():
x, y = i + a, j + b
if (
0 <= x < N
and 0 <= y < N
and g[x][y] != -1
and dist[x][y] > w + g[x][y]
):
dist[x][y] = w + g[x][y]
heappush(q, (dist[x][y], x, y))
return 0
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks_tests/components/databricks_asset_bundle/test_component.py | {
"start": 9559,
"end": 10266
} | class ____(TestOpCustomization):
def test_translation(
self,
attributes: Mapping[str, Any],
assertion: Callable[[OpSpec], bool],
databricks_config_path: str,
) -> None:
component = load_component_for_test(
DatabricksAssetBundleComponent,
{
"databricks_config_path": databricks_config_path,
"op": attributes,
"workspace": {
"host": TEST_DATABRICKS_WORKSPACE_HOST,
"token": TEST_DATABRICKS_WORKSPACE_TOKEN,
},
},
)
op = component.op
assert op
assert assertion(op)
| TestDatabricksOpCustomization |
python | astropy__astropy | astropy/time/core.py | {
"start": 126232,
"end": 127142
} | class ____(Exception):
pass
def _make_array(val, copy=COPY_IF_NEEDED):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
| ScaleValueError |
python | bottlepy__bottle | bottle.py | {
"start": 99199,
"end": 99825
} | class ____:
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
def _try_close(obj):
""" Call obj.close() if present and ignore exceptions """
try:
if hasattr(obj, 'close'):
obj.close()
except Exception:
pass
| _closeiter |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 23748,
"end": 23880
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo'
strategy_class = RedHatStrategy
| VirtuozzoLinuxHostname |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 559105,
"end": 559561
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteProjectV2"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2 = sgqlc.types.Field("ProjectV2", graphql_name="projectV2")
"""The deleted Project."""
| DeleteProjectV2Payload |
python | pytorch__pytorch | test/functorch/discover_coverage.py | {
"start": 21735,
"end": 26618
} | class ____:
def __init__(self, name):
self.name = name
self.opinfos = NAME_TO_OPINFO.get(name, None)
assert self.opinfos is None or len(self.opinfos) > 0
def has_opinfo(self):
return self.opinfos is not None
def __repr__(self):
return f'Operator("{self.name}")'
def __hash__(self):
return hash(self.name)
def no_opinfos_skip_test(self, test_name):
"""Returns NO if any opinfos have a skip or xfail for the test"""
if not self.has_opinfo():
return Support.UNKNOWN
for opinfo in self.opinfos:
for decorator in opinfo.decorators:
if not hasattr(decorator, "test_name"):
continue
if decorator.test_name != test_name:
continue
if is_decorateinfo_skip_or_xfail(decorator):
return Support.NO
return Support.YES
def any_opinfo_attr(self, attr):
if not self.has_opinfo():
raise RuntimeError
return any(getattr(opinfo, attr) for opinfo in self.opinfos)
def all_opinfo_attr(self, attr):
if not self.has_opinfo():
raise RuntimeError
return all(getattr(opinfo, attr) for opinfo in self.opinfos)
def supports_vjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VJP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test("test_vjp")
def supports_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test("test_vmap_exhaustive")
def supports_fast_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test("test_op_has_batch_rule")
def supports_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test("test_vmapvjp")
def supports_fast_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test("test_vmapvjp_has_batch_rule")
def supports_jvp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in JVP_EXEMPTIONS:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
if self.any_opinfo_attr("supports_autograd") and not self.all_opinfo_attr(
"supports_forward_ad"
):
return Support.NO
return self.no_opinfos_skip_test("test_jvp")
def supports_jvpvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
exemptions = {
# we have support (see OpInfo), testing artifact
"nn.functional.dropout2d",
"nn.functional.dropout",
# exception: we dont even support double backward for this
"nn.functional.hardswish",
"bernoulli", # this isn't differentiable
"normal", # not differentiable
}
if self.name in exemptions:
return Support.YES
return self.no_opinfos_skip_test("test_jvpvjp")
def _supports_vmapjvp_base(self, test):
if self.name in FACTORY_FNS:
return Support.YES
VMAPJVP_EXEMPTIONS = {
"prod", # dynamic (backward)
"nn.functional.batch_norm", # testing problem
"normal", # not actually problem, randomness testing artifact
"bernoulli", # not actually problem, randomness testing artifact
"nn.functional.dropout2d", # not actually problem, randomness testing artifact
"nn.functional.dropout", # not actually problem, randomness testing artifact
# Not a problem.
# It's just that the max_norm testing mutates inputs...
# (we have our own functorch variant of the OpInfo without max_norm)
"nn.functional.embedding",
}
if self.name in VMAPJVP_EXEMPTIONS:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
if self.any_opinfo_attr("supports_autograd") and not self.all_opinfo_attr(
"supports_forward_ad"
):
return Support.NO
return self.no_opinfos_skip_test(test)
def supports_vmapjvp(self):
return self._supports_vmapjvp_base("test_vmapjvpall")
def supports_fast_vmapjvp(self):
return self._supports_vmapjvp_base("test_vmapjvpall_has_batch_rule")
| Operator |
python | dask__distributed | distributed/diagnostics/websocket.py | {
"start": 186,
"end": 2513
} | class ____(SchedulerPlugin):
name = "websocket"
def __init__(self, socket, scheduler):
self.socket = socket
self.scheduler = scheduler
def restart(self, scheduler, **kwargs):
"""Run when the scheduler restarts itself"""
self.socket.send("restart", {})
def add_worker(self, scheduler=None, worker=None, **kwargs):
"""Run when a new worker enters the cluster"""
self.socket.send("add_worker", {"worker": worker})
def remove_worker(self, scheduler=None, worker=None, **kwargs):
"""Run when a worker leaves the cluster"""
self.socket.send("remove_worker", {"worker": worker})
def add_client(self, scheduler=None, client=None, **kwargs):
"""Run when a new client connects"""
self.socket.send("add_client", {"client": client})
def remove_client(self, scheduler=None, client=None, **kwargs):
"""Run when a client disconnects"""
self.socket.send("remove_client", {"client": client})
def update_graph(self, scheduler, client=None, **kwargs):
"""Run when a new graph / tasks enter the scheduler"""
self.socket.send("update_graph", {"client": client})
def transition(self, key, start, finish, *args, **kwargs):
"""Run whenever a task changes state
Parameters
----------
key : string
start : string
Start state of the transition.
One of released, waiting, processing, memory, error.
finish : string
Final state of the transition.
stimulus_id: string
ID of stimulus causing the transition.
*args, **kwargs : More options passed when transitioning
This may include worker ID, compute time, etc.
"""
if key not in self.scheduler.tasks:
return
kwargs["key"] = key
startstops = kwargs.get("startstops", [])
for startstop in startstops:
color = colors[startstop["action"]]
if type(color) is not str:
color = color(kwargs)
data = {
"key": key,
"name": key_split(key),
"color": color,
**kwargs,
**startstop,
}
self.socket.send("transition", data)
| WebsocketPlugin |
python | spyder-ide__spyder | spyder/plugins/findinfiles/widgets/combobox.py | {
"start": 943,
"end": 8718
} | class ____(SpyderComboBox):
"""
Non editable combo box handling the path locations of the FindOptions
widget.
"""
# Signals
sig_redirect_stdio_requested = Signal(bool)
def __init__(self, external_path_history=None, parent=None, id_=None):
super().__init__(parent)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.setEditable(False)
self.path = ''
self.project_path = ''
self.file_path = ''
self.external_path = ''
if id_ is not None:
self.ID = id_
self.addItem(_("Current working directory"))
self.addItem(_("Project"))
self.model().item(SearchInComboBoxItems.Project, 0).setEnabled(False)
self.addItem(_("Current file").replace('&', ''))
self.insertSeparator(SearchInComboBoxItems.FirstSeparator)
self.addItem(_("Select another directory"))
self.addItem(_("Clear the list of other directories"))
self.insertSeparator(SearchInComboBoxItems.SecondSeparator)
external_path_history = (
[] if external_path_history is None else external_path_history
)
if external_path_history:
for path in external_path_history:
self.add_external_path(path)
else:
self.set_state_other_dirs_items(False)
self.currentIndexChanged.connect(self.path_selection_changed)
self.view().installEventFilter(self)
def add_external_path(self, path):
"""
Adds an external path to the combobox if it exists on the file system.
If the path is already listed in the combobox, it is removed from its
current position and added back at the end. If the maximum number of
paths is reached, the oldest external path is removed from the list.
"""
if not osp.exists(path):
return
self.set_state_other_dirs_items(True)
self.removeItem(self.findText(path))
self.addItem(path)
self.setItemData(self.count() - 1, path, Qt.ToolTipRole)
while (
self.count() >
(MAX_PATH_HISTORY + SearchInComboBoxItems.ExternalPaths)
):
self.removeItem(SearchInComboBoxItems.ExternalPaths)
def get_external_paths(self):
"""Returns a list of the external paths listed in the combobox."""
return [
str(self.itemText(i))
for i in range(SearchInComboBoxItems.ExternalPaths, self.count())
]
def clear_external_paths(self):
"""Remove all the external paths listed in the combobox."""
while self.count() > SearchInComboBoxItems.ExternalPaths:
self.removeItem(SearchInComboBoxItems.ExternalPaths)
self.set_state_other_dirs_items(False)
def get_current_searchpath(self):
"""
Returns the path corresponding to the currently selected item
in the combobox.
"""
idx = self.currentIndex()
if idx == SearchInComboBoxItems.Cwd:
return self.path
elif idx == SearchInComboBoxItems.Project:
return self.project_path
elif idx == SearchInComboBoxItems.File:
return self.file_path
else:
return self.external_path
def set_current_searchpath_index(self, index):
"""Set the current index of this combo box."""
if index is not None:
index = min(index, self.count() - 1)
if index in [SearchInComboBoxItems.ClearList,
SearchInComboBoxItems.SelectAnotherDirectory]:
index = SearchInComboBoxItems.Cwd
else:
index = SearchInComboBoxItems.Cwd
self.setCurrentIndex(index)
def is_file_search(self):
"""Returns whether the current search path is a file."""
if self.currentIndex() == SearchInComboBoxItems.File:
return True
else:
return False
@Slot()
def path_selection_changed(self):
"""Handles when the current index of the combobox changes."""
idx = self.currentIndex()
if idx == SearchInComboBoxItems.SelectAnotherDirectory:
external_path = self.select_directory()
if len(external_path) > 0:
self.add_external_path(external_path)
self.setCurrentIndex(self.count() - 1)
else:
self.setCurrentIndex(SearchInComboBoxItems.Cwd)
elif idx == SearchInComboBoxItems.ClearList:
reply = QMessageBox.question(
self, _("Clear other directories"),
_("Do you want to clear the list of other directories?"),
QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.Yes:
self.clear_external_paths()
self.setCurrentIndex(SearchInComboBoxItems.Cwd)
elif idx >= SearchInComboBoxItems.ExternalPaths:
self.external_path = str(self.itemText(idx))
@Slot()
def select_directory(self):
"""Select directory"""
self.sig_redirect_stdio_requested.emit(False)
directory = getexistingdirectory(
self,
_("Select directory"),
self.path,
)
if directory:
directory = to_unicode_from_fs(osp.abspath(directory))
self.sig_redirect_stdio_requested.emit(True)
return directory
def set_project_path(self, path):
"""
Sets the project path and disables the project search in the combobox
if the value of path is None.
"""
if path is None:
self.project_path = ''
self.model().item(
SearchInComboBoxItems.Project, 0
).setEnabled(False)
if self.currentIndex() == SearchInComboBoxItems.Project:
self.setCurrentIndex(SearchInComboBoxItems.Cwd)
else:
path = osp.abspath(path)
self.project_path = path
self.model().item(
SearchInComboBoxItems.Project, 0
).setEnabled(True)
def set_state_other_dirs_items(self, enabled):
"""
Set the enabled/visible state of items that change when other
directories are added/removed to/from the combobox.
"""
# The second separator needs to be visible only when the user has added
# other directories.
self.view().setRowHidden(
SearchInComboBoxItems.SecondSeparator, not enabled
)
# The ClearList item needs to be disabled if the user has not added
# other directories
self.model().item(
SearchInComboBoxItems.ClearList, 0
).setEnabled(enabled)
def eventFilter(self, widget, event):
"""Used to handle key events on the QListView of the combobox."""
if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Delete:
index = self.view().currentIndex().row()
if index >= SearchInComboBoxItems.ExternalPaths:
# Remove item and update the view.
self.removeItem(index)
self.showPopup()
# Set the view selection so that it doesn't bounce around.
new_index = min(self.count() - 1, index)
if new_index < SearchInComboBoxItems.ExternalPaths:
new_index = SearchInComboBoxItems.Cwd
self.set_state_other_dirs_items(False)
self.hidePopup()
self.view().setCurrentIndex(self.model().index(new_index, 0))
self.setCurrentIndex(new_index)
return True
return super().eventFilter(widget, event)
| SearchInComboBox |
python | ray-project__ray | ci/pipeline/determine_tests_to_run.py | {
"start": 1376,
"end": 4803
} | class ____:
def __init__(
self,
tags: List[str],
lineno: int,
dirs: Optional[List[str]] = None,
files: Optional[List[str]] = None,
patterns: Optional[List[str]] = None,
):
self.tags = set(tags)
self.lineno = lineno
self.dirs = dirs or []
self.patterns = patterns or []
self.files = files or []
def match(self, changed_file: str) -> bool:
for dir_name in self.dirs:
if changed_file == dir_name or changed_file.startswith(dir_name + "/"):
return True
for file in self.files:
if changed_file == file:
return True
for pattern in self.patterns:
if fnmatch.fnmatch(changed_file, pattern):
return True
return False
def match_tags(self, changed_file: str) -> Tuple[Set[str], bool]:
if self.match(changed_file):
return self.tags, True
return set(), False
def _parse_rules(rule_content: str) -> Tuple[Set[str], List[TagRule]]:
"""
Parse the rule config content into a list ot TagRule's.
The rule content is a string with the following format:
```
# Comment content, after '#', will be ignored.
# Empty lines will be ignored too.
dir/ # Directory to match
file # File to match
dir/*.py # Pattern to match, using fnmatch, matches dir/a.py dir/dir/b.py or dir/.py
@ tag1 tag2 tag3 # Tags to emit for a rule. A rule without tags is a skipping rule.
; # Semicolon to separate rules
```
Rules are evaluated in order, and the first matched rule will be used.
"""
rules: List[TagRule] = []
tag_defs: Set[str] = set()
tag_defs_ended: bool = False
tags: Set[str] = set()
dirs: List[str] = []
files: List[str] = []
patterns: List[str] = []
lines = rule_content.splitlines()
lineno = 0
for line in lines:
lineno += 1
line = line.strip()
if not line or line.startswith("#"):
continue # Skip empty lines and comments.
comment_index = line.find("#") # Find the first '#' to remove comments.
if comment_index != -1:
line = line[:comment_index].strip() # Remove comments.
if line.startswith("!"):
if tag_defs_ended:
raise ValueError("Tag must be declared at file start.")
tag_defs.update(line[1:].split())
continue
if not tag_defs_ended:
tag_defs_ended = True
if line.startswith("@"): # tags.
# Strip the leading '@' and split into tags.
tags.update(line[1:].split())
elif line.startswith(";"): # End of a rule.
if line != ";":
raise ValueError(f"Unexpected tokens after semicolon on line {lineno}.")
rules.append(TagRule(tags, lineno, dirs, files, patterns))
tags, dirs, files, patterns = set(), [], [], []
else:
if line.find("*") != -1: # Patterns.
patterns.append(line)
elif line.endswith("/"): # Directories.
dirs.append(line[:-1])
else: # Files.
files.append(line)
# Append the last rule if not empty.
if tags or dirs or files or patterns:
rules.append(TagRule(tags, lineno, dirs, files, patterns))
return tag_defs, rules
| TagRule |
python | astropy__astropy | astropy/visualization/stretch.py | {
"start": 23212,
"end": 24406
} | class ____(BaseStretch):
"""
A histogram equalization stretch.
Parameters
----------
data : array-like
The data defining the equalization.
values : array-like, optional
The input image values, which should already be normalized to
the [0:1] range.
"""
def __init__(self, data, values=None):
# Assume data is not necessarily normalized at this point
self.data = np.sort(data.ravel())
self.data = self.data[np.isfinite(self.data)]
vmin = self.data.min()
vmax = self.data.max()
self.data = (self.data - vmin) / (vmax - vmin)
# Compute relative position of each pixel
if values is None:
self.values = np.linspace(0.0, 1.0, len(self.data))
else:
self.values = values
def __call__(self, values, clip=True, out=None):
values = _prepare(values, clip=clip, out=out)
values[:] = np.interp(values, self.data, self.values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedHistEqStretch(self.data, values=self.values)
| HistEqStretch |
python | numba__numba | numba/core/ir.py | {
"start": 40713,
"end": 51901
} | class ____(object):
def __init__(self, blocks, is_generator, func_id, loc,
definitions, arg_count, arg_names):
self.blocks = blocks
self.is_generator = is_generator
self.func_id = func_id
self.loc = loc
self.arg_count = arg_count
self.arg_names = arg_names
self._definitions = definitions
self._reset_analysis_variables()
def equal_ir(self, other):
""" Checks that the IR contained within is equal to the IR in other.
Equality is defined by being equal in fundamental structure (blocks,
labels, IR node type and the order in which they are defined) and the
IR nodes being equal. IR node equality essentially comes down to
ensuring a node's `.__dict__` or `.__slots__` is equal, with the
exception of ignoring 'loc' and 'scope' entries. The upshot is that the
comparison is essentially location and scope invariant, but otherwise
behaves as unsurprisingly as possible.
"""
if type(self) is type(other):
return self.blocks == other.blocks
return False
def diff_str(self, other):
"""
Compute a human readable difference in the IR, returns a formatted
string ready for printing.
"""
msg = []
for label, block in self.blocks.items():
other_blk = other.blocks.get(label, None)
if other_blk is not None:
if block != other_blk:
msg.append(("Block %s differs" % label).center(80, '-'))
# see if the instructions are just a permutation
block_del = [x for x in block.body if isinstance(x, Del)]
oth_del = [x for x in other_blk.body if isinstance(x, Del)]
if block_del != oth_del:
# this is a common issue, dels are all present, but
# order shuffled.
if sorted(block_del) == sorted(oth_del):
msg.append(("Block %s contains the same dels but "
"their order is different") % label)
if len(block.body) > len(other_blk.body):
msg.append("This block contains more statements")
elif len(block.body) < len(other_blk.body):
msg.append("Other block contains more statements")
# find the indexes where they don't match
tmp = []
for idx, stmts in enumerate(zip(block.body,
other_blk.body)):
b_s, o_s = stmts
if b_s != o_s:
tmp.append(idx)
def get_pad(ablock, l):
pointer = '-> '
sp = len(pointer) * ' '
pad = []
nstmt = len(ablock)
for i in range(nstmt):
if i in tmp:
item = pointer
elif i >= l:
item = pointer
else:
item = sp
pad.append(item)
return pad
min_stmt_len = min(len(block.body), len(other_blk.body))
with StringIO() as buf:
it = [("self", block), ("other", other_blk)]
for name, _block in it:
buf.truncate(0)
_block.dump(file=buf)
stmts = buf.getvalue().splitlines()
pad = get_pad(_block.body, min_stmt_len)
title = ("%s: block %s" % (name, label))
msg.append(title.center(80, '-'))
msg.extend(["{0}{1}".format(a, b) for a, b in
zip(pad, stmts)])
if msg == []:
msg.append("IR is considered equivalent.")
return '\n'.join(msg)
def _reset_analysis_variables(self):
self._consts = consts.ConstantInference(self)
# Will be computed by PostProcessor
self.generator_info = None
self.variable_lifetime = None
# { ir.Block: { variable names (potentially) alive at start of block } }
self.block_entry_vars = {}
def derive(self, blocks, arg_count=None, arg_names=None,
force_non_generator=False, loc=None):
"""
Derive a new function IR from this one, using the given blocks,
and possibly modifying the argument count and generator flag.
Post-processing will have to be run again on the new IR.
"""
firstblock = blocks[min(blocks)]
new_ir = copy.copy(self)
new_ir.blocks = blocks
new_ir.loc = firstblock.loc if loc is None else loc
if force_non_generator:
new_ir.is_generator = False
if arg_count is not None:
new_ir.arg_count = arg_count
if arg_names is not None:
new_ir.arg_names = arg_names
new_ir._reset_analysis_variables()
# Make fresh func_id
new_ir.func_id = new_ir.func_id.derive()
return new_ir
def copy(self):
new_ir = copy.copy(self)
blocks = {}
block_entry_vars = {}
for label, block in self.blocks.items():
new_block = block.copy()
blocks[label] = new_block
if block in self.block_entry_vars:
block_entry_vars[new_block] = self.block_entry_vars[block]
new_ir.blocks = blocks
new_ir.block_entry_vars = block_entry_vars
return new_ir
def get_block_entry_vars(self, block):
"""
Return a set of variable names possibly alive at the beginning of
the block.
"""
return self.block_entry_vars[block]
def infer_constant(self, name):
"""
Try to infer the constant value of a given variable.
"""
if isinstance(name, Var):
name = name.name
return self._consts.infer_constant(name)
def get_definition(self, value, lhs_only=False):
"""
Get the definition site for the given variable name or instance.
A Expr instance is returned by default, but if lhs_only is set
to True, the left-hand-side variable is returned instead.
"""
lhs = value
while True:
if isinstance(value, Var):
lhs = value
name = value.name
elif isinstance(value, str):
lhs = value
name = value
else:
return lhs if lhs_only else value
defs = self._definitions[name]
if len(defs) == 0:
raise KeyError("no definition for %r"
% (name,))
if len(defs) > 1:
raise KeyError("more than one definition for %r"
% (name,))
value = defs[0]
def get_assignee(self, rhs_value, in_blocks=None):
"""
Finds the assignee for a given RHS value. If in_blocks is given the
search will be limited to the specified blocks.
"""
if in_blocks is None:
blocks = self.blocks.values()
elif isinstance(in_blocks, int):
blocks = [self.blocks[in_blocks]]
else:
blocks = [self.blocks[blk] for blk in list(in_blocks)]
assert isinstance(rhs_value, AbstractRHS)
for blk in blocks:
for assign in blk.find_insts(Assign):
if assign.value == rhs_value:
return assign.target
raise ValueError("Could not find an assignee for %s" % rhs_value)
def dump(self, file=None):
nofile = file is None
# Avoid early bind of sys.stdout as default value
file = file or StringIO()
for offset, block in sorted(self.blocks.items()):
print('label %s:' % (offset,), file=file)
block.dump(file=file)
if nofile:
text = file.getvalue()
if config.HIGHLIGHT_DUMPS:
try:
import pygments
except ImportError:
msg = "Please install pygments to see highlighted dumps"
raise ValueError(msg)
else:
from pygments import highlight
from numba.misc.dump_style import NumbaIRLexer as lexer
from numba.misc.dump_style import by_colorscheme
from pygments.formatters import Terminal256Formatter
print(highlight(text, lexer(), Terminal256Formatter(
style=by_colorscheme())))
else:
print(text)
def dump_to_string(self):
with StringIO() as sb:
self.dump(file=sb)
return sb.getvalue()
def dump_generator_info(self, file=None):
file = file or sys.stdout
gi = self.generator_info
print("generator state variables:", sorted(gi.state_vars), file=file)
for index, yp in sorted(gi.yield_points.items()):
print("yield point #%d: live variables = %s, weak live variables = %s"
% (index, sorted(yp.live_vars), sorted(yp.weak_live_vars)),
file=file)
def render_dot(self, filename_prefix="numba_ir", include_ir=True):
"""Render the CFG of the IR with GraphViz DOT via the
``graphviz`` python binding.
Returns
-------
g : graphviz.Digraph
Use `g.view()` to open the graph in the default PDF application.
"""
try:
import graphviz as gv
except ImportError:
raise ImportError(
"The feature requires `graphviz` but it is not available. "
"Please install with `pip install graphviz`"
)
g = gv.Digraph(
filename="{}{}.dot".format(
filename_prefix,
self.func_id.unique_name,
)
)
# Populate the nodes
for k, blk in self.blocks.items():
with StringIO() as sb:
blk.dump(sb)
label = sb.getvalue()
if include_ir:
label = ''.join(
[r' {}\l'.format(x) for x in label.splitlines()],
)
label = r"block {}\l".format(k) + label
g.node(str(k), label=label, shape='rect')
else:
label = r"{}\l".format(k)
g.node(str(k), label=label, shape='circle')
# Populate the edges
for src, blk in self.blocks.items():
for dst in blk.terminator.get_targets():
g.edge(str(src), str(dst))
return g
# A stub for undefined global reference
| FunctionIR |
python | kamyu104__LeetCode-Solutions | Python/valid-word.py | {
"start": 38,
"end": 552
} | class ____(object):
def isValid(self, word):
"""
:type word: str
:rtype: bool
"""
VOWELS = "aeiou"
if len(word) < 3:
return False
vowel = consonant = False
for x in word:
if x.isalpha():
if x.lower() in VOWELS:
vowel = True
else:
consonant = True
elif not x.isdigit():
return False
return vowel and consonant
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 13059,
"end": 13418
} | class ____:
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=["A"])
self.df_int_col = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df_string_col["A"]
def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
| GetItemSingleColumn |
python | keon__algorithms | algorithms/stack/stack.py | {
"start": 614,
"end": 1174
} | class ____(metaclass=ABCMeta):
"""Abstract Class for Stacks."""
def __init__(self):
self._top = -1
def __len__(self):
return self._top + 1
def __str__(self):
result = " ".join(map(str, self))
return 'Top-> ' + result
def is_empty(self):
return self._top == -1
@abstractmethod
def __iter__(self):
pass
@abstractmethod
def push(self, value):
pass
@abstractmethod
def pop(self):
pass
@abstractmethod
def peek(self):
pass
| AbstractStack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 1562,
"end": 1692
} | class ____(Class6[T_co, T_contra]): ...
# This should generate an error because T_co isn't
# compatible with T_contra.
| Class6_Child1 |
python | ray-project__ray | python/ray/tests/test_advanced_9.py | {
"start": 9974,
"end": 16438
} | class ____:
pass
A.options(name="a", lifetime="detached").remote()
print(ray.get([use_gpu.remote(), use_gpu.remote()]))
"""
proc = run_string_as_driver_nonblocking(script)
gcs_cli = ray._raylet.GcsClient(address=f"{call_ray_start}")
def check_demands(n):
status = gcs_cli.internal_kv_get(
ray._private.ray_constants.DEBUG_AUTOSCALING_STATUS.encode(), namespace=None
)
import json
status = json.loads(status.decode())
return len(status["load_metrics_report"]["resource_demand"]) == n
wait_for_condition(lambda: check_demands(2))
proc.terminate()
wait_for_condition(lambda: check_demands(1))
@pytest.mark.skipif(external_redis_test_enabled(), reason="Only valid in non redis env")
def test_redis_not_available(monkeypatch, call_ray_stop_only):
monkeypatch.setenv("RAY_redis_db_connect_retries", "5")
monkeypatch.setenv("RAY_REDIS_ADDRESS", "localhost:12345")
p = subprocess.run(
"ray start --head",
shell=True,
capture_output=True,
)
assert "Could not establish connection to Redis" in p.stderr.decode()
assert "Please check " in p.stderr.decode()
assert "redis storage is alive or not." in p.stderr.decode()
@pytest.mark.skipif(not external_redis_test_enabled(), reason="Only valid in redis env")
def test_redis_wrong_password(monkeypatch, external_redis, call_ray_stop_only):
monkeypatch.setenv("RAY_redis_db_connect_retries", "5")
p = subprocess.run(
"ray start --head --redis-password=1234",
shell=True,
capture_output=True,
)
assert "RedisError: ERR AUTH <password> called" in p.stderr.decode()
@pytest.mark.skipif(not external_redis_test_enabled(), reason="Only valid in redis env")
def test_redis_full(ray_start_cluster_head):
import redis
gcs_address = ray_start_cluster_head.gcs_address
redis_addr = os.environ["RAY_REDIS_ADDRESS"]
host, port = parse_address(redis_addr)
if os.environ.get("TEST_EXTERNAL_REDIS_REPLICAS", "1") != "1":
cli = redis.RedisCluster(host, int(port))
else:
cli = redis.Redis(host, int(port))
# Set the max memory to 10MB
cli.config_set("maxmemory", 5 * 1024 * 1024)
gcs_cli = ray._raylet.GcsClient(address=gcs_address)
# GCS should fail
# GcsClient assumes GCS is HA so it keeps retrying, although GCS is down. We must
# set timeout for this.
with pytest.raises(ray.exceptions.RpcError):
gcs_cli.internal_kv_put(b"A", b"A" * 6 * 1024 * 1024, True, timeout=5)
logs_dir = ray_start_cluster_head.head_node._logs_dir
with open(os.path.join(logs_dir, "gcs_server.err")) as err:
assert "OOM command not allowed when used" in err.read()
def test_omp_threads_set_third_party(ray_start_cluster, monkeypatch):
###########################
# Test the OMP_NUM_THREADS are picked up by 3rd party libraries
# when running tasks if no OMP_NUM_THREADS is set by user.
# e.g. numpy, numexpr
###########################
with monkeypatch.context() as m:
m.delenv("OMP_NUM_THREADS", raising=False)
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
@ray.remote(num_cpus=2)
def f():
# Assert numpy using 2 threads for it's parallelism backend.
import numpy # noqa: F401
from threadpoolctl import threadpool_info
for pool_info in threadpool_info():
assert pool_info["num_threads"] == 2
import numexpr
assert numexpr.nthreads == 2
return True
assert ray.get(f.remote())
def test_gcs_fd_usage(shutdown_only):
ray.init(
_system_config={
"prestart_worker_first_driver": False,
"enable_worker_prestart": False,
},
)
gcs_process = ray._private.worker._global_node.all_processes["gcs_server"][0]
gcs_process = psutil.Process(gcs_process.process.pid)
print("GCS connections", len(gcs_process.connections()))
@ray.remote(runtime_env={"env_vars": {"Hello": "World"}})
class A:
def f(self):
return os.environ.get("Hello")
# In case there are still some pre-start workers, consume all of them
aa = [A.remote() for _ in range(32)]
for a in aa:
assert ray.get(a.f.remote()) == "World"
base_fd_num = len(gcs_process.connections())
print("GCS connections", base_fd_num)
bb = [A.remote() for _ in range(16)]
for b in bb:
assert ray.get(b.f.remote()) == "World"
new_fd_num = len(gcs_process.connections())
print("GCS connections", new_fd_num)
# each worker has two connections:
# GCS -> CoreWorker
# CoreWorker -> GCS
# Sometimes, there is one more sockets opened. The reason
# is still unknown.
assert (new_fd_num - base_fd_num) <= len(bb) * 2 + 1
@pytest.mark.skipif(
sys.platform != "linux", reason="jemalloc is only prebuilt on linux"
)
def test_jemalloc_ray_start(monkeypatch, ray_start_cluster):
def check_jemalloc_enabled(pid=None):
if pid is None:
pid = os.getpid()
pmap = subprocess.run(
["pmap", str(pid)], check=True, text=True, stdout=subprocess.PIPE
)
return "libjemalloc.so" in pmap.stdout
# Firstly, remove the LD_PRELOAD and make sure
# jemalloc is loaded.
monkeypatch.delenv("LD_PRELOAD", False)
cluster = ray_start_cluster
node = cluster.add_node(num_cpus=1)
# Make sure raylet/gcs/worker all have jemalloc
assert check_jemalloc_enabled(
node.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.pid
)
assert check_jemalloc_enabled(
node.all_processes[ray_constants.PROCESS_TYPE_RAYLET][0].process.pid
)
assert not ray.get(ray.remote(check_jemalloc_enabled).remote())
ray.shutdown()
cluster.shutdown()
monkeypatch.setenv("LD_PRELOAD", "")
node = cluster.add_node(num_cpus=1)
# Make sure raylet/gcs/worker all have jemalloc
assert not check_jemalloc_enabled(
node.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.pid
)
assert not check_jemalloc_enabled(
node.all_processes[ray_constants.PROCESS_TYPE_RAYLET][0].process.pid
)
assert not ray.get(ray.remote(check_jemalloc_enabled).remote())
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| A |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 36564,
"end": 42520
} | class ____(
_UserDefinedTypeFixture, fixtures.TablesTest, AssertsCompiledSQL
):
run_create_tables = None
run_inserts = None
run_deletes = None
"""tests user-defined types."""
def test_typedecorator_literal_render(self):
class MyType(types.TypeDecorator):
impl = String
cache_ok = True
def process_literal_param(self, value, dialect):
return "HI->%s<-THERE" % value
self.assert_compile(
select(literal("test", MyType)),
"SELECT 'HI->test<-THERE' AS anon_1",
dialect="default",
literal_binds=True,
)
def test_kw_colspec(self):
class MyType(types.UserDefinedType):
def get_col_spec(self, **kw):
return "FOOB %s" % kw["type_expression"].name
class MyOtherType(types.UserDefinedType):
def get_col_spec(self):
return "BAR"
t = Table("t", MetaData(), Column("bar", MyType, nullable=False))
self.assert_compile(ddl.CreateColumn(t.c.bar), "bar FOOB bar NOT NULL")
t = Table("t", MetaData(), Column("bar", MyOtherType, nullable=False))
self.assert_compile(ddl.CreateColumn(t.c.bar), "bar BAR NOT NULL")
def test_typedecorator_literal_render_fallback_bound(self):
# fall back to process_bind_param for literal
# value rendering.
class MyType(types.TypeDecorator):
impl = String
cache_ok = True
def process_bind_param(self, value, dialect):
return "HI->%s<-THERE" % value
self.assert_compile(
select(literal("test", MyType)),
"SELECT 'HI->test<-THERE' AS anon_1",
dialect="default",
literal_binds=True,
)
def test_typedecorator_impl(self):
for impl_, exp, kw in [
(Float, "FLOAT", {}),
(Float, "FLOAT(2)", {"precision": 2}),
(Float(2), "FLOAT(2)", {"precision": 4}),
(Numeric(19, 2), "NUMERIC(19, 2)", {}),
]:
for dialect_ in (
dialects.postgresql,
dialects.mssql,
dialects.mysql,
):
dialect_ = dialect_.dialect()
raw_impl = types.to_instance(impl_, **kw)
class MyType(types.TypeDecorator):
impl = impl_
cache_ok = True
dec_type = MyType(**kw)
eq_(dec_type.impl.__class__, raw_impl.__class__)
raw_dialect_impl = raw_impl.dialect_impl(dialect_)
dec_dialect_impl = dec_type.dialect_impl(dialect_)
eq_(dec_dialect_impl.__class__, MyType)
eq_(
raw_dialect_impl.__class__, dec_dialect_impl.impl.__class__
)
self.assert_compile(MyType(**kw), exp, dialect=dialect_)
def test_user_defined_typedec_impl(self):
class MyType(types.TypeDecorator):
impl = Float
cache_ok = True
def load_dialect_impl(self, dialect):
if dialect.name == "sqlite":
return String(50)
else:
return super().load_dialect_impl(dialect)
sl = dialects.sqlite.dialect()
pg = dialects.postgresql.dialect()
t = MyType()
self.assert_compile(t, "VARCHAR(50)", dialect=sl)
self.assert_compile(t, "FLOAT", dialect=pg)
eq_(
t.dialect_impl(dialect=sl).impl.__class__,
String().dialect_impl(dialect=sl).__class__,
)
eq_(
t.dialect_impl(dialect=pg).impl.__class__,
Float().dialect_impl(pg).__class__,
)
@testing.combinations((Boolean,), (Enum,))
def test_typedecorator_schematype_constraint(self, typ):
class B(TypeDecorator):
impl = typ
cache_ok = True
t1 = Table("t1", MetaData(), Column("q", B(create_constraint=True)))
eq_(
len([c for c in t1.constraints if isinstance(c, CheckConstraint)]),
1,
)
def test_type_decorator_repr(self):
class MyType(TypeDecorator):
impl = VARCHAR
cache_ok = True
eq_(repr(MyType(45)), "MyType(length=45)")
def test_user_defined_typedec_impl_bind(self):
class TypeOne(types.TypeEngine):
def bind_processor(self, dialect):
def go(value):
return value + " ONE"
return go
class TypeTwo(types.TypeEngine):
def bind_processor(self, dialect):
def go(value):
return value + " TWO"
return go
class MyType(types.TypeDecorator):
impl = TypeOne
cache_ok = True
def load_dialect_impl(self, dialect):
if dialect.name == "sqlite":
return TypeOne()
else:
return TypeTwo()
def process_bind_param(self, value, dialect):
return "MYTYPE " + value
sl = dialects.sqlite.dialect()
pg = dialects.postgresql.dialect()
t = MyType()
eq_(t._cached_bind_processor(sl)("foo"), "MYTYPE foo ONE")
eq_(t._cached_bind_processor(pg)("foo"), "MYTYPE foo TWO")
def test_user_defined_dialect_specific_args(self):
class MyType(types.UserDefinedType):
def __init__(self, foo="foo", **kwargs):
super().__init__()
self.foo = foo
self.dialect_specific_args = kwargs
def adapt(self, cls):
return cls(foo=self.foo, **self.dialect_specific_args)
t = MyType(bar="bar")
a = t.dialect_impl(testing.db.dialect)
eq_(a.foo, "foo")
eq_(a.dialect_specific_args["bar"], "bar")
| UserDefinedTest |
python | django__django | django/db/models/fields/related_lookups.py | {
"start": 5682,
"end": 5740
} | class ____(RelatedLookupMixin, Exact):
pass
| RelatedExact |
python | Pylons__pyramid | docs/quick_tutorial/functional_testing/tutorial/tests.py | {
"start": 47,
"end": 412
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_hello_world(self):
from tutorial import hello_world
request = testing.DummyRequest()
response = hello_world(request)
self.assertEqual(response.status_code, 200)
| TutorialViewTests |
python | google__jax | tests/shape_poly_test.py | {
"start": 51660,
"end": 56536
} | class ____(Harness):
"""Tests a function with shape polymorphism.
Exports `fun` with shape polymorphism, then checks that the JAX native and
the exported function produce the same results.
"""
def __init__(self,
group_name: str, name: str,
fun: Callable[..., Any],
*,
arg_descriptors: Sequence[test_harnesses.ArgDescriptor] = (),
polymorphic_shapes: Sequence[str | None] = (),
symbolic_constraints: Sequence[str] = (),
expect_error: tuple[Any, str] | None = None,
check_result: bool = True,
tol: float | None = None,
limitations: Sequence[test_harnesses.Limitation] = (),
override_jax_config_flags: dict[str, Any] = {}):
"""Args:
group_name, name: The name for the harness. See `Harness.__init__`.
fun: the function to be converted. See `Harness.__init__`.
arg_descriptors: The argument descriptors. See `Harness.__init__`.
polymorphic_shapes: For `export.args_specs`.
symbolic_constraints: For `export.args_specs`.
expect_error: an optional pair of an Exception type and a regular
expression to match the expected exception string.
We expect this error during tracing and exporting with shape
polymorphism.
check_result: specifies if we want to check that the result of invoking
the shape polymorphic export produces the same result as the
native JAX function.
tol: the tolerance to use for checking results.
limitations: a sequence of Limitation(s), used for obtaining the default
tolerance (if `tol` is not specified).
override_jax_config_flags: jax.config flags to override for the duration
of the test.
"""
super().__init__(group_name, name, fun, arg_descriptors,
dtype=np.float32)
self.polymorphic_shapes = polymorphic_shapes
self.symbolic_constraints = symbolic_constraints
self.expect_error = expect_error
self.tol = tol
self.check_result = check_result
self.limitations = limitations
self.override_jax_config_flags = override_jax_config_flags
def run_test(self, tst: jtu.JaxTestCase) -> jax.Array | None:
def log_message(extra: str):
return f"[{tst._testMethodName}]: {extra}"
# Check that we have overridden the jax.config flags
for fname, fvalue in self.override_jax_config_flags.items():
tst.assertEqual(getattr(jax.config, fname), fvalue, (
f"Flag {fname} current value {getattr(jax.config, fname)} != {fvalue}"))
f_jax = jax.jit(self.dyn_fun)
args = self.dyn_args_maker(tst.rng())
args = jax.tree.map(jnp.array, args)
args_specs = export.symbolic_args_specs(args, self.polymorphic_shapes,
constraints=self.symbolic_constraints)
if self.expect_error is not None:
with tst.assertRaisesRegex(self.expect_error[0], self.expect_error[1]):
export.export(f_jax)(*args_specs)
return None
exp = export.export(f_jax)(*args_specs)
if not self.check_result:
return None
# Run the JAX natively and then the exported function and compare
res_jax_native = f_jax(*args)
res_jax_exported = exp.call(*args)
custom_assert_lims = [
l for l in self.limitations if l.custom_assert is not None]
assert len(custom_assert_lims) <= 1, custom_assert_lims
tol = None
if self.tol is not None:
tol = self.tol
elif self.limitations:
max_lim = self.limitations[0].get_max_tolerance_limitation(
self.limitations)
if max_lim is not None:
tol = max_lim.tol
if not custom_assert_lims:
tst.assertAllClose(res_jax_native, res_jax_exported,
atol=tol, rtol=tol)
else:
logging.info(log_message(
f"Running custom_assert with tol={tol} due "
f"to {custom_assert_lims[0]}"))
custom_assert_lims[0].custom_assert(tst, res_jax_native,
res_jax_exported, args=args, # type: ignore
tol=tol, err_msg=None)
return res_jax_exported
def check_shape_poly(tst, f_jax: Callable, *,
arg_descriptors: Sequence[test_harnesses.ArgDescriptor] = (),
polymorphic_shapes: Sequence[str | None] = (),
symbolic_constraints: Sequence[str] = (),
expect_error=None) -> jax.Array | None:
# Builds a PolyHarness and runs the test. See PolyHarness documentation.
h = PolyHarness("", "", jax.jit(f_jax),
arg_descriptors=arg_descriptors,
polymorphic_shapes=polymorphic_shapes,
symbolic_constraints=symbolic_constraints,
expect_error=expect_error)
return h.run_test(tst)
| PolyHarness |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/components.py | {
"start": 3449,
"end": 4412
} | class ____(Decoder):
"""
Decoder strategy that returns the json-encoded content of a response, if any.
"""
parameters: InitVar[Mapping[str, Any]]
def is_stream_response(self) -> bool:
return False
def decode(self, response: requests.Response) -> Generator[MutableMapping[str, Any], None, None]:
try:
document = gzip.decompress(response.content).decode("iso-8859-1")
except gzip.BadGzipFile:
document = response.content.decode("iso-8859-1")
try:
parsed = xmltodict.parse(document, attr_prefix="", cdata_key="value", force_list={"Message"})
except Exception as e:
logger.warning(f"Unable to parse the report for the stream {self.name}, error: {str(e)}")
return []
reports = parsed.get("AmazonEnvelope", {}).get("Message", {})
for report in reports:
yield report.get("OrderReport", {})
@dataclass
| GzipXmlDecoder |
python | django__django | tests/auth_tests/test_migrations.py | {
"start": 4731,
"end": 9768
} | class ____(TransactionTestCase):
available_apps = [
"auth_tests",
"django.contrib.auth",
"django.contrib.contenttypes",
]
def setUp(self):
"""
Create proxy permissions with content_type to the concrete model
rather than the proxy model (as they were before Django 2.2 and
migration 11).
"""
Permission.objects.all().delete()
self.concrete_content_type = ContentType.objects.get_for_model(Proxy)
self.default_permission = Permission.objects.create(
content_type=self.concrete_content_type,
codename="add_proxy",
name="Can add proxy",
)
self.custom_permission = Permission.objects.create(
content_type=self.concrete_content_type,
codename="display_proxys",
name="May display proxys information",
)
def test_proxy_model_permissions_contenttype(self):
proxy_model_content_type = ContentType.objects.get_for_model(
Proxy, for_concrete_model=False
)
self.assertEqual(
self.default_permission.content_type, self.concrete_content_type
)
self.assertEqual(
self.custom_permission.content_type, self.concrete_content_type
)
with connection.schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
self.default_permission.refresh_from_db()
self.custom_permission.refresh_from_db()
self.assertEqual(self.default_permission.content_type, proxy_model_content_type)
self.assertEqual(self.custom_permission.content_type, proxy_model_content_type)
def test_user_still_has_proxy_model_permissions(self):
user = User.objects.create()
user.user_permissions.add(self.default_permission)
user.user_permissions.add(self.custom_permission)
for permission in [self.default_permission, self.custom_permission]:
self.assertTrue(user.has_perm("auth_tests." + permission.codename))
with connection.schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
# Reload user to purge the _perm_cache.
user = User._default_manager.get(pk=user.pk)
for permission in [self.default_permission, self.custom_permission]:
self.assertTrue(user.has_perm("auth_tests." + permission.codename))
def test_migrate_backwards(self):
with connection.schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
update_proxy_permissions.revert_proxy_model_permissions(apps, editor)
self.default_permission.refresh_from_db()
self.assertEqual(
self.default_permission.content_type, self.concrete_content_type
)
self.custom_permission.refresh_from_db()
self.assertEqual(
self.custom_permission.content_type, self.concrete_content_type
)
def test_user_keeps_same_permissions_after_migrating_backward(self):
user = User.objects.create()
user.user_permissions.add(self.default_permission)
user.user_permissions.add(self.custom_permission)
for permission in [self.default_permission, self.custom_permission]:
self.assertTrue(user.has_perm("auth_tests." + permission.codename))
with connection.schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
update_proxy_permissions.revert_proxy_model_permissions(apps, editor)
# Reload user to purge the _perm_cache.
user = User._default_manager.get(pk=user.pk)
for permission in [self.default_permission, self.custom_permission]:
self.assertTrue(user.has_perm("auth_tests." + permission.codename))
def test_migrate_with_existing_target_permission(self):
"""
Permissions may already exist:
- Old workaround was to manually create permissions for proxy models.
- Model may have been concrete and then converted to proxy.
Output a reminder to audit relevant permissions.
"""
proxy_model_content_type = ContentType.objects.get_for_model(
Proxy, for_concrete_model=False
)
Permission.objects.create(
content_type=proxy_model_content_type,
codename="add_proxy",
name="Can add proxy",
)
Permission.objects.create(
content_type=proxy_model_content_type,
codename="display_proxys",
name="May display proxys information",
)
with captured_stdout() as stdout:
with connection.schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
self.assertIn(
"A problem arose migrating proxy model permissions", stdout.getvalue()
)
| ProxyModelWithSameAppLabelTests |
python | pytorch__pytorch | torch/profiler/profiler.py | {
"start": 24098,
"end": 38305
} | class ____(_KinetoProfile):
"""Profiler context manager.
Args:
activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``,
``torch.profiler.ProfilerActivity.XPU``.
Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA
or (when available) ProfilerActivity.XPU.
schedule (Callable): callable that takes step (int) as a single parameter and returns
``ProfilerAction`` value that specifies the profiler action to perform at each step.
on_trace_ready (Callable): callable that is called at each step when ``schedule``
returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
record_shapes (bool): save information about operator's input shapes.
profile_memory (bool): track tensor memory allocation/deallocation.
with_stack (bool): record source information (file and line number) for the ops.
with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators
(matrix multiplication and 2D convolution).
with_modules (bool): record module hierarchy (including function names)
corresponding to the callstack of the op. e.g. If module A's forward call's
module B's forward which contains an aten::add op,
then aten::add's module hierarchy is A.B
Note that this support exist, at the moment, only for TorchScript models
and not eager mode models.
experimental_config (_ExperimentalConfig) : A set of experimental options
used for Kineto library features. Note, backward compatibility is not guaranteed.
execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object.
`PyTorch Execution Traces <https://arxiv.org/pdf/2305.14516.pdf>`__ offer a graph based
representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators.
When this argument is included the observer start() and stop() will be called for the
same time window as PyTorch profiler. See the examples section below for a code sample.
acc_events (bool): Enable the accumulation of FunctionEvents across multiple profiling cycles
use_cuda (bool):
.. deprecated:: 1.8.1
use ``activities`` instead.
.. note::
Use :func:`~torch.profiler.schedule` to generate the callable schedule.
Non-default schedules are useful when profiling long training jobs
and allow the user to obtain multiple traces at the different iterations
of the training process.
The default schedule simply records all the events continuously for the
duration of the context manager.
.. note::
Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:
``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``
After profiling, result files can be found in the specified directory. Use the command:
``tensorboard --logdir dir_name``
to see the results in TensorBoard.
For more information, see
`PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__
.. note::
Enabling shape and stack tracing results in additional overhead.
When record_shapes=True is specified, profiler will temporarily hold references to the tensors;
that may further prevent certain optimizations that depend on the reference count and introduce
extra tensor copies.
Examples:
.. code-block:: python
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
]
) as p:
code_to_profile()
print(p.key_averages().table(sort_by="self_cuda_time_total", row_limit=-1))
Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:
.. code-block:: python
# Non-default profiler schedule allows user to turn profiler on and off
# on different iterations of the training loop;
# trace_handler is called every time a new trace becomes available
def trace_handler(prof):
print(
prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=-1)
)
# prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json")
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
# In this example with wait=1, warmup=1, active=2, repeat=1,
# profiler will skip the first step/iteration,
# start warming up on the second, record
# the third and the forth iterations,
# after which the trace will become available
# and on_trace_ready (when set) is called;
# the cycle repeats starting with the next step
schedule=torch.profiler.schedule(wait=1, warmup=1, active=2, repeat=1),
on_trace_ready=trace_handler,
# on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')
# used when outputting for tensorboard
) as p:
for iter in range(N):
code_iteration_to_profile(iter)
# send a signal to the profiler that the next iteration has started
p.step()
The following sample shows how to setup up an Execution Trace Observer (`execution_trace_observer`)
.. code-block:: python
with torch.profiler.profile(
...
execution_trace_observer=(
ExecutionTraceObserver().register_callback("./execution_trace.json")
),
) as p:
for iter in range(N):
code_iteration_to_profile(iter)
p.step()
You can also refer to test_execution_trace_with_kineto() in tests/profiler/test_profiler.py.
Note: One can also pass any object satisfying the _ITraceObserver interface.
"""
def __init__(
self,
*,
activities: Optional[Iterable[ProfilerActivity]] = None,
schedule: Optional[Callable[[int], ProfilerAction]] = None,
on_trace_ready: Optional[Callable[..., Any]] = None,
record_shapes: bool = False,
profile_memory: bool = False,
with_stack: bool = False,
with_flops: bool = False,
with_modules: bool = False,
experimental_config: Optional[_ExperimentalConfig] = None,
execution_trace_observer: Optional[_ITraceObserver] = None,
acc_events: bool = False,
# deprecated:
use_cuda: Optional[bool] = None,
custom_trace_id_callback: Optional[Callable[[], str]] = None,
) -> None:
activities_set = set(activities) if activities else supported_activities()
if use_cuda is not None:
warn(
"`use_cuda` is deprecated, use `activities` argument instead",
FutureWarning,
stacklevel=2,
)
if use_cuda:
activities_set.add(ProfilerActivity.CUDA)
elif ProfilerActivity.CUDA in activities_set:
activities_set.remove(ProfilerActivity.CUDA)
if len(activities_set) == 0:
raise AssertionError("No valid profiler activities found")
super().__init__(
activities=activities,
record_shapes=record_shapes,
profile_memory=profile_memory,
with_stack=with_stack,
with_flops=with_flops,
with_modules=with_modules,
experimental_config=experimental_config,
execution_trace_observer=execution_trace_observer
if execution_trace_observer
else ExecutionTraceObserver.build_execution_trace_obs_from_env(),
acc_events=acc_events,
custom_trace_id_callback=custom_trace_id_callback,
)
if schedule:
self.schedule = schedule
# add step markers into the trace and table view
self.record_steps = True
else:
self.schedule = _default_schedule_fn
self.record_steps = False
self.on_trace_ready = on_trace_ready
self.step_num = 0
self.current_action = self.schedule(self.step_num)
self.step_rec_fn: Optional[prof.record_function] = None
self.action_map: dict[
tuple[ProfilerAction, Optional[ProfilerAction]], list[Any]
] = {
# key is (prev_action, current_action), value is action list corresponding to the state pair.
(ProfilerAction.NONE, ProfilerAction.NONE): [],
(ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace],
(ProfilerAction.NONE, ProfilerAction.RECORD): [
self.prepare_trace,
self.start_trace,
],
(ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [
self.prepare_trace,
self.start_trace,
],
(ProfilerAction.WARMUP, ProfilerAction.NONE): [
partial(warn, "Incorrect schedule: WARMUP followed by NONE"),
self.start_trace,
self.stop_trace,
],
(ProfilerAction.WARMUP, ProfilerAction.WARMUP): [],
(ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace],
(ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace],
(ProfilerAction.RECORD, ProfilerAction.NONE): [
partial(warn, "Incorrect schedule: RECORD followed by NONE"),
self.stop_trace,
],
(ProfilerAction.RECORD, ProfilerAction.WARMUP): [
partial(warn, "Incorrect schedule: RECORD followed by WARMUP"),
self.stop_trace,
],
(ProfilerAction.RECORD, ProfilerAction.RECORD): [],
(ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [
self.stop_trace,
self._trace_ready,
],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [
self.stop_trace,
self._trace_ready,
self.prepare_trace,
],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [
self.stop_trace,
self._trace_ready,
self.prepare_trace,
self.start_trace,
],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [
self.stop_trace,
self._trace_ready,
self.prepare_trace,
self.start_trace,
],
# used for exit action
(ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace],
(ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready],
(ProfilerAction.RECORD_AND_SAVE, None): [
self.stop_trace,
self._trace_ready,
],
}
# Start tracking increments to profiler step, this will be used
# by Kineto
prof.KinetoStepTracker.init_step_count(PROFILER_STEP_NAME)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
prof.KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME)
if self.execution_trace_observer:
self.execution_trace_observer.cleanup()
def start(self) -> None:
self._transit_action(ProfilerAction.NONE, self.current_action)
if self.record_steps:
self.step_rec_fn = prof.record_function(
"ProfilerStep#" + str(self.step_num)
)
self.step_rec_fn.__enter__()
def stop(self) -> None:
if self.record_steps and self.step_rec_fn:
self.step_rec_fn.__exit__(None, None, None)
self._transit_action(self.current_action, None)
def step(self) -> None:
"""
Signals the profiler that the next profiling step has started.
"""
if self.record_steps and self.step_rec_fn:
self.step_rec_fn.__exit__(None, None, None)
prev_action = self.current_action
self.step_num += 1
self.current_action = self.schedule(self.step_num)
self._transit_action(prev_action, self.current_action)
if os.environ.get("KINETO_USE_DAEMON", "") or (
is_fbcode() and os.environ.get("KINETO_FORCE_STEP_HOOK", "")
):
prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME)
if self.record_steps:
self.step_rec_fn = prof.record_function(
"ProfilerStep#" + str(self.step_num)
)
self.step_rec_fn.__enter__()
def set_custom_trace_id_callback(self, callback) -> None:
"""
Sets a callback to be called when a new trace ID is generated.
"""
self.custom_trace_id_callback = callback
def get_trace_id(self):
"""
Returns the current trace ID.
"""
if self.profiler is None:
return None
return self.profiler.trace_id
def _trace_ready(self) -> None:
if self.on_trace_ready:
self.on_trace_ready(self)
def _transit_action(self, prev_action, current_action) -> None:
action_list = self.action_map.get((prev_action, current_action))
if action_list:
for action in action_list:
action()
def _stats(self) -> Optional[prof._ProfilerStats]:
if self.profiler is None:
return None
return self.profiler._stats
| profile |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/test/utils.py | {
"start": 949,
"end": 1189
} | class ____(Protocol):
@property
def data(self) -> Mapping[str, Any]: ...
@property
def errors(self) -> Optional[Sequence[str]]: ...
Selector: TypeAlias = dict[str, Any]
GqlVariables: TypeAlias = Mapping[str, Any]
| GqlResult |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 15961,
"end": 16118
} | class ____(TicketActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.JIRA_SERVER
| JiraServerActionTranslatorBase |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 6107,
"end": 7961
} | class ____(App[None]):
"""Base application class that can be used to record keystrokes."""
ALPHAS = "abcxyz"
"""str: The alpha keys to test against."""
ALL_KEYS = [*ALPHAS, *MOVEMENT_KEYS]
"""list[str]: All the test keys."""
@staticmethod
def make_bindings(action_prefix: str = "") -> list[Binding]:
"""Make the binding list for testing an app.
Args:
action_prefix (str, optional): An optional prefix for the action name.
Returns:
list[Binding]: The resulting list of bindings.
"""
return [
Binding(key, f"{action_prefix}record('{key}')", key)
for key in [*AppKeyRecorder.ALPHAS, *MOVEMENT_KEYS]
]
def __init__(self) -> None:
"""Initialise the recording app."""
super().__init__()
self.pressed_keys: list[str] = []
async def action_record(self, key: str) -> None:
"""Record a key, as used from a binding.
Args:
key (str): The name of the key to record.
"""
self.pressed_keys.append(key)
def all_recorded(self, marker_prefix: str = "") -> None:
"""Were all the bindings recorded from the presses?
Args:
marker_prefix (str, optional): An optional prefix for the result markers.
"""
assert self.pressed_keys == [f"{marker_prefix}{key}" for key in self.ALL_KEYS]
##############################################################################
# An app with bindings for movement keys.
#
# Having gone through various permutations of testing for what bindings are
# seen to be in place, we now move on to adding bindings, invoking them and
# seeing what happens. First off let's start with an application that has
# bindings, both for an alpha key, and also for all of the movement keys.
| AppKeyRecorder |
python | fluentpython__example-code | 21-class-metaprog/bulkfood/model_v7.py | {
"start": 1694,
"end": 1800
} | class ____(metaclass=EntityMeta): # <3>
"""Business entity with validated fields"""
# END MODEL_V7
| Entity |
python | boto__boto3 | tests/unit/test_utils.py | {
"start": 741,
"end": 1713
} | class ____(unittest.TestCase):
def test_lazy_call(self):
with mock.patch('boto3.utils.import_module') as importer:
importer.return_value = FakeModule
lazy_function = utils.lazy_call(
'fakemodule.FakeModule.entry_point'
)
assert lazy_function(a=1, b=2) == {'a': 1, 'b': 2}
def test_import_module(self):
module = utils.import_module('boto3.s3.transfer')
assert module.__name__ == 'boto3.s3.transfer'
assert isinstance(module, types.ModuleType)
def test_inject_attributes_with_no_shadowing(self):
class_attributes = {}
utils.inject_attribute(class_attributes, 'foo', 'bar')
assert class_attributes['foo'] == 'bar'
def test_shadowing_existing_var_raises_exception(self):
class_attributes = {'foo': 'preexisting'}
with pytest.raises(RuntimeError):
utils.inject_attribute(class_attributes, 'foo', 'bar')
| TestUtils |
python | google__jax | jax/_src/stages.py | {
"start": 35537,
"end": 35651
} | class ____(NamedTuple):
source_info: source_info_util.SourceInfo
eqn_name: str
@dataclasses.dataclass
| SourceInfo |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/offsets.py | {
"start": 87,
"end": 340
} | class ____(Static):
DEFAULT_CSS = """
Box {
border: solid white;
background: darkblue;
width: 16;
height: auto;
}
"""
def compose(self) -> ComposeResult:
yield Label("FOO\nBAR\nBAZ")
| Box |
python | python-poetry__poetry | src/poetry/vcs/git/backend.py | {
"start": 2599,
"end": 6436
} | class ____:
branch: str | None = None
revision: str | None = None
tag: str | None = None
ref: bytes = dataclasses.field(default_factory=lambda: b"HEAD")
def resolve(self, remote_refs: FetchPackResult, repo: Repo) -> None:
"""
Resolve the ref using the provided remote refs.
"""
self._normalise(remote_refs=remote_refs, repo=repo)
self._set_head(remote_refs=remote_refs)
def _normalise(self, remote_refs: FetchPackResult, repo: Repo) -> None:
"""
Internal helper method to determine if given revision is
1. a branch or tag; if so, set corresponding properties.
2. a short sha; if so, resolve full sha and set as revision
"""
if self.revision:
ref = f"refs/tags/{self.revision}".encode()
if ref in remote_refs.refs or annotated_tag(ref) in remote_refs.refs:
# this is a tag, incorrectly specified as a revision, tags take priority
self.tag = self.revision
self.revision = None
elif (
self.revision.encode("utf-8") in remote_refs.refs
or f"refs/heads/{self.revision}".encode() in remote_refs.refs
):
# this is most likely a ref spec or a branch incorrectly specified
self.branch = self.revision
self.revision = None
elif (
self.branch
and f"refs/heads/{self.branch}".encode() not in remote_refs.refs
and (
f"refs/tags/{self.branch}".encode() in remote_refs.refs
or annotated_tag(f"refs/tags/{self.branch}") in remote_refs.refs
)
):
# this is a tag incorrectly specified as a branch
self.tag = self.branch
self.branch = None
if self.revision and self.is_sha_short:
# revision is a short sha, resolve to full sha
short_sha = self.revision.encode("utf-8")
for sha in remote_refs.refs.values():
if sha.startswith(short_sha):
self.revision = sha.decode("utf-8")
return
# no heads with such SHA, let's check all objects
for sha in repo.object_store.iter_prefix(short_sha):
self.revision = sha.decode("utf-8")
return
def _set_head(self, remote_refs: FetchPackResult) -> None:
"""
Internal helper method to populate ref and set it's sha as the remote's head
and default ref.
"""
self.ref = remote_refs.symrefs[b"HEAD"]
if self.revision:
head = self.revision.encode("utf-8")
else:
if self.tag:
ref = f"refs/tags/{self.tag}".encode()
annotated = annotated_tag(ref)
self.ref = annotated if annotated in remote_refs.refs else ref
elif self.branch:
self.ref = (
self.branch.encode("utf-8")
if self.is_ref
else f"refs/heads/{self.branch}".encode()
)
head = remote_refs.refs[self.ref]
remote_refs.refs[self.ref] = remote_refs.refs[b"HEAD"] = head
@property
def key(self) -> str:
return self.revision or self.branch or self.tag or self.ref.decode("utf-8")
@property
def is_sha(self) -> bool:
return is_revision_sha(revision=self.revision)
@property
def is_ref(self) -> bool:
return self.branch is not None and (
self.branch.startswith("refs/") or self.branch == "HEAD"
)
@property
def is_sha_short(self) -> bool:
return self.revision is not None and self.is_sha and len(self.revision) < 40
@dataclasses.dataclass
| GitRefSpec |
python | huggingface__transformers | tests/models/nllb_moe/test_modeling_nllb_moe.py | {
"start": 22345,
"end": 33459
} | class ____(unittest.TestCase):
r"""
Switch Transformers has different blocks from classic transformer based models.
The Swift MLP contains a Router class, that has to be tested to check if it is correctly implemented
Original implementation of the routers here:
"""
config = NllbMoeConfig(
num_experts=4,
hidden_size=32,
d_ff=16,
expert_capacity=4,
)
batch_size = 2
sequence_length = 20
def test_top_2_routing(self):
# test routing with minimal reproduction
mask = torch.ones((self.batch_size, self.sequence_length), dtype=torch.bool)
mask[0][0] = False
mask[1][0] = False
mask = mask.reshape(-1)
set_seed(0)
hidden_states = torch.rand((self.batch_size, self.sequence_length, self.config.hidden_size))
classifier = torch.nn.Linear(self.config.hidden_size, self.config.num_experts)
hf_router = NllbMoeTop2Router(self.config)
_, _, hidden_dim = hidden_states.shape
logits = classifier(hidden_states.reshape((self.batch_size * self.sequence_length), hidden_dim))
top_1_mask, router_probs = hf_router.route_tokens(logits, padding_mask=mask)
torch.argmax(top_1_mask, dim=-1)
router_mask = router_probs.bool()
set_seed(0)
experts = [
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
]
hidden_states = hidden_states.reshape((self.batch_size * self.sequence_length), hidden_dim)
masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
for idx, expert in enumerate(experts):
token_indices = router_mask[:, idx]
combining_weights = router_probs[token_indices, idx]
expert_output = expert(masked_hidden_states[idx, token_indices])
expert_output *= 1 - self.config.moe_token_dropout
masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output)
hidden_states = masked_hidden_states.sum(dim=0).reshape(self.batch_size, self.sequence_length, hidden_dim)
EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES = torch.Tensor([[ 7.0340e-04, 2.7997e-03, -1.3351e-02, -7.6705e-03, -3.5089e-03,3.9773e-03, 7.4593e-03, 1.2566e-02, 3.5860e-03, -2.7448e-02,-1.3731e-02, -1.0534e-02, -1.3606e-02, -1.5048e-02, -2.8914e-03,-5.0371e-03, -1.3963e-03, 6.0076e-03, -1.1380e-02, -1.4620e-02, 5.2401e-03, 8.4660e-04, -1.5319e-03, -1.6735e-02, 1.1302e-02, 3.6119e-03, 4.6084e-03, -1.3458e-02, 7.7792e-05, 1.4312e-02, 4.9107e-03, -5.0936e-03], [-4.4538e-03, 3.1026e-03, 1.4121e-04, -4.8121e-03, -5.6279e-03, 7.2493e-03, 3.9769e-03, 1.1114e-02, -1.5666e-03, -2.3477e-02, 8.7268e-03, 1.3446e-02, -2.8845e-05, -1.7287e-02, 8.7619e-03, -4.5316e-03, -1.2164e-02, 5.7461e-03, -4.5861e-03, -9.3907e-03, 2.9808e-02, 8.9206e-04, -7.6232e-04, -1.4173e-02, 3.0208e-03, 1.5310e-02, 9.7717e-03, 3.1014e-03, 7.8042e-03, 8.0197e-03, 3.4784e-03, -7.1728e-03]]) # fmt: skip
torch.testing.assert_close(hidden_states.mean(1), EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES, atol=1e-4, rtol=1e-4)
def test_batch_prioritized_routing(self):
set_seed(0)
config = NllbMoeConfig(
num_experts=4, hidden_size=32, d_ff=16, expert_capacity=4, second_expert_policy="random"
)
mask = torch.zeros((self.batch_size * self.sequence_length), dtype=torch.bool)
logits = torch.rand((self.batch_size * self.sequence_length, 4))
config.batch_prioritized_routing = True
router = NllbMoeTop2Router(config)
top_1_mask, _ = router.route_tokens(logits, padding_mask=mask)
# check that the routing is batch first. One of the last token is routed while expert capacity is very small
# this means that it had a greater probability of being routed
assert top_1_mask[-1, 0] == 1
def test_second_expert_policy(self):
config = NllbMoeConfig(
num_experts=4,
hidden_size=32,
d_ff=16,
expert_capacity=40,
)
set_seed(0)
mask = torch.zeros((self.batch_size * self.sequence_length), dtype=torch.bool)
logits = torch.rand((self.batch_size * self.sequence_length, 4))
set_seed(0)
config.second_expert_policy = "random"
router = NllbMoeTop2Router(config)
top_1_mask, router_probs = router.route_tokens(logits, padding_mask=mask)
set_seed(0)
config.second_expert_policy = "sampling"
router = NllbMoeTop2Router(config)
top_1_mask_sp, router_probs_sp = router.route_tokens(logits, padding_mask=mask)
set_seed(0)
config.second_expert_policy = "all"
router = NllbMoeTop2Router(config)
top_1_mask_all, router_probs_all = router.route_tokens(logits, padding_mask=mask)
# fmt: off
EXPECTED_ROUTER_ALL = torch.tensor([[0.3902, 0.0000, 0.0000, 0.6098], [0.0000, 0.0000, 0.7770, 0.2230], [0.0000, 0.0000, 0.2726, 0.7274], [0.4221, 0.0000, 0.5779, 0.0000], [0.0000, 0.0000, 0.7810, 0.2190], [0.5518, 0.4482, 0.0000, 0.0000], [0.0000, 0.4060, 0.5940, 0.0000], [0.7340, 0.0000, 0.0000, 0.2660], [0.4778, 0.5222, 0.0000, 0.0000], [0.0000, 0.3984, 0.0000, 0.6016], [0.0000, 0.0548, 0.9452, 0.0000], [0.6796, 0.0000, 0.0000, 0.3204], [0.0700, 0.0000, 0.9300, 0.0000], [0.1854, 0.0000, 0.8146, 0.0000], [0.6775, 0.3225, 0.0000, 0.0000], [0.0000, 0.0000, 0.5027, 0.4973], [0.0000, 0.6577, 0.0000, 0.3423], [0.0000, 0.7767, 0.0000, 0.2233], [0.1944, 0.8056, 0.0000, 0.0000], [0.0000, 0.3073, 0.0000, 0.6927], [0.0000, 0.5655, 0.4345, 0.0000], [0.5791, 0.0000, 0.0000, 0.4209], [0.0440, 0.0000, 0.9560, 0.0000], [0.0083, 0.9917, 0.0000, 0.0000], [0.0000, 0.8395, 0.0000, 0.1605], [0.0000, 0.1458, 0.0000, 0.8542], [0.0000, 0.8534, 0.1466, 0.0000], [0.4938, 0.0000, 0.0000, 0.5062], [0.1329, 0.8671, 0.0000, 0.0000], [0.3058, 0.0000, 0.6942, 0.0000], [0.4458, 0.0000, 0.0000, 0.5542], [0.9053, 0.0947, 0.0000, 0.0000], [0.0000, 0.7563, 0.2437, 0.0000], [0.0000, 0.0000, 0.4096, 0.5904], [0.4551, 0.0000, 0.0000, 0.5449], [0.8502, 0.1498, 0.0000, 0.0000], [0.0000, 0.6312, 0.3688, 0.0000], [0.8920, 0.0000, 0.0000, 0.1080], [0.1913, 0.0000, 0.0000, 0.8087], [0.2491, 0.7509, 0.0000, 0.0000]])
EXPECTED_ROUTER_SP = torch.tensor([[0.0000, 0.6539, 0.0000, 0.3461], [0.0000, 0.0000, 0.3998, 0.6002], [0.0000, 0.5574, 0.0000, 0.4426], [0.0000, 0.0000, 0.4441, 0.5559], [0.0000, 0.6545, 0.3455, 0.0000], [0.4419, 0.5581, 0.0000, 0.0000], [0.0000, 0.4014, 0.5986, 0.0000], [0.3215, 0.0000, 0.0000, 0.6785], [0.4765, 0.5235, 0.0000, 0.0000], [0.0000, 0.5467, 0.0000, 0.4533], [0.0000, 0.4156, 0.5844, 0.0000], [0.3370, 0.0000, 0.6630, 0.0000], [0.0000, 0.0000, 0.4558, 0.5442], [0.4659, 0.0000, 0.5341, 0.0000], [0.6179, 0.3821, 0.0000, 0.0000], [0.6277, 0.0000, 0.3723, 0.0000], [0.5836, 0.4164, 0.0000, 0.0000], [0.0000, 0.6600, 0.0000, 0.3400], [0.0000, 0.4933, 0.0000, 0.5067], [0.6016, 0.0000, 0.0000, 0.3984], [0.0000, 0.5160, 0.4840, 0.0000], [0.5799, 0.0000, 0.0000, 0.4201], [0.0000, 0.0000, 0.4826, 0.5174], [0.5426, 0.4574, 0.0000, 0.0000], [0.5362, 0.4638, 0.0000, 0.0000], [0.6448, 0.0000, 0.0000, 0.3552], [0.0000, 0.5909, 0.4091, 0.0000], [0.4196, 0.0000, 0.0000, 0.5804], [0.3191, 0.6809, 0.0000, 0.0000], [0.0000, 0.0000, 0.4886, 0.5114], [0.4899, 0.0000, 0.0000, 0.5101], [0.4123, 0.0000, 0.5877, 0.0000], [0.0000, 0.3736, 0.0000, 0.6264], [0.0000, 0.0000, 0.6009, 0.3991], [0.4246, 0.0000, 0.0000, 0.5754], [0.4997, 0.0000, 0.5003, 0.0000], [0.0000, 0.3595, 0.6405, 0.0000], [0.5433, 0.0000, 0.0000, 0.4567], [0.0000, 0.6806, 0.0000, 0.3194], [0.6689, 0.3311, 0.0000, 0.0000]])
EXPECTED_ROUTER = torch.tensor([[0.4324, 0.5676, 0.0000, 0.0000], [0.0000, 0.4348, 0.0000, 0.5652], [0.4559, 0.5441, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.4744, 0.5256, 0.0000, 0.0000], [0.0000, 0.5103, 0.0000, 0.4897], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.5467, 0.0000, 0.4533], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.0000, 0.0000, 1.0000, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000], [0.5063, 0.4937, 0.0000, 0.0000], [0.5396, 0.0000, 0.0000, 0.4604], [0.4576, 0.5424, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.5134, 0.0000, 0.4866, 0.0000], [0.0000, 0.5160, 0.4840, 0.0000], [0.5439, 0.0000, 0.4561, 0.0000], [0.4849, 0.0000, 0.0000, 0.5151], [0.5426, 0.4574, 0.0000, 0.0000], [0.5362, 0.4638, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.4448, 0.0000, 0.5552], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4886, 0.5114], [0.4899, 0.0000, 0.0000, 0.5101], [0.0000, 0.0000, 0.5296, 0.4704], [0.0000, 0.0000, 0.4469, 0.5531], [0.0000, 0.4053, 0.5947, 0.0000], [0.0000, 0.0000, 0.4460, 0.5540], [0.4997, 0.0000, 0.5003, 0.0000], [0.0000, 0.0000, 0.5851, 0.4149], [1.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.5010, 0.4990, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000]])
EXPECTED_TOP_1_ALL = torch.LongTensor([[0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]])
EXPECTED_TOP_1_SP = torch.LongTensor([[0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
# `sampling` and `random` do not affect the mask of the top_1 router
# fmt: on
torch.testing.assert_close(router_probs_all, EXPECTED_ROUTER_ALL, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(router_probs_sp, EXPECTED_ROUTER_SP, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(router_probs, EXPECTED_ROUTER, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(top_1_mask_all, EXPECTED_TOP_1_ALL, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(top_1_mask_sp, EXPECTED_TOP_1_SP, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(top_1_mask, EXPECTED_TOP_1_SP, rtol=1e-4, atol=1e-4)
| NllbMoeRouterTest |
python | dask__distributed | distributed/protocol/serialize.py | {
"start": 18618,
"end": 26763
} | class ____:
"""An object that is already pickled into header and frames
Normal pickled objects are unpickled by the scheduler.
"""
def __init__(self, header, frames):
self.header = header
self.frames = frames
def __eq__(self, other):
return (
isinstance(other, type(self))
and other.header == self.header
and other.frames == self.frames
)
def __ne__(self, other):
return not (self == other)
def nested_deserialize(x):
warnings.warn(
"nested_deserialize is deprecated and will be removed in a future release.",
DeprecationWarning,
)
return _nested_deserialize(x, emulate_deserialize=True)
def _nested_deserialize(x, emulate_deserialize=True):
"""
Replace all Serialize and Serialized values nested in *x*
with the original values. Returns a copy of *x*.
>>> msg = {'op': 'update', 'data': to_serialize(123)}
>>> nested_deserialize(msg)
{'op': 'update', 'data': 123}
"""
def replace_inner(x):
if type(x) is dict:
x = x.copy()
for k, v in x.items():
typ = type(v)
if typ is dict or typ is list:
x[k] = replace_inner(v)
if emulate_deserialize:
if typ is Serialize:
x[k] = v.data
elif typ is Serialized:
x[k] = deserialize(v.header, v.frames)
if typ is ToPickle:
x[k] = v.data
elif type(x) is list:
x = list(x)
for k, v in enumerate(x):
typ = type(v)
if typ is dict or typ is list:
x[k] = replace_inner(v)
if emulate_deserialize:
if typ is Serialize:
x[k] = v.data
elif typ is Serialized:
x[k] = deserialize(v.header, v.frames)
if typ is ToPickle:
x[k] = v.data
return x
return replace_inner(x)
@sizeof.register(ToPickle)
@sizeof.register(Serialize)
def sizeof_serialize(obj):
return sizeof(obj.data)
@sizeof.register(Pickled)
@sizeof.register(Serialized)
def sizeof_serialized(obj):
return sizeof(obj.header) + sizeof(obj.frames)
def serialize_bytelist(
x: object, compression: str | None | Literal[False] = "auto", **kwargs: Any
) -> list[bytes | bytearray | memoryview]:
header, frames = serialize_and_split(x, **kwargs)
if frames:
header["compression"], frames = zip(
*(maybe_compress(frame, compression=compression) for frame in frames)
)
header["count"] = len(frames)
header = msgpack.dumps(header, use_bin_type=True)
frames2 = [header, *frames]
frames2.insert(0, pack_frames_prelude(frames2))
return frames2
def serialize_bytes(x: object, **kwargs: Any) -> bytes:
L = serialize_bytelist(x, **kwargs)
return b"".join(L)
def deserialize_bytes(b: bytes | bytearray | memoryview) -> Any:
"""Deserialize the output of :func:`serialize_bytes`"""
frames = unpack_frames(b)
bin_header, frames = frames[0], frames[1:]
if bin_header:
header = msgpack.loads(bin_header, raw=False, use_list=False)
else:
header = {}
frames2 = decompress(header, frames)
return merge_and_deserialize(header, frames2)
################################
# Class specific serialization #
################################
def register_serialization(cls, serialize, deserialize):
"""Register a new class for dask-custom serialization
Parameters
----------
cls : type
serialize : callable(cls) -> Tuple[Dict, List[bytes]]
deserialize : callable(header: Dict, frames: List[bytes]) -> cls
Examples
--------
>>> class Human:
... def __init__(self, name):
... self.name = name
>>> def serialize(human):
... header = {}
... frames = [human.name.encode()]
... return header, frames
>>> def deserialize(header, frames):
... return Human(frames[0].decode())
>>> register_serialization(Human, serialize, deserialize)
>>> serialize(Human('Alice'))
({}, [b'Alice'])
See Also
--------
serialize
deserialize
"""
if isinstance(cls, str):
raise TypeError(
"Strings are no longer accepted for type registration. "
"Use dask_serialize.register_lazy instead"
)
dask_serialize.register(cls)(serialize)
dask_deserialize.register(cls)(deserialize)
def register_serialization_lazy(toplevel, func):
"""Register a registration function to be called if *toplevel*
module is ever loaded.
"""
raise Exception("Serialization registration has changed. See documentation")
@partial(normalize_token.register, Serialized)
def normalize_Serialized(o):
return [o.header] + o.frames # for dask.tokenize.tokenize
# Teach serialize how to handle bytes
@dask_serialize.register(bytes)
def _serialize_bytes(obj):
header = {} # no special metadata
frames = [obj]
return header, frames
# Teach serialize how to handle bytestrings
@dask_serialize.register(bytearray)
def _serialize_bytearray(obj):
header = {} # no special metadata
frames = [obj]
return header, frames
@dask_deserialize.register(bytes)
def _deserialize_bytes(header, frames):
if len(frames) == 1 and isinstance(frames[0], bytes):
return frames[0]
else:
return b"".join(frames)
@dask_deserialize.register(bytearray)
def _deserialize_bytearray(header, frames):
if len(frames) == 1 and isinstance(frames[0], bytearray):
return frames[0]
else:
return bytearray().join(frames)
@dask_serialize.register(array)
def _serialize_array(obj):
header = {"typecode": obj.typecode, "writeable": (None,)}
frames = [memoryview(obj)]
return header, frames
@dask_deserialize.register(array)
def _deserialize_array(header, frames):
a = array(header["typecode"])
nframes = len(frames)
if nframes == 1:
a.frombytes(ensure_memoryview(frames[0]))
elif nframes > 1:
a.frombytes(b"".join(map(ensure_memoryview, frames)))
return a
@dask_serialize.register(memoryview)
def _serialize_memoryview(obj):
if obj.format == "O":
raise ValueError("Cannot serialize `memoryview` containing Python objects")
if not obj and obj.ndim > 1:
raise ValueError("Cannot serialize empty non-1-D `memoryview`")
header = {"format": obj.format, "shape": obj.shape}
frames = [obj]
return header, frames
@dask_deserialize.register(memoryview)
def _deserialize_memoryview(header, frames):
if len(frames) == 1:
out = ensure_memoryview(frames[0])
else:
out = memoryview(b"".join(frames))
# handle empty `memoryview`s
if out:
out = out.cast(header["format"], header["shape"])
else:
out = out.cast(header["format"])
assert out.shape == header["shape"]
return out
#########################
# Descend into __dict__ #
#########################
def _is_msgpack_serializable(v):
typ = type(v)
return (
v is None
or typ is str
or typ is bool
or typ is bytes
or typ is int
or typ is float
or isinstance(v, dict)
and all(map(_is_msgpack_serializable, v.values()))
and all(type(x) is str for x in v.keys())
or isinstance(v, (list, tuple))
and all(map(_is_msgpack_serializable, v))
)
def _is_dumpable(v):
typ = type(v)
return (
v is None
or typ is str
or typ is bool
or typ is bytes
or typ is int
or typ is float
or typ is Pickled
or typ is Serialize
or typ is Serialized
or typ is ToPickle
or isinstance(v, dict)
and all(map(_is_dumpable, v.values()))
and all(type(x) is str for x in v.keys())
or isinstance(v, (list, tuple))
and all(map(_is_dumpable, v))
)
| Pickled |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 5027,
"end": 5757
} | class ____:
foo: InitVar[str]
bar: str
MyDataClass(foo='foo', bar='bar')
def get_my_custom_validator(field_name: str) -> Any:
@validator(field_name, allow_reuse=True)
def my_custom_validator(cls: Any, v: int) -> int:
return v
return my_custom_validator
def foo() -> None:
class MyModel(BaseModel):
number: int
custom_validator = get_my_custom_validator('number') # type: ignore[pydantic-field]
@model_validator(mode='before')
@classmethod
def validate_before(cls, values: Any) -> Any:
return values
@model_validator(mode='after')
def validate_after(self) -> Self:
return self
MyModel(number=2)
| MyDataClass |
python | google__pytype | pytype/abstract/abstract_test.py | {
"start": 1236,
"end": 7399
} | class ____(AbstractTestBase):
def setUp(self):
super().setUp()
self._is_instance = special_builtins.IsInstance.make(self._ctx)
# Easier access to some primitive instances.
self._bool = self._ctx.convert.primitive_instances[bool]
self._int = self._ctx.convert.primitive_instances[int]
self._str = self._ctx.convert.primitive_instances[str]
# Values that represent primitive classes.
self._obj_class = self._ctx.convert.primitive_classes[object]
self._int_class = self._ctx.convert.primitive_classes[int]
self._str_class = self._ctx.convert.primitive_classes[str]
def assert_call(self, expected, left, right):
"""Check that call() returned the desired results.
Args:
expected: A dict from values to source sets, where a source set is
represented by the sorted binding names separated by spaces, for example
"left:0 right:1" would indicate binding #0 of variable "left" and
binding #1 of variable "right".
left: A Variable to use as the first arg to call().
right: A Variable to use as the second arg to call().
"""
name_map = {left: "left", right: "right"}
node, result = self._is_instance.call(
self._node,
None,
function.Args((left, right), self.new_dict(), None, None),
)
self.assertIn(node, self._node.outgoing)
result_map = {}
# Turning source sets into canonical string representations of the binding
# names makes it much easier to debug failures.
for b in result.bindings:
terms = set()
for o in b.origins:
self.assertEqual(node, o.where)
for sources in o.source_sets:
terms.add(
" ".join(
sorted(
"%s:%d"
% (name_map[b.variable], b.variable.bindings.index(b))
for b in sources
)
)
)
result_map[b.data] = terms
self.assertEqual(expected, result_map)
def test_call_single_bindings(self):
right = self.new_var(self._str_class)
left = self.new_var(self._str)
self.assert_call({self._ctx.convert.true: {"left:0 right:0"}}, left, right)
left = self.new_var(self._int)
self.assert_call({self._ctx.convert.false: {"left:0 right:0"}}, left, right)
left = self.new_var(abstract.Unknown(self._ctx))
self.assert_call({self._bool: {"left:0 right:0"}}, left, right)
def test_call_multiple_bindings(self):
left = self.new_var(self._int, self._str)
right = self.new_var(self._int_class, self._str_class)
self.assert_call(
{
self._ctx.convert.true: {"left:0 right:0", "left:1 right:1"},
self._ctx.convert.false: {"left:0 right:1", "left:1 right:0"},
},
left,
right,
)
def test_call_wrong_argcount(self):
node, result = self._is_instance.call(
self._node, None, function.Args((), self.new_dict(), None, None)
)
self.assertEqual(self._node, node)
self.assertIsInstance(
abstract_utils.get_atomic_value(result), abstract.Unsolvable
)
self.assertRegex(str(self._ctx.errorlog), "missing-parameter")
def test_call_wrong_keywords(self):
x = self.new_var(abstract.Unknown(self._ctx))
node, result = self._is_instance.call(
self._node,
None,
function.Args((x, x), self.new_dict(foo=x), None, None),
)
self.assertEqual(self._node, node)
self.assertIsInstance(
abstract_utils.get_atomic_value(result), abstract.Unsolvable
)
self.assertRegex(
str(self._ctx.errorlog), r"foo.*isinstance.*\[wrong-keyword-args\]"
)
def test_is_instance(self):
def check(expected, left, right):
self.assertEqual(expected, self._is_instance._is_instance(left, right))
# Unknown and Unsolvable are ambiguous.
check(None, abstract.Unknown(self._ctx), self._obj_class)
check(None, abstract.Unsolvable(self._ctx), self._obj_class)
# If the object's class has multiple bindings, result is ambiguous.
obj = abstract.SimpleValue("foo", self._ctx)
check(None, obj, self._obj_class)
obj.set_class(self._node, self.new_var(self._str_class, self._int_class))
check(None, obj, self._str_class)
# If the class_spec is not a class, result is ambiguous.
check(None, self._str, self._str)
# Result is True/False depending on if the class is in the object's mro.
check(True, self._str, self._obj_class)
check(True, self._str, self._str_class)
check(False, self._str, self._int_class)
def test_flatten(self):
def maybe_var(v):
return v if isinstance(v, cfg.Variable) else self.new_var(v)
def new_tuple(*args):
pyval = tuple(maybe_var(a) for a in args)
return self._ctx.convert.tuple_to_value(pyval)
def check(expected_ambiguous, expected_classes, value):
classes = []
ambiguous = abstract_utils.flatten(value, classes)
self.assertEqual(expected_ambiguous, ambiguous)
self.assertEqual(expected_classes, classes)
unknown = abstract.Unknown(self._ctx)
# Simple values.
check(False, [self._str_class], self._str_class)
check(True, [], self._str)
check(True, [], unknown)
# (str, int)
check(
False,
[self._str_class, self._int_class],
new_tuple(self._str_class, self._int_class),
)
# (str, ?, int)
check(
True,
[self._str_class, self._int_class],
new_tuple(self._str_class, unknown, self._int_class),
)
# (str, (int, object))
check(
False,
[self._str_class, self._int_class, self._obj_class],
new_tuple(self._str_class, new_tuple(self._int_class, self._obj_class)),
)
# (str, (?, object))
check(
True,
[self._str_class, self._obj_class],
new_tuple(self._str_class, new_tuple(unknown, self._obj_class)),
)
# A variable with multiple bindings is ambiguous.
# (str, int | object)
check(
True,
[self._str_class],
new_tuple(
self._str_class, self.new_var(self._int_class, self._obj_class)
),
)
| IsInstanceTest |
python | numpy__numpy | numpy/distutils/fcompiler/environment.py | {
"start": 73,
"end": 3080
} | class ____:
def __init__(self, distutils_section='ALL', **kw):
self._distutils_section = distutils_section
self._conf_keys = kw
self._conf = None
self._hook_handler = None
def dump_variable(self, name):
conf_desc = self._conf_keys[name]
hook, envvar, confvar, convert, append = conf_desc
if not convert:
convert = lambda x : x
print('%s.%s:' % (self._distutils_section, name))
v = self._hook_handler(name, hook)
print(' hook : %s' % (convert(v),))
if envvar:
v = os.environ.get(envvar, None)
print(' environ: %s' % (convert(v),))
if confvar and self._conf:
v = self._conf.get(confvar, (None, None))[1]
print(' config : %s' % (convert(v),))
def dump_variables(self):
for name in self._conf_keys:
self.dump_variable(name)
def __getattr__(self, name):
try:
conf_desc = self._conf_keys[name]
except KeyError:
raise AttributeError(
f"'EnvironmentConfig' object has no attribute '{name}'"
) from None
return self._get_var(name, conf_desc)
def get(self, name, default=None):
try:
conf_desc = self._conf_keys[name]
except KeyError:
return default
var = self._get_var(name, conf_desc)
if var is None:
var = default
return var
def _get_var(self, name, conf_desc):
hook, envvar, confvar, convert, append = conf_desc
if convert is None:
convert = lambda x: x
var = self._hook_handler(name, hook)
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
envvar_contents = convert(envvar_contents)
if var and append:
if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
var.extend(envvar_contents)
else:
# NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
# to keep old (overwrite flags rather than append to
# them) behavior
var = envvar_contents
else:
var = envvar_contents
if confvar is not None and self._conf:
if confvar in self._conf:
source, confvar_contents = self._conf[confvar]
var = convert(confvar_contents)
return var
def clone(self, hook_handler):
ec = self.__class__(distutils_section=self._distutils_section,
**self._conf_keys)
ec._hook_handler = hook_handler
return ec
def use_distribution(self, dist):
if isinstance(dist, Distribution):
self._conf = dist.get_option_dict(self._distutils_section)
else:
self._conf = dist
| EnvironmentConfig |
python | pytest-dev__pytest-django | tests/test_db_setup.py | {
"start": 4943,
"end": 8089
} | class ____:
db_settings: ClassVar = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "db_name",
"TEST": {"NAME": "test_custom_db_name"},
}
}
def test_sqlite_test_name_used(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
import pytest
from django.db import connections
from django import VERSION
@pytest.mark.django_db
def test_a():
(conn, ) = connections.all()
assert conn.vendor == 'sqlite'
print(conn.settings_dict)
assert conn.settings_dict['NAME'] == 'test_custom_db_name'
"""
)
result = django_pytester.runpytest_subprocess("--tb=short", "-v")
assert result.ret == 0
result.stdout.fnmatch_lines(["*test_a*PASSED*"])
def test_xdist_with_reuse(django_pytester: DjangoPytester) -> None:
pytest.importorskip("xdist")
skip_if_sqlite_in_memory()
drop_database("gw0")
drop_database("gw1")
assert not db_exists("gw0")
assert not db_exists("gw1")
django_pytester.create_test_module(
"""
import pytest
from .app.models import Item
def _check(settings):
# Make sure that the database name looks correct
db_name = settings.DATABASES['default']['NAME']
assert db_name.endswith('_gw0') or db_name.endswith('_gw1')
assert Item.objects.count() == 0
Item.objects.create(name='foo')
assert Item.objects.count() == 1
@pytest.mark.django_db
def test_a(settings):
_check(settings)
@pytest.mark.django_db
def test_b(settings):
_check(settings)
@pytest.mark.django_db
def test_c(settings):
_check(settings)
@pytest.mark.django_db
def test_d(settings):
_check(settings)
"""
)
result = django_pytester.runpytest_subprocess("-vv", "-n2", "-s", "--reuse-db")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
result.stdout.fnmatch_lines(["*PASSED*test_b*"])
result.stdout.fnmatch_lines(["*PASSED*test_c*"])
result.stdout.fnmatch_lines(["*PASSED*test_d*"])
assert db_exists("gw0")
assert db_exists("gw1")
result = django_pytester.runpytest_subprocess("-vv", "-n2", "-s", "--reuse-db")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
result.stdout.fnmatch_lines(["*PASSED*test_b*"])
result.stdout.fnmatch_lines(["*PASSED*test_c*"])
result.stdout.fnmatch_lines(["*PASSED*test_d*"])
result = django_pytester.runpytest_subprocess("-vv", "-n2", "-s", "--reuse-db", "--create-db")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
result.stdout.fnmatch_lines(["*PASSED*test_b*"])
result.stdout.fnmatch_lines(["*PASSED*test_c*"])
result.stdout.fnmatch_lines(["*PASSED*test_d*"])
# Cleanup.
drop_database("gw0")
drop_database("gw1")
| TestSqlite |
python | facelessuser__soupsieve | tests/test_level4/test_future.py | {
"start": 51,
"end": 803
} | class ____(util.TestCase):
"""Test future selectors."""
MARKUP = """
<body>
<div id="div">
<p id="0">Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
</body>
"""
def test_future(self):
"""Test future (should match nothing)."""
self.assert_selector(
self.MARKUP,
"p:future",
[],
flags=util.HTML
)
def test_not_future(self):
"""Test not future."""
self.assert_selector(
self.MARKUP,
"p:not(:future)",
["0"],
flags=util.HTML
)
| TestFuture |
python | python-openxml__python-docx | src/docx/section.py | {
"start": 16433,
"end": 18412
} | class ____(_BaseHeaderFooter):
"""Page header, used for all three types (default, even-page, and first-page).
Note that, like a document or table cell, a header must contain a minimum of one
paragraph and a new or otherwise "empty" header contains a single empty paragraph.
This first paragraph can be accessed as `header.paragraphs[0]` for purposes of
adding content to it. Using :meth:`add_paragraph()` by itself to add content will
leave an empty paragraph above the newly added one.
"""
def _add_definition(self):
"""Return newly-added header part."""
header_part, rId = self._document_part.add_header_part()
self._sectPr.add_headerReference(self._hdrftr_index, rId)
return header_part
@property
def _definition(self):
"""|HeaderPart| object containing content of this header."""
headerReference = self._sectPr.get_headerReference(self._hdrftr_index)
# -- currently this is never called when `._has_definition` evaluates False --
assert headerReference is not None
return self._document_part.header_part(headerReference.rId)
def _drop_definition(self):
"""Remove header definition associated with this section."""
rId = self._sectPr.remove_headerReference(self._hdrftr_index)
self._document_part.drop_header_part(rId)
@property
def _has_definition(self) -> bool:
"""True if a header is explicitly defined for this section."""
headerReference = self._sectPr.get_headerReference(self._hdrftr_index)
return headerReference is not None
@property
def _prior_headerfooter(self):
"""|_Header| proxy on prior sectPr element or None if this is first section."""
preceding_sectPr = self._sectPr.preceding_sectPr
return (
None
if preceding_sectPr is None
else _Header(preceding_sectPr, self._document_part, self._hdrftr_index)
)
| _Header |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_ops_test.py | {
"start": 30884,
"end": 31474
} | class ____(test_util.TensorFlowTestCase):
def testValuesInVariable(self):
indices = constant_op.constant([[0]], dtype=dtypes.int64)
values = variables.Variable([1], trainable=False, dtype=dtypes.float32)
shape = constant_op.constant([1], dtype=dtypes.int64)
sp_input = sparse_tensor.SparseTensor(indices, values, shape)
sp_output = sparse_ops.sparse_add(sp_input, sp_input)
with test_util.force_cpu():
self.evaluate(variables.global_variables_initializer())
output = self.evaluate(sp_output)
self.assertAllEqual(output.values, [2])
| SparseAddTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py | {
"start": 1121,
"end": 1421
} | class ____(object):
__slots__ = ("name", "__dict__")
def __init__(self, name, middle_name):
self.name = name
self.middle_name = middle_name # [assigning-non-slot]
self.setup()
def setup(self):
pass
# https://github.com/astral-sh/ruff/issues/11358
| StudentF |
python | eventlet__eventlet | eventlet/green/subprocess.py | {
"start": 1310,
"end": 5575
} | class ____(subprocess_orig.Popen):
"""eventlet-friendly version of subprocess.Popen"""
# We do not believe that Windows pipes support non-blocking I/O. At least,
# the Python file objects stored on our base-class object have no
# setblocking() method, and the Python fcntl module doesn't exist on
# Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
# this __init__() override is to wrap the pipes for eventlet-friendly
# non-blocking I/O, don't even bother overriding it on Windows.
if not mswindows:
def __init__(self, args, bufsize=0, *argss, **kwds):
self.args = args
# Forward the call to base-class constructor
subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
# Now wrap the pipes, if any. This logic is loosely borrowed from
# eventlet.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
if pipe is not None and type(pipe) != greenio.GreenPipe:
# https://github.com/eventlet/eventlet/issues/243
# AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
mode = getattr(pipe, 'mode', '')
if not mode:
if pipe.readable():
mode += 'r'
if pipe.writable():
mode += 'w'
# ValueError: can't have unbuffered text I/O
if bufsize == 0:
bufsize = -1
wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
def wait(self, timeout=None, check_interval=0.01):
# Instead of a blocking OS call, this version of wait() uses logic
# borrowed from the eventlet 0.2 processes.Process.wait() method.
if timeout is not None:
endtime = time.time() + timeout
try:
while True:
status = self.poll()
if status is not None:
return status
if timeout is not None and time.time() > endtime:
raise TimeoutExpired(self.args, timeout)
eventlet.sleep(check_interval)
except OSError as e:
if e.errno == errno.ECHILD:
# no child process, this happens if the child process
# already died and has been cleaned up
return -1
else:
raise
wait.__doc__ = subprocess_orig.Popen.wait.__doc__
if not mswindows:
# don't want to rewrite the original _communicate() method, we
# just want a version that uses eventlet.green.select.select()
# instead of select.select().
_communicate = FunctionType(
subprocess_orig.Popen._communicate.__code__,
globals())
try:
_communicate_with_select = FunctionType(
subprocess_orig.Popen._communicate_with_select.__code__,
globals())
_communicate_with_poll = FunctionType(
subprocess_orig.Popen._communicate_with_poll.__code__,
globals())
except AttributeError:
pass
# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
def patched_function(function):
new_function = FunctionType(function.__code__, globals())
new_function.__kwdefaults__ = function.__kwdefaults__
new_function.__defaults__ = function.__defaults__
return new_function
call = patched_function(subprocess_orig.call)
check_call = patched_function(subprocess_orig.check_call)
# check_output is Python 2.7+
if hasattr(subprocess_orig, 'check_output'):
__patched__.append('check_output')
check_output = patched_function(subprocess_orig.check_output)
del patched_function
# Keep exceptions identity.
# https://github.com/eventlet/eventlet/issues/413
CalledProcessError = subprocess_imported.CalledProcessError
del subprocess_imported
| Popen |
python | PyCQA__mccabe | mccabe.py | {
"start": 444,
"end": 1291
} | class ____(object):
"""Performs a depth-first walk of the AST."""
def __init__(self):
self.node = None
self._cache = {}
def default(self, node, *args):
for child in iter_child_nodes(node):
self.dispatch(child, *args)
def dispatch(self, node, *args):
self.node = node
klass = node.__class__
meth = self._cache.get(klass)
if meth is None:
className = klass.__name__
meth = getattr(self.visitor, 'visit' + className, self.default)
self._cache[klass] = meth
return meth(node, *args)
def preorder(self, tree, visitor, *args):
"""Do preorder walk of tree using visitor"""
self.visitor = visitor
visitor.visit = self.dispatch
self.dispatch(tree, *args) # XXX *args make sense?
| ASTVisitor |
python | doocs__leetcode | solution/0200-0299/0263.Ugly Number/Solution.py | {
"start": 0,
"end": 201
} | class ____:
def isUgly(self, n: int) -> bool:
if n < 1:
return False
for x in [2, 3, 5]:
while n % x == 0:
n //= x
return n == 1
| Solution |
python | pydata__xarray | xarray/tests/test_dataarray.py | {
"start": 203570,
"end": 234504
} | class ____(TestReduce):
def test_min(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
) -> None:
ar = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
minindex = [x if not np.isnan(x) else 0 for x in minindex]
expected0list = [
ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex)
]
expected0 = xr.concat(expected0list, dim="y")
result0 = ar.min(dim="x", keep_attrs=True)
assert_identical(result0, expected0)
# Default keeps attrs for reduction operations
result1 = ar.min(dim="x")
assert_identical(result1, expected0)
# Test explicitly dropping attrs
result1_no_attrs = ar.min(dim="x", keep_attrs=False)
expected1 = expected0.copy()
expected1.attrs = {}
assert_identical(result1_no_attrs, expected1)
result2 = ar.min(axis=1)
assert_identical(result2, expected0) # Default keeps attrs
minindex = [
x if y is None or ar.dtype.kind == "O" else y
for x, y in zip(minindex, nanindex, strict=True)
]
expected2list = [
ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex)
]
expected2 = xr.concat(expected2list, dim="y")
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
result3 = ar.min(dim="x", skipna=False)
assert_identical(result3, expected2)
def test_max(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
) -> None:
ar = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
maxindex = [x if not np.isnan(x) else 0 for x in maxindex]
expected0list = [
ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex)
]
expected0 = xr.concat(expected0list, dim="y")
result0 = ar.max(dim="x", keep_attrs=True)
assert_identical(result0, expected0)
# Default keeps attrs for reduction operations
result1 = ar.max(dim="x")
assert_identical(result1, expected0)
# Test explicitly dropping attrs
result1_no_attrs = ar.max(dim="x", keep_attrs=False)
expected1 = expected0.copy()
expected1.attrs = {}
assert_identical(result1_no_attrs, expected1)
result2 = ar.max(axis=1)
assert_identical(result2, expected0) # Default keeps attrs
maxindex = [
x if y is None or ar.dtype.kind == "O" else y
for x, y in zip(maxindex, nanindex, strict=True)
]
expected2list = [
ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex)
]
expected2 = xr.concat(expected2list, dim="y")
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
result3 = ar.max(dim="x", skipna=False)
assert_identical(result3, expected2)
def test_argmin(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
) -> None:
ar = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1])
indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords)
if np.isnan(minindex).any():
with pytest.raises(ValueError):
ar.argmin(dim="x")
return
expected0list = [
indarr.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex)
]
expected0 = xr.concat(expected0list, dim="y")
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
result0 = ar.argmin(dim="x")
assert_identical(result0, expected0)
result1 = ar.argmin(axis=1)
assert_identical(result1, expected0)
result2 = ar.argmin(dim="x", keep_attrs=True)
expected1 = expected0.copy()
expected1.attrs = self.attrs
assert_identical(result2, expected1)
minindex = [
x if y is None or ar.dtype.kind == "O" else y
for x, y in zip(minindex, nanindex, strict=True)
]
expected2list = [
indarr.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex)
]
expected2 = xr.concat(expected2list, dim="y")
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
result3 = ar.argmin(dim="x", skipna=False)
assert_identical(result3, expected2)
def test_argmax(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
) -> None:
ar = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
indarr_np = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1])
indarr = xr.DataArray(indarr_np, dims=ar.dims, coords=ar.coords)
if np.isnan(maxindex).any():
with pytest.raises(ValueError):
ar.argmax(dim="x")
return
expected0list = [
indarr.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex)
]
expected0 = xr.concat(expected0list, dim="y")
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
result0 = ar.argmax(dim="x")
assert_identical(result0, expected0)
result1 = ar.argmax(axis=1)
assert_identical(result1, expected0)
result2 = ar.argmax(dim="x", keep_attrs=True)
expected1 = expected0.copy()
expected1.attrs = self.attrs
assert_identical(result2, expected1)
maxindex = [
x if y is None or ar.dtype.kind == "O" else y
for x, y in zip(maxindex, nanindex, strict=True)
]
expected2list = [
indarr.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex)
]
expected2 = xr.concat(expected2list, dim="y")
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
result3 = ar.argmax(dim="x", skipna=False)
assert_identical(result3, expected2)
@pytest.mark.parametrize(
"use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")]
)
def test_idxmin(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
use_dask: bool,
) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
if use_dask and x.dtype.kind == "M":
pytest.xfail("dask operation 'argmin' breaks when dtype is datetime64 (M)")
if x.dtype.kind == "O":
# TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices.
max_computes = 1
else:
max_computes = 0
ar0_raw = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
if use_dask:
ar0 = ar0_raw.chunk({})
else:
ar0 = ar0_raw
assert_identical(ar0, ar0)
# No dimension specified
with pytest.raises(ValueError):
ar0.idxmin()
# dim doesn't exist
with pytest.raises(KeyError):
ar0.idxmin(dim="Y")
assert_identical(ar0, ar0)
coordarr0 = xr.DataArray(
np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords
)
hasna = [np.isnan(x) for x in minindex]
coordarr1 = coordarr0.copy()
coordarr1[hasna, :] = 1
minindex0 = [x if not np.isnan(x) else 0 for x in minindex]
nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None]
expected0list = [
(coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex0)
]
expected0 = xr.concat(expected0list, dim="y")
expected0.name = "x"
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
# Default fill value (NaN)
with raise_if_dask_computes(max_computes=max_computes):
result0 = ar0.idxmin(dim="x")
assert_identical(result0, expected0)
# Manually specify NaN fill_value
with raise_if_dask_computes(max_computes=max_computes):
result1 = ar0.idxmin(dim="x", fill_value=np.nan)
assert_identical(result1, expected0)
# keep_attrs
with raise_if_dask_computes(max_computes=max_computes):
result2 = ar0.idxmin(dim="x", keep_attrs=True)
expected2 = expected0.copy()
expected2.attrs = self.attrs
assert_identical(result2, expected2)
# skipna=False
minindex3 = [
x if y is None or ar0.dtype.kind == "O" else y
for x, y in zip(minindex0, nanindex, strict=True)
]
expected3list = [
coordarr0.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex3)
]
expected3 = xr.concat(expected3list, dim="y")
expected3.name = "x"
expected3.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result3 = ar0.idxmin(dim="x", skipna=False)
assert_identical(result3, expected3)
# fill_value should be ignored with skipna=False
with raise_if_dask_computes(max_computes=max_computes):
result4 = ar0.idxmin(dim="x", skipna=False, fill_value=-100j)
assert_identical(result4, expected3)
# Float fill_value
nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None]
expected5list = [
(coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex0)
]
expected5 = xr.concat(expected5list, dim="y")
expected5.name = "x"
expected5.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result5 = ar0.idxmin(dim="x", fill_value=-1.1)
assert_identical(result5, expected5)
# Integer fill_value
nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None]
expected6list = [
(coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex0)
]
expected6 = xr.concat(expected6list, dim="y")
expected6.name = "x"
expected6.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result6 = ar0.idxmin(dim="x", fill_value=-1)
assert_identical(result6, expected6)
# Complex fill_value
nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None]
expected7list = [
(coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(minindex0)
]
expected7 = xr.concat(expected7list, dim="y")
expected7.name = "x"
expected7.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result7 = ar0.idxmin(dim="x", fill_value=-5j)
assert_identical(result7, expected7)
@pytest.mark.parametrize(
"use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")]
)
def test_idxmax(
self,
x: np.ndarray,
minindex: list[int | float],
maxindex: list[int | float],
nanindex: list[int | None],
use_dask: bool,
) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
if use_dask and x.dtype.kind == "M":
pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)")
if x.dtype.kind == "O":
# TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices.
max_computes = 1
else:
max_computes = 0
ar0_raw = xr.DataArray(
x,
dims=["y", "x"],
coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
attrs=self.attrs,
)
if use_dask:
ar0 = ar0_raw.chunk({})
else:
ar0 = ar0_raw
# No dimension specified
with pytest.raises(ValueError):
ar0.idxmax()
# dim doesn't exist
with pytest.raises(KeyError):
ar0.idxmax(dim="Y")
ar1 = ar0.copy()
del ar1.coords["y"]
with pytest.raises(KeyError):
ar1.idxmax(dim="y")
coordarr0 = xr.DataArray(
np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords
)
hasna = [np.isnan(x) for x in maxindex]
coordarr1 = coordarr0.copy()
coordarr1[hasna, :] = 1
maxindex0 = [x if not np.isnan(x) else 0 for x in maxindex]
nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None]
expected0list = [
(coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex0)
]
expected0 = xr.concat(expected0list, dim="y")
expected0.name = "x"
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
# Default fill value (NaN)
with raise_if_dask_computes(max_computes=max_computes):
result0 = ar0.idxmax(dim="x")
assert_identical(result0, expected0)
# Manually specify NaN fill_value
with raise_if_dask_computes(max_computes=max_computes):
result1 = ar0.idxmax(dim="x", fill_value=np.nan)
assert_identical(result1, expected0)
# keep_attrs
with raise_if_dask_computes(max_computes=max_computes):
result2 = ar0.idxmax(dim="x", keep_attrs=True)
expected2 = expected0.copy()
expected2.attrs = self.attrs
assert_identical(result2, expected2)
# skipna=False
maxindex3 = [
x if y is None or ar0.dtype.kind == "O" else y
for x, y in zip(maxindex0, nanindex, strict=True)
]
expected3list = [
coordarr0.isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex3)
]
expected3 = xr.concat(expected3list, dim="y")
expected3.name = "x"
expected3.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result3 = ar0.idxmax(dim="x", skipna=False)
assert_identical(result3, expected3)
# fill_value should be ignored with skipna=False
with raise_if_dask_computes(max_computes=max_computes):
result4 = ar0.idxmax(dim="x", skipna=False, fill_value=-100j)
assert_identical(result4, expected3)
# Float fill_value
nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None]
expected5list = [
(coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex0)
]
expected5 = xr.concat(expected5list, dim="y")
expected5.name = "x"
expected5.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result5 = ar0.idxmax(dim="x", fill_value=-1.1)
assert_identical(result5, expected5)
# Integer fill_value
nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None]
expected6list = [
(coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex0)
]
expected6 = xr.concat(expected6list, dim="y")
expected6.name = "x"
expected6.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result6 = ar0.idxmax(dim="x", fill_value=-1)
assert_identical(result6, expected6)
# Complex fill_value
nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None]
expected7list = [
(coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True)
for yi, indi in enumerate(maxindex0)
]
expected7 = xr.concat(expected7list, dim="y")
expected7.name = "x"
expected7.attrs = self.attrs # Default keeps attrs for reduction operations
with raise_if_dask_computes(max_computes=max_computes):
result7 = ar0.idxmax(dim="x", fill_value=-5j)
assert_identical(result7, expected7)
@pytest.mark.filterwarnings(
    "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmin_dim(
    self,
    x: np.ndarray,
    minindex: list[int | float],
    maxindex: list[int | float],
    nanindex: list[int | None],
) -> None:
    """Check argmin with an explicit dim list: attrs defaults, keep_attrs,
    skipna=False, and the dim=... (all-dims) form."""
    arr = xr.DataArray(
        x,
        dims=["y", "x"],
        coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
        attrs=self.attrs,
    )
    idx_np = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1])
    idx = xr.DataArray(idx_np, dims=arr.dims, coords=arr.coords)

    # Rows that are all-NaN make argmin ill-defined.
    if np.isnan(minindex).any():
        with pytest.raises(ValueError):
            arr.argmin(dim="x")
        return

    def per_row(indices):
        # Build the expected result by picking one index per y-row and
        # restacking along y.
        pieces = [
            idx.isel(y=row).isel(x=pos, drop=True)
            for row, pos in enumerate(indices)
        ]
        return xr.concat(pieces, dim="y")

    expected0 = {"x": per_row(minindex)}
    # Default keeps attrs for reduction operations
    expected0["x"].attrs = self.attrs
    result0 = arr.argmin(dim=["x"])
    for key, exp in expected0.items():
        assert_identical(result0[key], exp)

    result1 = arr.argmin(dim=["x"], keep_attrs=True)
    expected1 = deepcopy(expected0)
    expected1["x"].attrs = self.attrs
    for key, exp in expected1.items():
        assert_identical(result1[key], exp)

    # With skipna=False the NaN position wins, except for object dtype.
    minindex = [
        pos if nanpos is None or arr.dtype.kind == "O" else nanpos
        for pos, nanpos in zip(minindex, nanindex, strict=True)
    ]
    expected2 = {"x": per_row(minindex)}
    # Default keeps attrs for reduction operations
    expected2["x"].attrs = self.attrs
    result2 = arr.argmin(dim=["x"], skipna=False)
    for key, exp in expected2.items():
        assert_identical(result2[key], exp)

    result3 = arr.argmin(...)
    # TODO: remove cast once argmin typing is overloaded
    min_xind = cast(DataArray, arr.isel(expected0).argmin())
    expected3 = {
        "y": DataArray(min_xind, attrs=self.attrs),
        "x": DataArray(minindex[min_xind.item()], attrs=self.attrs),
    }
    for key, exp in expected3.items():
        assert_identical(result3[key], exp)
@pytest.mark.filterwarnings(
    "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmax_dim(
    self,
    x: np.ndarray,
    minindex: list[int | float],
    maxindex: list[int | float],
    nanindex: list[int | None],
) -> None:
    """Check argmax with an explicit dim list: attrs defaults, keep_attrs,
    skipna=False, and the dim=... (all-dims) form."""
    arr = xr.DataArray(
        x,
        dims=["y", "x"],
        coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])},
        attrs=self.attrs,
    )
    idx_np = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1])
    idx = xr.DataArray(idx_np, dims=arr.dims, coords=arr.coords)

    # Rows that are all-NaN make argmax ill-defined.
    if np.isnan(maxindex).any():
        with pytest.raises(ValueError):
            arr.argmax(dim="x")
        return

    def per_row(indices):
        # Build the expected result by picking one index per y-row and
        # restacking along y.
        pieces = [
            idx.isel(y=row).isel(x=pos, drop=True)
            for row, pos in enumerate(indices)
        ]
        return xr.concat(pieces, dim="y")

    expected0 = {"x": per_row(maxindex)}
    # Default keeps attrs for reduction operations
    expected0["x"].attrs = self.attrs
    result0 = arr.argmax(dim=["x"])
    for key, exp in expected0.items():
        assert_identical(result0[key], exp)

    result1 = arr.argmax(dim=["x"], keep_attrs=True)
    expected1 = deepcopy(expected0)
    expected1["x"].attrs = self.attrs
    for key, exp in expected1.items():
        assert_identical(result1[key], exp)

    # With skipna=False the NaN position wins, except for object dtype.
    maxindex = [
        pos if nanpos is None or arr.dtype.kind == "O" else nanpos
        for pos, nanpos in zip(maxindex, nanindex, strict=True)
    ]
    expected2 = {"x": per_row(maxindex)}
    # Default keeps attrs for reduction operations
    expected2["x"].attrs = self.attrs
    result2 = arr.argmax(dim=["x"], skipna=False)
    for key, exp in expected2.items():
        assert_identical(result2[key], exp)

    result3 = arr.argmax(...)
    # TODO: remove cast once argmax typing is overloaded
    max_xind = cast(DataArray, arr.isel(expected0).argmax())
    expected3 = {
        "y": DataArray(max_xind, attrs=self.attrs),
        "x": DataArray(maxindex[max_xind.item()], attrs=self.attrs),
    }
    for key, exp in expected3.items():
        assert_identical(result3[key], exp)
@pytest.mark.parametrize(
"x, minindices_x, minindices_y, minindices_z, minindices_xy, "
"minindices_xz, minindices_yz, minindices_xyz, maxindices_x, "
"maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, "
"maxindices_xyz, nanindices_x, nanindices_y, nanindices_z, nanindices_xy, "
"nanindices_xz, nanindices_yz, nanindices_xyz",
[
pytest.param(
np.array(
[
[[0, 1, 2, 0], [-2, -4, 2, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, -10, 5], [20, 0, 0, 0]],
]
),
{"x": np.array([[0, 2, 2, 0], [0, 0, 2, 0]])},
{"y": np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])},
{"z": np.array([[0, 1], [0, 0], [2, 1]])},
{"x": np.array([0, 0, 2, 0]), "y": np.array([1, 1, 0, 0])},
{"x": np.array([2, 0]), "z": np.array([2, 1])},
{"y": np.array([1, 0, 0]), "z": np.array([1, 0, 2])},
{"x": np.array(2), "y": np.array(0), "z": np.array(2)},
{"x": np.array([[1, 0, 0, 2], [2, 1, 0, 1]])},
{"y": np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]])},
{"z": np.array([[2, 2], [0, 0], [3, 0]])},
{"x": np.array([2, 0, 0, 2]), "y": np.array([1, 0, 0, 0])},
{"x": np.array([2, 2]), "z": np.array([3, 0])},
{"y": np.array([0, 0, 1]), "z": np.array([2, 0, 0])},
{"x": np.array(2), "y": np.array(1), "z": np.array(0)},
{"x": np.array([[None, None, None, None], [None, None, None, None]])},
{
"y": np.array(
[
[None, None, None, None],
[None, None, None, None],
[None, None, None, None],
]
)
},
{"z": np.array([[None, None], [None, None], [None, None]])},
{
"x": np.array([None, None, None, None]),
"y": np.array([None, None, None, None]),
},
{"x": np.array([None, None]), "z": np.array([None, None])},
{"y": np.array([None, None, None]), "z": np.array([None, None, None])},
{"x": np.array(None), "y": np.array(None), "z": np.array(None)},
id="int",
),
pytest.param(
np.array(
[
[[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]],
[[-4.0, np.nan, 2.0, np.nan], [-2.0, -4.0, 2.0, 0.0]],
[[np.nan] * 4, [np.nan] * 4],
]
),
{"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])},
{
"y": np.array(
[[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]]
)
},
{"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])},
{"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])},
{"x": np.array([1, 0]), "z": np.array([0, 1])},
{"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])},
{"x": np.array(0), "y": np.array(1), "z": np.array(1)},
{"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])},
{
"y": np.array(
[[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]]
)
},
{"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])},
{"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])},
{"x": np.array([0, 0]), "z": np.array([2, 2])},
{"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])},
{"x": np.array(0), "y": np.array(0), "z": np.array(0)},
{"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])},
{
"y": np.array(
[[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]]
)
},
{"z": np.array([[None, None], [1, None], [0, 0]])},
{"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])},
{"x": np.array([1, 2]), "z": np.array([1, 0])},
{"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])},
{"x": np.array(1), "y": np.array(0), "z": np.array(1)},
id="nan",
),
pytest.param(
np.array(
[
[[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]],
[[-4.0, np.nan, 2.0, np.nan], [-2.0, -4.0, 2.0, 0.0]],
[[np.nan] * 4, [np.nan] * 4],
]
).astype("object"),
{"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])},
{
"y": np.array(
[[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]]
)
},
{"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])},
{"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])},
{"x": np.array([1, 0]), "z": np.array([0, 1])},
{"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])},
{"x": np.array(0), "y": np.array(1), "z": np.array(1)},
{"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])},
{
"y": np.array(
[[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]]
)
},
{"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])},
{"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])},
{"x": np.array([0, 0]), "z": np.array([2, 2])},
{"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])},
{"x": np.array(0), "y": np.array(0), "z": np.array(0)},
{"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])},
{
"y": np.array(
[[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]]
)
},
{"z": np.array([[None, None], [1, None], [0, 0]])},
{"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])},
{"x": np.array([1, 2]), "z": np.array([1, 0])},
{"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])},
{"x": np.array(1), "y": np.array(0), "z": np.array(1)},
id="obj",
),
pytest.param(
np.array(
[
[["2015-12-31", "2020-01-02"], ["2020-01-01", "2016-01-01"]],
[["2020-01-02", "2020-01-02"], ["2020-01-02", "2020-01-02"]],
[["1900-01-01", "1-02-03"], ["1900-01-02", "1-02-03"]],
],
dtype="datetime64[ns]",
),
{"x": np.array([[2, 2], [2, 2]])},
{"y": np.array([[0, 1], [0, 0], [0, 0]])},
{"z": np.array([[0, 1], [0, 0], [1, 1]])},
{"x": np.array([2, 2]), "y": np.array([0, 0])},
{"x": np.array([2, 2]), "z": np.array([1, 1])},
{"y": np.array([0, 0, 0]), "z": np.array([0, 0, 1])},
{"x": np.array(2), "y": np.array(0), "z": np.array(1)},
{"x": np.array([[1, 0], [1, 1]])},
{"y": np.array([[1, 0], [0, 0], [1, 0]])},
{"z": np.array([[1, 0], [0, 0], [0, 0]])},
{"x": np.array([1, 0]), "y": np.array([0, 0])},
{"x": np.array([0, 1]), "z": np.array([1, 0])},
{"y": np.array([0, 0, 1]), "z": np.array([1, 0, 0])},
{"x": np.array(0), "y": np.array(0), "z": np.array(1)},
{"x": np.array([[None, None], [None, None]])},
{"y": np.array([[None, None], [None, None], [None, None]])},
{"z": np.array([[None, None], [None, None], [None, None]])},
{"x": np.array([None, None]), "y": np.array([None, None])},
{"x": np.array([None, None]), "z": np.array([None, None])},
{"y": np.array([None, None, None]), "z": np.array([None, None, None])},
{"x": np.array(None), "y": np.array(None), "z": np.array(None)},
id="datetime",
),
],
)
| TestReduce2D |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.