language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pip | src/pip/_vendor/rich/columns.py | {
"start": 469,
"end": 7131
} | class ____(JupyterMixin):
"""Display renderables in neat columns.
Args:
renderables (Iterable[RenderableType]): Any number of Rich renderables (including str).
width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None.
padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1).
expand (bool, optional): Expand columns to full width. Defaults to False.
equal (bool, optional): Arrange in to equal sized columns. Defaults to False.
column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False.
right_to_left (bool, optional): Start column from right hand side. Defaults to False.
align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None.
title (TextType, optional): Optional title for Columns.
"""
def __init__(
self,
renderables: Optional[Iterable[RenderableType]] = None,
padding: PaddingDimensions = (0, 1),
*,
width: Optional[int] = None,
expand: bool = False,
equal: bool = False,
column_first: bool = False,
right_to_left: bool = False,
align: Optional[AlignMethod] = None,
title: Optional[TextType] = None,
) -> None:
self.renderables = list(renderables or [])
self.width = width
self.padding = padding
self.expand = expand
self.equal = equal
self.column_first = column_first
self.right_to_left = right_to_left
self.align: Optional[AlignMethod] = align
self.title = title
def add_renderable(self, renderable: RenderableType) -> None:
"""Add a renderable to the columns.
Args:
renderable (RenderableType): Any renderable object.
"""
self.renderables.append(renderable)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
render_str = console.render_str
renderables = [
render_str(renderable) if isinstance(renderable, str) else renderable
for renderable in self.renderables
]
if not renderables:
return
_top, right, _bottom, left = Padding.unpack(self.padding)
width_padding = max(left, right)
max_width = options.max_width
widths: Dict[int, int] = defaultdict(int)
column_count = len(renderables)
get_measurement = Measurement.get
renderable_widths = [
get_measurement(console, options, renderable).maximum
for renderable in renderables
]
if self.equal:
renderable_widths = [max(renderable_widths)] * len(renderable_widths)
def iter_renderables(
column_count: int,
) -> Iterable[Tuple[int, Optional[RenderableType]]]:
item_count = len(renderables)
if self.column_first:
width_renderables = list(zip(renderable_widths, renderables))
column_lengths: List[int] = [item_count // column_count] * column_count
for col_no in range(item_count % column_count):
column_lengths[col_no] += 1
row_count = (item_count + column_count - 1) // column_count
cells = [[-1] * column_count for _ in range(row_count)]
row = col = 0
for index in range(item_count):
cells[row][col] = index
column_lengths[col] -= 1
if column_lengths[col]:
row += 1
else:
col += 1
row = 0
for index in chain.from_iterable(cells):
if index == -1:
break
yield width_renderables[index]
else:
yield from zip(renderable_widths, renderables)
# Pad odd elements with spaces
if item_count % column_count:
for _ in range(column_count - (item_count % column_count)):
yield 0, None
table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False)
table.expand = self.expand
table.title = self.title
if self.width is not None:
column_count = (max_width) // (self.width + width_padding)
for _ in range(column_count):
table.add_column(width=self.width)
else:
while column_count > 1:
widths.clear()
column_no = 0
for renderable_width, _ in iter_renderables(column_count):
widths[column_no] = max(widths[column_no], renderable_width)
total_width = sum(widths.values()) + width_padding * (
len(widths) - 1
)
if total_width > max_width:
column_count = len(widths) - 1
break
else:
column_no = (column_no + 1) % column_count
else:
break
get_renderable = itemgetter(1)
_renderables = [
get_renderable(_renderable)
for _renderable in iter_renderables(column_count)
]
if self.equal:
_renderables = [
None
if renderable is None
else Constrain(renderable, renderable_widths[0])
for renderable in _renderables
]
if self.align:
align = self.align
_Align = Align
_renderables = [
None if renderable is None else _Align(renderable, align)
for renderable in _renderables
]
right_to_left = self.right_to_left
add_row = table.add_row
for start in range(0, len(_renderables), column_count):
row = _renderables[start : start + column_count]
if right_to_left:
row = row[::-1]
add_row(*row)
yield table
if __name__ == "__main__": # pragma: no cover
import os
console = Console()
files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))]
columns = Columns(files, padding=(0, 1), expand=False, equal=False)
console.print(columns)
console.rule()
columns.column_first = True
console.print(columns)
columns.right_to_left = True
console.rule()
console.print(columns)
| Columns |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_period.py | {
"start": 19409,
"end": 20836
} | class ____:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")],
"B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
"B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
| TestPeriodFrameArithmetic |
python | scipy__scipy | scipy/sparse/linalg/_eigen/tests/test_svds.py | {
"start": 35858,
"end": 35961
} | class ____(SVDSCommonTests):
def setup_method(self):
self.solver = 'lobpcg'
| Test_SVDS_LOBPCG |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 106434,
"end": 106564
} | class ____(BaseModel, extra="forbid"):
replicate_points: "ReplicatePoints" = Field(..., description="")
| ReplicatePointsOperation |
python | streamlit__streamlit | lib/tests/streamlit/form_test.py | {
"start": 7133,
"end": 10449
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall form protos."""
def test_marshall_form(self):
"""Creating a form should result in the expected protobuf data."""
# Test with clear_on_submit=True
with st.form(key="foo", clear_on_submit=True):
pass
assert len(self.get_all_deltas_from_queue()) == 1
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "foo"
assert form_proto.form.clear_on_submit
assert form_proto.form.enter_to_submit
assert form_proto.form.border
self.clear_queue()
# Test with clear_on_submit=False
with st.form(key="bar", clear_on_submit=False):
pass
assert len(self.get_all_deltas_from_queue()) == 1
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "bar"
assert not form_proto.form.clear_on_submit
def test_form_enter_to_submit(self):
"""Test that a form can be created with enter_to_submit=False."""
# Test with enter_to_submit=False
with st.form(key="foo", enter_to_submit=False):
pass
assert len(self.get_all_deltas_from_queue()) == 1
form_proto = self.get_delta_from_queue(0).add_block
assert not form_proto.form.enter_to_submit
def test_form_without_border(self):
"""Test that a form can be created without a border."""
# Test with clear_on_submit=True
with st.form(key="foo", clear_on_submit=True, border=False):
pass
assert len(self.get_all_deltas_from_queue()) == 1
form_proto = self.get_delta_from_queue(0).add_block
assert not form_proto.form.border
def test_multiple_forms_same_key(self):
"""Multiple forms with the same key are not allowed."""
with pytest.raises(StreamlitAPIException) as ctx:
st.form(key="foo")
st.form(key="foo")
assert "There are multiple identical forms with `key='foo'`" in str(ctx.value)
def test_multiple_forms_same_labels_different_keys(self):
"""Multiple forms with different keys are allowed."""
try:
st.form(key="foo")
st.form(key="bar")
except Exception:
self.fail("Forms with same labels and different keys failed to create.")
def test_form_in_form(self):
"""Test that forms cannot be nested in other forms."""
with pytest.raises(StreamlitAPIException) as ctx:
with st.form("foo"):
with st.form("bar"):
pass
assert str(ctx.value) == "Forms cannot be nested in other forms."
def test_button_in_form(self):
"""Test that buttons are not allowed in forms."""
with pytest.raises(StreamlitAPIException) as ctx:
with st.form("foo"):
st.button("foo")
assert "`st.button()` can't be used in an `st.form()`" in str(ctx.value)
def test_form_block_data(self):
"""Test that a form creates a block element with correct data."""
form_data = st.form(key="bar")._form_data
assert form_data.form_id == "bar"
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
| FormMarshallingTest |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/supporting_types.py | {
"start": 5406,
"end": 5801
} | class ____(SerializableDictDot):
"""A holder for ExpectationDiagnosticCheckMessages, grouping them by maturity level. Used within the ExpectationDiagnostic object.""" # noqa: E501 # FIXME CoP
experimental: List[ExpectationDiagnosticCheckMessage]
beta: List[ExpectationDiagnosticCheckMessage]
production: List[ExpectationDiagnosticCheckMessage]
| ExpectationDiagnosticMaturityMessages |
python | neetcode-gh__leetcode | python/0374-guess-number-higher-or-lower.py | {
"start": 0,
"end": 405
} | class ____:
def guessNumber(self, n: int) -> int:
# return a num btw 1,..,n
low = 1
high = n
while True:
mid = low + (high - low) // 2
myGuess = guess(mid)
if myGuess == 1:
low = mid + 1
elif myGuess == -1:
high = mid - 1
else:
return mid
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoom/components.py | {
"start": 1065,
"end": 3500
} | class ____(NoAuth):
config: Config
account_id: Union[InterpolatedString, str]
client_id: Union[InterpolatedString, str]
client_secret: Union[InterpolatedString, str]
authorization_endpoint: Union[InterpolatedString, str]
_instance = None
_generate_token_time = 0
_access_token = None
_grant_type = "account_credentials"
def __post_init__(self, parameters: Mapping[str, Any]):
self._account_id = InterpolatedString.create(self.account_id, parameters=parameters).eval(self.config)
self._client_id = InterpolatedString.create(self.client_id, parameters=parameters).eval(self.config)
self._client_secret = InterpolatedString.create(self.client_secret, parameters=parameters).eval(self.config)
self._authorization_endpoint = InterpolatedString.create(self.authorization_endpoint, parameters=parameters).eval(self.config)
def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
"""Attach the page access token to params to authenticate on the HTTP request"""
if self._access_token is None or ((time.time() - self._generate_token_time) > BEARER_TOKEN_EXPIRES_IN):
self._generate_token_time = time.time()
self._access_token = self.generate_access_token()
headers = {"Authorization": f"Bearer {self._access_token}", "Content-type": "application/json"}
request.headers.update(headers)
return request
@property
def auth_header(self) -> str:
return "Authorization"
@property
def token(self) -> Optional[str]:
return self._access_token if self._access_token else None
def generate_access_token(self) -> str:
self._generate_token_time = time.time()
try:
token = base64.b64encode(f"{self._client_id}:{self._client_secret}".encode("ascii")).decode("utf-8")
headers = {"Authorization": f"Basic {token}", "Content-type": "application/json"}
rest = requests.post(
url=f"{self._authorization_endpoint}?grant_type={self._grant_type}&account_id={self._account_id}", headers=headers
)
if rest.status_code != HTTPStatus.OK:
raise HTTPError(rest.text)
return rest.json().get("access_token")
except Exception as e:
raise Exception(f"Error while generating access token: {e}") from e
| ServerToServerOauthAuthenticator |
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 2666,
"end": 3053
} | class ____:
__slots__ = ['_err']
data_descriptor = DataDescriptor('_err')
non_data_descriptor = NonDataDescriptor()
missing_descriptor = Unknown()
def dont_emit_for_descriptors():
inst = SlotsWithDescriptor()
# This should not emit, because attr is
# a data descriptor
inst.data_descriptor = 'foo'
inst.non_data_descriptor = 'lala'
| SlotsWithDescriptor |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 8165,
"end": 10416
} | class ____(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.d_o = self.d.astype(object)
self.e = self.d.copy()
self.e_o = self.d_o.copy()
self.cond = (self.d > 5000)
size = 1024 * 1024 // 8
rnd_array = np.random.rand(size)
self.rand_cond_01 = rnd_array > 0.01
self.rand_cond_20 = rnd_array > 0.20
self.rand_cond_30 = rnd_array > 0.30
self.rand_cond_40 = rnd_array > 0.40
self.rand_cond_50 = rnd_array > 0.50
self.all_zeros = np.zeros(size, dtype=bool)
self.all_ones = np.ones(size, dtype=bool)
self.rep_zeros_2 = np.arange(size) % 2 == 0
self.rep_zeros_4 = np.arange(size) % 4 == 0
self.rep_zeros_8 = np.arange(size) % 8 == 0
self.rep_ones_2 = np.arange(size) % 2 > 0
self.rep_ones_4 = np.arange(size) % 4 > 0
self.rep_ones_8 = np.arange(size) % 8 > 0
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_object(self):
# object and byteswapped arrays have a
# special slow path in the where internals
np.where(self.cond, self.d_o, self.e_o)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
def time_all_zeros(self):
np.where(self.all_zeros)
def time_random_01_percent(self):
np.where(self.rand_cond_01)
def time_random_20_percent(self):
np.where(self.rand_cond_20)
def time_random_30_percent(self):
np.where(self.rand_cond_30)
def time_random_40_percent(self):
np.where(self.rand_cond_40)
def time_random_50_percent(self):
np.where(self.rand_cond_50)
def time_all_ones(self):
np.where(self.all_ones)
def time_interleaved_zeros_x2(self):
np.where(self.rep_zeros_2)
def time_interleaved_zeros_x4(self):
np.where(self.rep_zeros_4)
def time_interleaved_zeros_x8(self):
np.where(self.rep_zeros_8)
def time_interleaved_ones_x2(self):
np.where(self.rep_ones_2)
def time_interleaved_ones_x4(self):
np.where(self.rep_ones_4)
def time_interleaved_ones_x8(self):
np.where(self.rep_ones_8)
| Where |
python | numpy__numpy | numpy/polynomial/tests/test_symbol.py | {
"start": 3437,
"end": 3735
} | class ____:
p = poly.Polynomial([1, 2, 3], symbol='x')
def test_eq(self):
other = poly.Polynomial([1, 2, 3], symbol='x')
assert_(self.p == other)
def test_neq(self):
other = poly.Polynomial([1, 2, 3], symbol='y')
assert_(not self.p == other)
| TestEquality |
python | davidhalter__jedi | jedi/plugins/stdlib.py | {
"start": 12865,
"end": 13508
} | class ____(TreeArgumentsWrapper):
def __init__(self, klass, arguments):
super().__init__(arguments)
self._class = klass
def unpack(self, func=None):
yield None, LazyKnownValue(self._class)
for values in self._wrapped_arguments.unpack(func):
yield values
@argument_clinic('sequence, /', want_value=True, want_arguments=True)
def builtins_classmethod(functions, value, arguments):
return ValueSet(
ClassMethodObject(class_method_object, function)
for class_method_object in value.py__call__(arguments=arguments)
for function in functions
)
| ClassMethodArguments |
python | google__jax | tests/sparse_test.py | {
"start": 46718,
"end": 48937
} | class ____(sptu.SparseTestCase):
@jtu.sample_product(
[
dict(n_batch=n_batch, n_dense=n_dense, expected_nse=expected_nse)
for n_batch, n_dense, expected_nse in [
(0, 0, 4),
(1, 0, 2),
(0, 1, 2),
(2, 0, 1),
(1, 1, 1),
(0, 2, 1),
]
],
dtype=all_dtypes,
)
def test_count_stored_elements(self, dtype, n_batch, n_dense, expected_nse):
"""Test counting nse."""
mat = np.array([[1, 0, 2, 0], [0, 0, 0, 0], [0, 3, 0, 4]], dtype=dtype)
actual_nse = sparse.util._count_stored_elements(
mat, n_batch=n_batch, n_dense=n_dense)
self.assertEqual(expected_nse, actual_nse)
@jtu.sample_product(
[
dict(n_batch=n_batch, n_dense=n_dense)
for n_batch in range(3)
for n_dense in range(3 - n_batch)
],
dtype=all_dtypes,
)
def test_count_stored_elements_empty(self, dtype, n_batch, n_dense):
mat = np.empty((0, 4), dtype=dtype)
actual_nse = sparse.util._count_stored_elements(
mat, n_batch=n_batch, n_dense=n_dense)
self.assertEqual(0, actual_nse)
@jtu.sample_product(
[
dict(n_batch=n_batch, n_dense=n_dense, expected_nse=expected_nse)
for n_batch, n_dense, expected_nse in [
(0, 0, 14),
(1, 0, np.array([6, 8])),
(0, 1, 9),
(2, 0, np.array([[3, 3], [4, 4]])),
]
],
dtype=all_dtypes,
)
def test_count_stored_elements_per_batch(self, dtype, n_batch, n_dense,
expected_nse):
"""Test counting nse."""
mat = np.array([[[[1, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 3]],
[[0, 1, 2, 0], [0, 0, 0, 0], [0, 0, 0, 3]]],
[[[1, 0, 2, 0], [0, 0, 0, 0], [0, 3, 0, 4]],
[[0, 0, 0, 1], [0, 0, 2, 0], [3, 0, 0, 4]]]], dtype=dtype)
actual_nse = sparse.util._count_stored_elements_per_batch(
mat, n_batch=n_batch, n_dense=n_dense)
self.assertArraysEqual(expected_nse, actual_nse, check_dtypes=False)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| SparseUtilTest |
python | google__pytype | pytype/tools/xref/indexer.py | {
"start": 8132,
"end": 8291
} | class ____:
"""A link between a function def and the defs of its params."""
def_id: str
param_id: str
position: int
@dataclasses.dataclass
| FunctionParam |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 829,
"end": 937
} | class ____(BaseModel):
type: Literal["refusal.delta"]
delta: str
snapshot: str
| RefusalDeltaEvent |
python | ansible__ansible | lib/ansible/playbook/block.py | {
"start": 1286,
"end": 14383
} | class ____(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatable):
# main block fields containing the task lists
block = NonInheritableFieldAttribute(isa='list', default=list)
rescue = NonInheritableFieldAttribute(isa='list', default=list)
always = NonInheritableFieldAttribute(isa='list', default=list)
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
# otherwise = FieldAttribute(isa='list')
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
self._parent = None
self._dep_chain = None
self._use_handlers = use_handlers
self._implicit = implicit
if task_include:
self._parent = task_include
elif parent_block:
self._parent = parent_block
super(Block, self).__init__()
def __repr__(self):
return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)
def __eq__(self, other):
"""object comparison based on _uuid"""
return self._uuid == other._uuid
def __ne__(self, other):
"""object comparison based on _uuid"""
return self._uuid != other._uuid
def get_vars(self):
"""
Blocks do not store variables directly, however they may be a member
of a role or task include which does, so return those if present.
"""
all_vars = {}
if self._parent:
all_vars |= self._parent.get_vars()
all_vars |= self.vars.copy()
return all_vars
@staticmethod
def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
implicit = not Block.is_block(data)
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@staticmethod
def is_block(ds):
is_block = False
if isinstance(ds, dict):
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
return is_block
def preprocess_data(self, ds):
"""
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
"""
if not Block.is_block(ds):
if isinstance(ds, list):
return super(Block, self).preprocess_data(dict(block=ds))
else:
return super(Block, self).preprocess_data(dict(block=[ds]))
return super(Block, self).preprocess_data(ds)
# FIXME: these do nothing but augment the exception message; DRY and nuke
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds) from ex
def _load_rescue(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds) from ex
def _load_always(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as ex:
raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds) from ex
def _validate_always(self, attr, name, value):
if value and not self.block:
raise AnsibleParserError("'%s' keyword cannot be used without 'block'" % name, obj=self._ds)
_validate_rescue = _validate_always
def get_dep_chain(self):
if self._dep_chain is None:
if self._parent:
return self._parent.get_dep_chain()
else:
return None
else:
return self._dep_chain[:]
def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
new_task = task.copy(exclude_parent=True, exclude_tasks=exclude_tasks)
if task._parent:
new_task._parent = task._parent.copy(exclude_tasks=True)
if task._parent == new_block:
# If task._parent is the same as new_block, just replace it
new_task._parent = new_block
else:
# task may not be a direct child of new_block, search for the correct place to insert new_block
cur_obj = new_task._parent
while cur_obj._parent and cur_obj._parent != new_block:
cur_obj = cur_obj._parent
cur_obj._parent = new_block
else:
new_task._parent = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
if self._dep_chain is not None:
new_me._dep_chain = self._dep_chain[:]
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=True)
if not exclude_tasks:
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._role = None
if self._role:
new_me._role = self._role
new_me.validate()
return new_me
def set_loader(self, loader):
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
elif self._role:
self._role.set_loader(loader)
dep_chain = self.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.set_loader(loader)
def _get_parent_attribute(self, attr, omit=False):
"""
Generic logic to get the attribute or parent attribute for a block value.
"""
fattr = self.fattributes[attr]
extend = fattr.extend
prepend = fattr.prepend
try:
# omit self, and only get parent values
if omit:
value = Sentinel
else:
value = getattr(self, f'_{attr}', Sentinel)
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
try:
if getattr(_parent, 'statically_loaded', True):
if hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = getattr(_parent, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except AttributeError:
pass
if self._role and (value is Sentinel or extend):
try:
parent_value = getattr(self._role, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
dep_chain = self.get_dep_chain()
if dep_chain and (value is Sentinel or extend):
dep_chain.reverse()
for dep in dep_chain:
dep_value = getattr(dep, f'_{attr}', Sentinel)
if extend:
value = self._extend_value(value, dep_value, prepend)
else:
value = dep_value
if value is not Sentinel and not extend:
break
except AttributeError:
pass
if self._play and (value is Sentinel or extend):
try:
play_value = getattr(self._play, f'_{attr}', Sentinel)
if play_value is not Sentinel:
if extend:
value = self._extend_value(value, play_value, prepend)
else:
value = play_value
except AttributeError:
pass
except KeyError:
pass
return value
def filter_tagged_tasks(self, all_vars):
"""
Creates a new block, with task lists filtered based on the tags.
"""
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
filtered_block = evaluate_block(task)
if filtered_block.has_tasks():
tmp_list.append(filtered_block)
elif task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
new_block = block.copy(exclude_parent=True, exclude_tasks=True)
new_block._parent = block._parent
new_block.block = evaluate_and_append_task(block.block)
new_block.rescue = evaluate_and_append_task(block.rescue)
new_block.always = evaluate_and_append_task(block.always)
return new_block
return evaluate_block(self)
def get_tasks(self):
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
tmp_list.extend(evaluate_block(task))
else:
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
rv = evaluate_and_append_task(block.block)
rv.extend(evaluate_and_append_task(block.rescue))
rv.extend(evaluate_and_append_task(block.always))
return rv
return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
def get_include_params(self):
if self._parent:
return self._parent.get_include_params()
else:
return dict()
def all_parents_static(self):
"""
Determine if all of the parents of this block were statically loaded
or not. Since Task/TaskInclude objects may be in the chain, they simply
call their parents all_parents_static() method. Only Block objects in
the chain check the statically_loaded value of the parent.
"""
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
return False
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
| Block |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_data_forwarding_details.py | {
"start": 366,
"end": 982
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-forwarding-details"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def get_response(self, *args, **kwargs):
"""
Override get_response to always add the required feature flag.
"""
with self.feature(
{
"organizations:data-forwarding-revamp-access": True,
"organizations:data-forwarding": True,
}
):
return super().get_response(*args, **kwargs)
@region_silo_test
| DataForwardingDetailsEndpointTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_range.py | {
"start": 1108,
"end": 26269
} | class ____(__TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {}; '
'expected {}, got {}'.format(test_id, i, y, x))
def test_range(self):
self.assertEqual(list(range(3)), [0, 1, 2])
self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(range(0)), [])
self.assertEqual(list(range(-3)), [])
self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2)
r = range(-sys.maxsize, sys.maxsize, 2)
self.assertEqual(len(r), sys.maxsize)
def test_range_constructor_error_messages(self):
with self.assertRaisesRegex(
TypeError,
"range expected at least 1 argument, got 0"
):
range()
with self.assertRaisesRegex(
TypeError,
"range expected at most 3 arguments, got 6"
):
range(1, 2, 3, 4, 5, 6)
def test_large_operands(self):
x = range(10**20, 10**20+10, 3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
x = range(10**20+10, 10**20, 3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
self.assertFalse(x)
x = range(10**20, 10**20+10, -3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
self.assertFalse(x)
x = range(10**20+10, 10**20, -3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
self.assertTrue(x)
# Now test range() with longs
for x in [range(-2**100),
range(0, -2**100),
range(0, 2**100, -1)]:
self.assertEqual(list(x), [])
self.assertFalse(x)
a = int(10 * sys.maxsize)
b = int(100 * sys.maxsize)
c = int(50 * sys.maxsize)
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], a)
self.assertEqual(seq[-1], a+c)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], b)
self.assertEqual(seq[-1], b-c)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], -a)
self.assertEqual(seq[-1], -a-c)
def test_large_range(self):
# Check long ranges (len > sys.maxsize)
# len() is expected to fail due to limitations of the __len__ protocol
def _range_len(x):
try:
length = len(x)
except OverflowError:
step = x[1] - x[0]
length = 1 + ((x[-1] - x[0]) // step)
return length
a = -sys.maxsize
b = sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = 2 * sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = sys.maxsize**10
c = 2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = sys.maxsize**10
b = 0
c = -2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
def test_invalid_invocation(self):
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
a = int(10 * sys.maxsize)
self.assertRaises(ValueError, range, a, a + 1, int(0))
self.assertRaises(TypeError, range, 1., 1., 1.)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
# Exercise various combinations of bad arguments, to check
# refcounting logic
self.assertRaises(TypeError, range, 0.0)
self.assertRaises(TypeError, range, 0, 0.0)
self.assertRaises(TypeError, range, 0.0, 0)
self.assertRaises(TypeError, range, 0.0, 0.0)
self.assertRaises(TypeError, range, 0, 0, 1.0)
self.assertRaises(TypeError, range, 0, 0.0, 1)
self.assertRaises(TypeError, range, 0, 0.0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0, 1)
self.assertRaises(TypeError, range, 0.0, 0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0.0, 1)
self.assertRaises(TypeError, range, 0.0, 0.0, 1.0)
def test_index(self):
u = range(2)
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = range(-2, 3)
self.assertEqual(u.count(0), 1)
self.assertEqual(u.index(0), 2)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = range(4)
self.assertRaises(BadExc, a.index, BadCmp())
a = range(-2, 3)
self.assertEqual(a.index(0), 2)
self.assertEqual(range(1, 10, 3).index(4), 1)
self.assertEqual(range(1, -10, -3).index(-5), 2)
self.assertEqual(range(10**20).index(1), 1)
self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1)
self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87)
self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86)
self.assertEqual(range(10).index(ALWAYS_EQ), 0)
def test_user_index_method(self):
bignum = 2*sys.maxsize
smallnum = 42
# User-defined class with an __index__ method
class I:
def __init__(self, n):
self.n = int(n)
def __index__(self):
return self.n
self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])
# User-defined class with a failing __index__ method
class IX:
def __index__(self):
raise RuntimeError
self.assertRaises(RuntimeError, range, IX())
# User-defined class with an invalid __index__ method
class IN:
def __index__(self):
return "not a number"
self.assertRaises(TypeError, range, IN())
# Test use of user-defined classes in slice indices.
self.assertEqual(range(10)[:I(5)], range(5))
with self.assertRaises(RuntimeError):
range(0, 10)[:IX()]
with self.assertRaises(TypeError):
range(0, 10)[:IN()]
def test_count(self):
self.assertEqual(range(3).count(-1), 0)
self.assertEqual(range(3).count(0), 1)
self.assertEqual(range(3).count(1), 1)
self.assertEqual(range(3).count(2), 1)
self.assertEqual(range(3).count(3), 0)
self.assertIs(type(range(3).count(-1)), int)
self.assertIs(type(range(3).count(1)), int)
self.assertEqual(range(10**20).count(1), 1)
self.assertEqual(range(10**20).count(10**20), 0)
self.assertEqual(range(3).index(1), 1)
self.assertEqual(range(1, 2**100, 2).count(2**87), 0)
self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1)
# self.assertEqual(range(10).count(ALWAYS_EQ), 10)
self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10)
def test_repr(self):
self.assertEqual(repr(range(1)), 'range(0, 1)')
self.assertEqual(repr(range(1, 2)), 'range(1, 2)')
self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)')
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
with self.subTest(proto=proto, test=t):
r = range(*t)
self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
def test_iterator_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), (13, 21, 3),
(-2, 2, 2)]
for M in 2**31, 2**63:
testcases += [
(M-3, M-1), (4*M, 4*M+2),
(M-2, M-1, 2), (-M+1, -M, -2),
(1, 2, M-1), (-1, -2, -M),
(1, M-1, M-1), (-1, -M, -M),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
with self.subTest(proto=proto, t=t):
it = itorg = iter(range(*t))
data = list(range(*t))
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
@skipIfTorchDynamo("infinite loop")
def test_iterator_pickling_overflowing_index(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
it = iter(range(2**32 + 2))
it.__setstate__(2**32 + 1) # undocumented way to advance an iterator
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(next(it), 2**32 + 1)
def test_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(2**65, 2**65+2)
i = iter(r)
while True:
r = next(i)
if r == 2**65+1:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_large_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(20)
i = iter(r)
while True:
r = next(i)
if r == 19:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_iterator_unpickle_compat(self):
testcases = [
b'c__builtin__\niter\n(c__builtin__\nxrange\n(I10\nI20\nI2\ntRtRI2\nb.',
b'c__builtin__\niter\n(c__builtin__\nxrange\n(K\nK\x14K\x02tRtRK\x02b.',
b'\x80\x02c__builtin__\niter\nc__builtin__\nxrange\nK\nK\x14K\x02\x87R\x85RK\x02b.',
b'\x80\x03cbuiltins\niter\ncbuiltins\nrange\nK\nK\x14K\x02\x87R\x85RK\x02b.',
b'\x80\x04\x951\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x8c\x04iter\x93\x8c\x08builtins\x8c\x05range\x93K\nK\x14K\x02\x87R\x85RK\x02b.',
b'c__builtin__\niter\n(c__builtin__\nxrange\n(L-36893488147419103232L\nI20\nI2\ntRtRL18446744073709551623L\nb.',
b'c__builtin__\niter\n(c__builtin__\nxrange\n(L-36893488147419103232L\nK\x14K\x02tRtRL18446744073709551623L\nb.',
b'\x80\x02c__builtin__\niter\nc__builtin__\nxrange\n\x8a\t\x00\x00\x00\x00\x00\x00\x00\x00\xfeK\x14K\x02\x87R\x85R\x8a\t\x07\x00\x00\x00\x00\x00\x00\x00\x01b.',
b'\x80\x03cbuiltins\niter\ncbuiltins\nrange\n\x8a\t\x00\x00\x00\x00\x00\x00\x00\x00\xfeK\x14K\x02\x87R\x85R\x8a\t\x07\x00\x00\x00\x00\x00\x00\x00\x01b.',
b'\x80\x04\x95C\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x8c\x04iter\x93\x8c\x08builtins\x8c\x05range\x93\x8a\t\x00\x00\x00\x00\x00\x00\x00\x00\xfeK\x14K\x02\x87R\x85R\x8a\t\x07\x00\x00\x00\x00\x00\x00\x00\x01b.',
]
for t in testcases:
it = pickle.loads(t)
self.assertEqual(list(it), [14, 16, 18])
def test_iterator_setstate(self):
it = iter(range(10, 20, 2))
it.__setstate__(2)
self.assertEqual(list(it), [14, 16, 18])
it = reversed(range(10, 20, 2))
it.__setstate__(3)
self.assertEqual(list(it), [12, 10])
it = iter(range(-2**65, 20, 2))
it.__setstate__(2**64 + 7)
self.assertEqual(list(it), [14, 16, 18])
it = reversed(range(10, 2**65, 2))
it.__setstate__(2**64 - 7)
self.assertEqual(list(it), [12, 10])
def test_odd_bug(self):
# This used to raise a "SystemError: NULL result without error"
# because the range validation step was eating the exception
# before NULL was returned.
with self.assertRaises(TypeError):
range([], 1, -1)
def test_types(self):
# Non-integer objects *equal* to any of the range's items are supposed
# to be contained in the range.
self.assertIn(1.0, range(3))
self.assertIn(True, range(3))
self.assertIn(1+0j, range(3))
self.assertIn(ALWAYS_EQ, range(3))
# Objects are never coerced into other types for comparison.
class C2:
def __int__(self): return 1
def __index__(self): return 1
self.assertNotIn(C2(), range(3))
# ..except if explicitly told so.
self.assertIn(int(C2()), range(3))
# Check that the range.__contains__ optimization is only
# used for ints, not for instances of subclasses of int.
class C3(int):
def __eq__(self, other): return True
self.assertIn(C3(11), range(10))
self.assertIn(C3(11), list(range(10)))
def test_strided_limits(self):
r = range(0, 101, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertIn(2, r)
self.assertNotIn(99, r)
self.assertIn(100, r)
self.assertNotIn(101, r)
r = range(0, -20, -1)
self.assertIn(0, r)
self.assertIn(-1, r)
self.assertIn(-19, r)
self.assertNotIn(-20, r)
r = range(0, -20, -2)
self.assertIn(-18, r)
self.assertNotIn(-19, r)
self.assertNotIn(-20, r)
def test_empty(self):
r = range(0)
self.assertNotIn(0, r)
self.assertNotIn(1, r)
r = range(0, -10)
self.assertNotIn(0, r)
self.assertNotIn(-1, r)
self.assertNotIn(1, r)
def test_range_iterators(self):
# exercise 'fast' iterators, that use a rangeiterobject internally.
# see issue 7298
limits = [base + jiggle
for M in (2**32, 2**64)
for base in (-M, -M//2, 0, M//2, M)
for jiggle in (-2, -1, 0, 1, 2)]
test_ranges = [(start, end, step)
for start in limits
for end in limits
for step in (-2**63, -2**31, -2, -1, 1, 2)]
test_ranges += [(-2**63, 2**63-2, 1)] # regression test for gh-100810
for start, end, step in test_ranges:
iter1 = range(start, end, step)
iter2 = pyrange(start, end, step)
test_id = "range({}, {}, {})".format(start, end, step)
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
iter1 = reversed(range(start, end, step))
iter2 = pyrange_reversed(start, end, step)
test_id = "reversed(range({}, {}, {}))".format(start, end, step)
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_range_iterators_invocation(self):
# verify range iterators instances cannot be created by
# calling their type
rangeiter_type = type(iter(range(0)))
self.assertRaises(TypeError, rangeiter_type, 1, 3, 1)
long_rangeiter_type = type(iter(range(1 << 1000)))
self.assertRaises(TypeError, long_rangeiter_type, 1, 3, 1)
def test_slice(self):
def check(start, stop, step=None):
i = slice(start, stop, step)
self.assertEqual(list(r[i]), list(r)[i])
self.assertEqual(len(r[i]), len(list(r)[i]))
for r in [range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize+1, sys.maxsize+10),
]:
check(0, 2)
check(0, 20)
check(1, 2)
check(20, 30)
check(-30, -20)
check(-1, 100, 2)
check(0, -1)
check(-1, -3, -1)
def test_contains(self):
r = range(10)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -1)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(0, 10, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertNotIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -2)
self.assertNotIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
def test_reverse_iteration(self):
for r in [range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize+1, sys.maxsize+10),
]:
self.assertEqual(list(reversed(r)), list(r)[::-1])
def test_issue11845(self):
r = range(*slice(1, 18, 2).indices(20))
values = {None, 0, 1, -1, 2, -2, 5, -5, 19, -19,
20, -20, 21, -21, 30, -30, 99, -99}
for i in values:
for j in values:
for k in values - {0}:
r[i:j:k]
def test_comparison(self):
test_ranges = [range(0), range(0, -1), range(1, 1, 3),
range(1), range(5, 6), range(5, 6, 2),
range(5, 7, 2), range(2), range(0, 4, 2),
range(0, 5, 2), range(0, 6, 2)]
test_tuples = list(map(tuple, test_ranges))
# Check that equality of ranges matches equality of the corresponding
# tuples for each pair from the test lists above.
ranges_eq = [a == b for a in test_ranges for b in test_ranges]
tuples_eq = [a == b for a in test_tuples for b in test_tuples]
self.assertEqual(ranges_eq, tuples_eq)
# Check that != correctly gives the logical negation of ==
ranges_ne = [a != b for a in test_ranges for b in test_ranges]
self.assertEqual(ranges_ne, [not x for x in ranges_eq])
# Ranges are unequal to other types (even sequence types)
self.assertIs(range(0) == (), False)
# self.assertIs(() == range(0), False)
self.assertIs(range(2) == [0, 1], False)
# Huge integers aren't a problem.
self.assertEqual(range(0, 2**100 - 1, 2),
range(0, 2**100, 2))
self.assertNotEqual(range(0, 2**100, 2),
range(0, 2**100 + 1, 2))
self.assertEqual(range(2**200, 2**201 - 2**99, 2**100),
range(2**200, 2**201, 2**100))
self.assertNotEqual(range(2**200, 2**201, 2**100),
range(2**200, 2**201 + 1, 2**100))
# Order comparisons are not implemented for ranges.
with self.assertRaises(TypeError):
range(0) < range(0)
with self.assertRaises(TypeError):
range(0) > range(0)
with self.assertRaises(TypeError):
range(0) <= range(0)
with self.assertRaises(TypeError):
range(0) >= range(0)
def test_attributes(self):
# test the start, stop and step attributes of range objects
self.assert_attrs(range(0), 0, 0, 1)
self.assert_attrs(range(10), 0, 10, 1)
self.assert_attrs(range(-10), 0, -10, 1)
self.assert_attrs(range(0, 10, 1), 0, 10, 1)
self.assert_attrs(range(0, 10, 3), 0, 10, 3)
self.assert_attrs(range(10, 0, -1), 10, 0, -1)
self.assert_attrs(range(10, 0, -3), 10, 0, -3)
self.assert_attrs(range(True), 0, 1, 1)
self.assert_attrs(range(False, True), 0, 1, 1)
self.assert_attrs(range(False, True, True), 0, 1, 1)
def assert_attrs(self, rangeobj, start, stop, step):
self.assertEqual(rangeobj.start, start)
self.assertEqual(rangeobj.stop, stop)
self.assertEqual(rangeobj.step, step)
self.assertIs(type(rangeobj.start), int)
self.assertIs(type(rangeobj.stop), int)
self.assertIs(type(rangeobj.step), int)
if __name__ == "__main__":
run_tests()
| RangeTest |
python | pennersr__django-allauth | allauth/usersessions/views.py | {
"start": 577,
"end": 1666
} | class ____(FormView):
template_name = (
"usersessions/usersession_list." + account_settings.TEMPLATE_EXTENSION
)
form_class = ManageUserSessionsForm
success_url = reverse_lazy("usersessions_list")
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
sessions = sorted(
UserSession.objects.purge_and_list(self.request.user),
key=lambda s: s.created_at,
)
ret["sessions"] = sessions
ret["session_count"] = len(sessions)
ret["show_last_seen_at"] = app_settings.TRACK_ACTIVITY
return ret
def get_form_kwargs(self):
ret = super().get_form_kwargs()
ret["request"] = self.request
return ret
def form_valid(self, form):
form.save(self.request)
get_account_adapter().add_message(
self.request,
messages.INFO,
"usersessions/messages/sessions_logged_out.txt",
)
return super().form_valid(form)
list_usersessions = ListUserSessionsView.as_view()
| ListUserSessionsView |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 10959,
"end": 11625
} | class ____:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (
self._utype_in_v1_2
and utype is not None
and not self._config.get("version_1_2_or_later")
):
warn_or_raise(
W28, W28, ("utype", self._element_name, "1.2"), self._config, self._pos
)
check_string(utype, "utype", self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
| _UtypeProperty |
python | getsentry__sentry | tests/sentry/db/test_transactions.py | {
"start": 5165,
"end": 5921
} | class ____(CaseMixin):
@no_silo_test
@django_db_all
def test_in_test_assert_no_transaction(self) -> None:
super().test_in_test_assert_no_transaction()
@no_silo_test
@django_db_all
def test_transaction_on_commit(self) -> None:
super().test_transaction_on_commit()
@no_silo_test
@django_db_all
def test_safe_transaction_boundaries(self) -> None:
super().test_safe_transaction_boundaries()
@no_silo_test
@django_db_all
def test_bad_transaction_boundaries(self) -> None:
super().test_bad_transaction_boundaries()
@no_silo_test
@django_db_all
def test_collect_transaction_queries(self) -> None:
super().test_collect_transaction_queries()
| TestPytestDjangoDbAll |
python | pypa__setuptools | setuptools/_distutils/tests/test_modified.py | {
"start": 251,
"end": 4221
} | class ____(support.TempdirManager):
def test_newer(self):
tmpdir = self.mkdtemp()
new_file = os.path.join(tmpdir, 'new')
old_file = os.path.abspath(__file__)
# Raise DistutilsFileError if 'new_file' does not exist.
with pytest.raises(DistutilsFileError):
newer(new_file, old_file)
# Return true if 'new_file' exists and is more recently modified than
# 'old_file', or if 'new_file' exists and 'old_file' doesn't.
self.write_file(new_file)
assert newer(new_file, 'I_dont_exist')
assert newer(new_file, old_file)
# Return false if both exist and 'old_file' is the same age or younger
# than 'new_file'.
assert not newer(old_file, new_file)
def _setup_1234(self):
tmpdir = self.mkdtemp()
sources = os.path.join(tmpdir, 'sources')
targets = os.path.join(tmpdir, 'targets')
os.mkdir(sources)
os.mkdir(targets)
one = os.path.join(sources, 'one')
two = os.path.join(sources, 'two')
three = os.path.abspath(__file__) # I am the old file
four = os.path.join(targets, 'four')
self.write_file(one)
self.write_file(two)
self.write_file(four)
return one, two, three, four
def test_newer_pairwise(self):
one, two, three, four = self._setup_1234()
assert newer_pairwise([one, two], [three, four]) == ([one], [three])
def test_newer_pairwise_mismatch(self):
one, two, three, four = self._setup_1234()
with pytest.raises(ValueError):
newer_pairwise([one], [three, four])
with pytest.raises(ValueError):
newer_pairwise([one, two], [three])
def test_newer_pairwise_empty(self):
assert newer_pairwise([], []) == ([], [])
def test_newer_pairwise_fresh(self):
one, two, three, four = self._setup_1234()
assert newer_pairwise([one, three], [two, four]) == ([], [])
def test_newer_group(self):
tmpdir = self.mkdtemp()
sources = os.path.join(tmpdir, 'sources')
os.mkdir(sources)
one = os.path.join(sources, 'one')
two = os.path.join(sources, 'two')
three = os.path.join(sources, 'three')
old_file = os.path.abspath(__file__)
# return true if 'old_file' is out-of-date with respect to any file
# listed in 'sources'.
self.write_file(one)
self.write_file(two)
self.write_file(three)
assert newer_group([one, two, three], old_file)
assert not newer_group([one, two, old_file], three)
# missing handling
os.remove(one)
with pytest.raises(OSError):
newer_group([one, two, old_file], three)
assert not newer_group([one, two, old_file], three, missing='ignore')
assert newer_group([one, two, old_file], three, missing='newer')
@pytest.fixture
def groups_target(tmp_path):
"""
Set up some older sources, a target, and newer sources.
Returns a simple namespace with these values.
"""
filenames = ['older.c', 'older.h', 'target.o', 'newer.c', 'newer.h']
paths = [tmp_path / name for name in filenames]
for mtime, path in enumerate(paths):
path.write_text('', encoding='utf-8')
# make sure modification times are sequential
os.utime(path, (mtime, mtime))
return types.SimpleNamespace(older=paths[:2], target=paths[2], newer=paths[3:])
def test_newer_pairwise_group(groups_target):
older = newer_pairwise_group([groups_target.older], [groups_target.target])
newer = newer_pairwise_group([groups_target.newer], [groups_target.target])
assert older == ([], [])
assert newer == ([groups_target.newer], [groups_target.target])
def test_newer_group_no_sources_no_target(tmp_path):
"""
Consider no sources and no target "newer".
"""
assert newer_group([], str(tmp_path / 'does-not-exist'))
| TestDepUtil |
python | joke2k__faker | faker/providers/date_time/cs_CZ/__init__.py | {
"start": 46,
"end": 766
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "neděle",
"1": "pondělí",
"2": "úterý",
"3": "středa",
"4": "čtvrtek",
"5": "pátek",
"6": "sobota",
}
MONTH_NAMES = {
"01": "leden",
"02": "únor",
"03": "březen",
"04": "duben",
"05": "květen",
"06": "červen",
"07": "červenec",
"08": "srpen",
"09": "září",
"10": "říjen",
"11": "listopad",
"12": "prosinec",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | pdm-project__pdm | src/pdm/cli/commands/self_cmd.py | {
"start": 3976,
"end": 5079
} | class ____(BaseCommand):
"""Install packages to the PDM's environment"""
arguments = (verbose_option,)
name = "add"
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--pip-args",
help="Arguments that will be passed to pip install",
default="",
)
parser.add_argument(
"packages",
nargs="+",
help="Specify one or many package names, each package can have a version specifier",
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
pip_args = ["install", *shlex.split(options.pip_args), *options.packages]
try:
with project.core.ui.open_spinner(f"Installing packages: {options.packages}"):
run_pip(project, pip_args)
except subprocess.CalledProcessError as e:
project.core.ui.echo("[error]Installation failed:[/]\n" + e.output, err=True)
sys.exit(1)
else:
project.core.ui.echo("[success]Installation succeeds.[/]")
| AddCommand |
python | numba__numba | numba/core/types/common.py | {
"start": 532,
"end": 783
} | class ____(IteratorType):
def __init__(self, name, yield_type):
self._yield_type = yield_type
super(SimpleIteratorType, self).__init__(name)
@property
def yield_type(self):
return self._yield_type
| SimpleIteratorType |
python | pytorch__pytorch | torch/_inductor/remote_cache.py | {
"start": 2725,
"end": 2971
} | class ____(RemoteCacheSerde[JsonDataTy, bytes]):
def encode(self, data: JsonDataTy) -> bytes:
return bytes(json.dumps(data), "ascii")
def decode(self, data: bytes) -> JsonDataTy:
return json.loads(data)
| RemoteCacheJsonSerde |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py | {
"start": 802,
"end": 884
} | class ____(BaseVoiceAgentEvent):
call_id: str
output: str
| FunctionResultItem |
python | google__jax | jax/experimental/sparse/util.py | {
"start": 1090,
"end": 1181
} | class ____(SparseEfficiencyWarning):
pass
Shape = tuple[int, ...]
| CuSparseEfficiencyWarning |
python | ray-project__ray | python/ray/tests/test_placement_group_4.py | {
"start": 765,
"end": 14396
} | class ____(RuntimeEnvPlugin):
name = MOCK_WORKER_STARTUP_SLOWLY_PLUGIN_NAME
def validate(runtime_env_dict: dict) -> str:
return "success"
@staticmethod
def create(uri: str, runtime_env_dict: dict, ctx: RuntimeEnvContext) -> float:
time.sleep(60)
return 0
def test_remove_placement_group(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
@ray.remote
def warmup():
pass
# warm up the cluster.
ray.get([warmup.remote() for _ in range(4)])
# First try to remove a placement group that doesn't
# exist. This should not do anything.
random_group_id = PlacementGroupID.from_random()
random_placement_group = PlacementGroup(random_group_id)
for _ in range(3):
ray.util.remove_placement_group(random_placement_group)
# Creating a placement group as soon as it is
# created should work.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
ray.util.remove_placement_group(placement_group)
wait_for_condition(lambda: is_placement_group_removed(placement_group))
# # Now let's create a placement group.
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
# Create an actor that occupies resources.
@ray.remote(num_cpus=2)
class A:
def f(self):
return 3
# Currently, there's no way to prevent
# tasks to be retried for removed placement group.
# Set max_retries=0 for testing.
# TODO(sang): Handle this edge case.
@ray.remote(num_cpus=2, max_retries=0)
def long_running_task():
print(os.getpid())
time.sleep(50)
# Schedule a long running task and actor.
task_ref = long_running_task.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group
)
).remote()
a = A.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group
)
).remote()
assert ray.get(a.f.remote()) == 3
ray.util.remove_placement_group(placement_group)
# Subsequent remove request shouldn't do anything.
for _ in range(3):
ray.util.remove_placement_group(placement_group)
# Make sure placement group resources are
# released and we can schedule this task.
@ray.remote(num_cpus=4)
def f():
return 3
assert ray.get(f.remote()) == 3
# Since the placement group is removed,
# the actor should've been killed.
# That means this request should fail.
with pytest.raises(ray.exceptions.RayActorError, match="actor died"):
ray.get(a.f.remote(), timeout=3.0)
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(task_ref)
@pytest.mark.parametrize(
"set_runtime_env_plugins",
[
'[{"class":"' + MOCK_WORKER_STARTUP_SLOWLY_PLUGIN_CLASS_PATH + '"}]',
],
indirect=True,
)
def test_remove_placement_group_worker_startup_slowly(
set_runtime_env_plugins, ray_start_cluster
):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
placement_group = ray.util.placement_group([{"CPU": 2}, {"CPU": 2}])
assert placement_group.wait(10)
@ray.remote(num_cpus=2)
class A:
def ready(self):
return "ok"
def hang(self):
time.sleep(60)
@ray.remote(num_cpus=2, max_retries=0)
def long_running_task():
time.sleep(60)
# Schedule a long-running task that uses
# runtime env to mock worker start up slowly.
task_ref = long_running_task.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group
),
runtime_env={MOCK_WORKER_STARTUP_SLOWLY_PLUGIN_NAME: {}},
).remote()
a = A.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group
)
).remote()
assert ray.get(a.ready.remote()) == "ok"
# Remove the PG, check that the actor and task are failed.
ray.util.remove_placement_group(placement_group)
with pytest.raises(ray.exceptions.RayActorError, match="actor died"):
ray.get(a.hang.remote(), timeout=10)
# The long-running task should still be in the state
# of leasing-worker bacause of the worker startup delay.
with pytest.raises(ray.exceptions.TaskPlacementGroupRemoved):
ray.get(task_ref)
def test_remove_pending_placement_group(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
# Create a placement group that cannot be scheduled now.
placement_group = ray.util.placement_group([{"GPU": 2}, {"CPU": 2}])
ray.util.remove_placement_group(placement_group)
# TODO(sang): Add state check here.
@ray.remote(num_cpus=4)
def f():
return 3
# Make sure this task is still schedulable.
assert ray.get(f.remote()) == 3
placement_group_assert_no_leak([placement_group])
def test_placement_group_table(ray_start_cluster):
@ray.remote(num_cpus=2)
class Actor(object):
def __init__(self):
self.n = 0
def value(self):
return self.n
cluster = ray_start_cluster
num_nodes = 2
for _ in range(num_nodes):
cluster.add_node(num_cpus=4)
ray.init(address=cluster.address)
pgs_created = []
# Originally placement group creation should be pending because
# there are no resources.
name = "name"
strategy = "PACK"
bundles = [{"CPU": 2, "GPU": 1}, {"CPU": 2}]
placement_group = ray.util.placement_group(
name=name, strategy=strategy, bundles=bundles
)
pgs_created.append(placement_group)
result = ray.util.placement_group_table(placement_group)
assert result["name"] == name
assert result["strategy"] == strategy
for i in range(len(bundles)):
assert bundles[i] == result["bundles"][i]
assert result["state"] == "PENDING"
# Now the placement group should be scheduled.
cluster.add_node(num_cpus=5, num_gpus=1)
cluster.wait_for_nodes()
actor_1 = Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=0
)
).remote()
ray.get(actor_1.value.remote())
result = ray.util.placement_group_table(placement_group)
assert result["state"] == "CREATED"
# Add tow more placement group for placement group table test.
second_strategy = "SPREAD"
pgs_created.append(
ray.util.placement_group(
name="second_placement_group", strategy=second_strategy, bundles=bundles
)
)
pgs_created.append(
ray.util.placement_group(
name="third_placement_group", strategy=second_strategy, bundles=bundles
)
)
placement_group_table = ray.util.placement_group_table()
assert len(placement_group_table) == 3
true_name_set = {"name", "second_placement_group", "third_placement_group"}
get_name_set = set()
for _, placement_group_data in placement_group_table.items():
get_name_set.add(placement_group_data["name"])
assert true_name_set == get_name_set
placement_group_assert_no_leak(pgs_created)
def test_placement_group_stats(ray_start_cluster):
    """Verify pg scheduling stats move through the expected states.

    Covers: a schedulable pg (FINISHED), a pg blocked on resources
    (NO_RESOURCES), the blocked pg finishing once resources free up, and a
    removed pg (REMOVED).
    """
    cluster = ray_start_cluster
    num_nodes = 1
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=4, num_gpus=1)
    ray.init(address=cluster.address)

    # Test createable pgs.
    pg = ray.util.placement_group(bundles=[{"CPU": 4, "GPU": 1}])
    ray.get(pg.ready())

    stats = ray.util.placement_group_table(pg)["stats"]
    assert stats["scheduling_attempt"] == 1
    assert stats["scheduling_state"] == "FINISHED"
    assert stats["end_to_end_creation_latency_ms"] != 0

    # Create a pending pg. The whole cluster's resources are already held by
    # `pg`, so this one cannot be placed yet.
    pg2 = ray.util.placement_group(bundles=[{"CPU": 4, "GPU": 1}])

    # Each wait-predicate gets its own name. The original redefined a single
    # `assert_scheduling_state` three times, which shadows the previous
    # definition (flake8 F811) and makes failure tracebacks ambiguous.
    def pg2_pending_on_resources():
        stats = ray.util.placement_group_table(pg2)["stats"]
        if stats["scheduling_attempt"] != 1:
            return False
        if stats["scheduling_state"] != "NO_RESOURCES":
            return False
        if stats["end_to_end_creation_latency_ms"] != 0:
            return False
        return True

    wait_for_condition(pg2_pending_on_resources)

    # Remove the first pg, and the second pg should be schedulable now.
    ray.util.remove_placement_group(pg)

    def pg2_finished():
        stats = ray.util.placement_group_table(pg2)["stats"]
        if stats["scheduling_state"] != "FINISHED":
            return False
        if stats["end_to_end_creation_latency_ms"] == 0:
            return False
        return True

    wait_for_condition(pg2_finished)

    # Infeasible pg ("a" is a custom resource no node provides).
    pg3 = ray.util.placement_group(bundles=[{"CPU": 4, "a": 1}])
    # TODO This is supposed to be infeasible, but it is printed
    # as NO_RESOURCES. Fix the issue.
    # def assert_scheduling_state():
    #     stats = ray.util.placement_group_table(pg3)["stats"]
    #     print(stats)
    #     if stats["scheduling_state"] != "INFEASIBLE":
    #         return False
    #     return True
    # wait_for_condition(assert_scheduling_state)
    ray.util.remove_placement_group(pg3)

    def pg3_removed():
        stats = ray.util.placement_group_table(pg3)["stats"]
        if stats["scheduling_state"] != "REMOVED":
            return False
        return True

    wait_for_condition(pg3_removed)

    placement_group_assert_no_leak([pg2])
def test_cuda_visible_devices(ray_start_cluster):
    """A GPU task placed in a placement group still sees CUDA_VISIBLE_DEVICES."""

    @ray.remote(num_gpus=1)
    def f():
        return os.environ["CUDA_VISIBLE_DEVICES"]

    cluster = ray_start_cluster
    num_nodes = 1
    for _ in range(num_nodes):
        cluster.add_node(num_gpus=1)
    ray.init(address=cluster.address)

    # Reserve one CPU/GPU bundle and run the GPU task inside it.
    gpu_pg = ray.util.placement_group([{"CPU": 1, "GPU": 1}])
    ref = f.options(
        scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=gpu_pg)
    ).remote()
    devices = ray.get(ref)
    assert devices == "0", devices

    placement_group_assert_no_leak([gpu_pg])
def test_placement_group_reschedule_when_node_dead(ray_start_cluster):
    """After a node dies, the SPREAD pg reschedules its bundle and detached
    actors can be placed on every bundle index again."""

    @ray.remote(num_cpus=1)
    class Actor(object):
        def __init__(self):
            self.n = 0

        def value(self):
            return self.n

    cluster = ray_start_cluster
    cluster.add_node(num_cpus=4)
    cluster.add_node(num_cpus=4)
    cluster.add_node(num_cpus=4)
    cluster.wait_for_nodes()
    ray.init(address=cluster.address, namespace="default_test_namespace")

    # Make sure both head and worker nodes are alive.
    nodes = ray.nodes()
    assert len(nodes) == 3
    assert nodes[0]["alive"] and nodes[1]["alive"] and nodes[2]["alive"]

    placement_group = ray.util.placement_group(
        name="name", strategy="SPREAD", bundles=[{"CPU": 2}, {"CPU": 2}, {"CPU": 2}]
    )

    def start_detached_actor(bundle_index):
        # One detached actor pinned to a specific bundle of the pg. The
        # original repeated this stanza six times verbatim.
        return Actor.options(
            scheduling_strategy=PlacementGroupSchedulingStrategy(
                placement_group=placement_group,
                placement_group_bundle_index=bundle_index,
            ),
            lifetime="detached",
        ).remote()

    # One actor per bundle; ray.get confirms each was actually scheduled.
    first_wave = [start_detached_actor(i) for i in range(3)]
    ray.get([actor.value.remote() for actor in first_wave])

    # Kill one worker node; the pg must reschedule the lost bundle.
    cluster.remove_node(get_other_nodes(cluster, exclude_head=True)[-1])
    cluster.wait_for_nodes()

    # Every bundle index must be usable again after rescheduling.
    second_wave = [start_detached_actor(i) for i in range(3)]
    ray.get([actor.value.remote() for actor in second_wave])

    placement_group_assert_no_leak([placement_group])
def test_infeasible_pg(ray_start_cluster):
    """Test infeasible pgs are scheduled after new nodes are added."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2)
    # Connect by explicit address, consistent with the other tests in this
    # module. The original used ray.init("auto"), whose address discovery can
    # attach to an unrelated locally running cluster.
    ray.init(address=cluster.address)

    bundle = {"CPU": 4, "GPU": 1}
    pg = ray.util.placement_group([bundle], name="worker_1", strategy="STRICT_PACK")

    # Placement group is infeasible: no node can hold the bundle yet.
    with pytest.raises(ray.exceptions.GetTimeoutError):
        ray.get(pg.ready(), timeout=3)

    state = ray.util.placement_group_table()[pg.id.hex()]["stats"]["scheduling_state"]
    assert state == "INFEASIBLE"

    # Add a new node that fits the bundle. PG can now be scheduled.
    cluster.add_node(num_cpus=4, num_gpus=1)
    assert ray.get(pg.ready(), timeout=10)
# Allow running this test file directly; propagate pytest's exit status.
if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
| MockWorkerStartupSlowlyPlugin |
python | ray-project__ray | python/ray/_private/parameter.py | {
"start": 310,
"end": 22122
class ____:
    """A class used to store the parameters used by Ray.

    Attributes:
        redis_address: The address of the Redis server to connect to. If
            this address is not provided, then this command will start Redis, a
            raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits.
        redis_port: The port that the primary Redis shard should listen
            to. If None, then it will fall back to
            ray._private.ray_constants.DEFAULT_PORT, or a random port if the
            default is not available.
        redis_shard_ports: A list of the ports to use for the non-primary Redis
            shards. If None, then it will fall back to the ports right after
            redis_port, or random ports if those are not available.
        num_cpus: Number of CPUs to configure the raylet with.
        num_gpus: Number of GPUs to configure the raylet with.
        resources: A dictionary mapping the name of a resource to the quantity
            of that resource available.
        labels: The key-value labels of the node.
        memory: Total available memory for workers requesting memory.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        object_manager_port: The port to use for the object manager.
        node_manager_port: The port to use for the node manager.
        gcs_server_port: The port to use for the GCS server.
        node_ip_address: The IP address of the node that we are on.
        min_worker_port: The lowest port number that workers will bind
            on. If not set or set to 0, random ports will be chosen.
        max_worker_port: The highest port number that workers will bind
            on. If set, min_worker_port must also be set.
        worker_port_list: An explicit list of ports to be used for
            workers (comma-separated). Overrides min_worker_port and
            max_worker_port.
        ray_client_server_port: The port number the ray client server
            will bind on. If not set, the ray client server will not
            be started.
        redirect_output: True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        external_addresses: The address of external Redis server to
            connect to, in format of "ip1:port1,ip2:port2,...". If this
            address is provided, then ray won't start Redis instances in the
            head node but use external Redis server(s) instead.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_username: Prevents external clients without the username
            from connecting to Redis if provided.
        redis_password: Prevents external clients without the password
            from connecting to Redis if provided.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        object_spilling_directory: The path to spill objects to. The same path
            will be used as the object store fallback directory as well.
        worker_path: The path of the source code that will be run by the
            worker.
        setup_worker_path: The path of the Python file that will set up
            the environment for the worker process.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_dashboard: Boolean flag indicating whether to start the web
            UI, which displays the status of the Ray cluster. If this value is
            None, then the UI will be started if the relevant dependencies are
            present.
        dashboard_host: The host to bind the web UI server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to.
            Defaults to 8265.
        dashboard_agent_listen_port: The port for dashboard agents to listen on
            for HTTP requests.
            Defaults to 52365.
        runtime_env_agent_port: The port at which the runtime env agent
            listens to for HTTP.
            Defaults to random available port.
        plasma_store_socket_name: If provided, it specifies the socket
            name used by the plasma store.
        raylet_socket_name: If provided, it specifies the socket path
            used by the raylet process.
        temp_dir: If provided, it will specify the root temporary
            directory for the Ray process. Must be an absolute path.
        runtime_env_dir_name: If provided, specifies the directory that
            will be created in the session dir to hold runtime_env files.
        include_log_monitor: If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        autoscaling_config: path to autoscaling config file.
        metrics_agent_port: The port to bind metrics agent.
        metrics_export_port: The port at which metrics are exposed
            through a Prometheus endpoint.
        no_monitor: If True, the ray autoscaler monitor for this cluster
            will not be started.
        _system_config: Configuration for overriding RayConfig
            defaults. Used to set system configuration and for experimental Ray
            core feature flags.
        enable_object_reconstruction: Enable plasma reconstruction on
            failure.
        ray_debugger_external: If true, make the Ray debugger for a
            worker available externally to the node it is running on. This will
            bind on 0.0.0.0 instead of localhost.
        env_vars: Override environment variables for the raylet.
        session_name: The current Ray session name.
        webui: The url of the UI.
        cluster_id: The cluster ID in hex string.
        resource_isolation_config: settings for cgroupv2 based isolation of ray
            system processes (defaults to no isolation if config not provided)
    """

    def __init__(
        self,
        redis_address: Optional[str] = None,
        gcs_address: Optional[str] = None,
        num_cpus: Optional[int] = None,
        num_gpus: Optional[int] = None,
        resources: Optional[Dict[str, float]] = None,
        labels: Optional[Dict[str, str]] = None,
        memory: Optional[float] = None,
        object_store_memory: Optional[float] = None,
        redis_port: Optional[int] = None,
        redis_shard_ports: Optional[List[int]] = None,
        object_manager_port: Optional[int] = None,
        node_manager_port: int = 0,
        gcs_server_port: Optional[int] = None,
        node_ip_address: Optional[str] = None,
        node_name: Optional[str] = None,
        min_worker_port: Optional[int] = None,
        max_worker_port: Optional[int] = None,
        worker_port_list: Optional[List[int]] = None,
        ray_client_server_port: Optional[int] = None,
        driver_mode=None,
        redirect_output: Optional[bool] = None,
        external_addresses: Optional[List[str]] = None,
        num_redis_shards: Optional[int] = None,
        redis_max_clients: Optional[int] = None,
        redis_username: Optional[str] = ray_constants.REDIS_DEFAULT_USERNAME,
        redis_password: Optional[str] = ray_constants.REDIS_DEFAULT_PASSWORD,
        plasma_directory: Optional[str] = None,
        object_spilling_directory: Optional[str] = None,
        worker_path: Optional[str] = None,
        setup_worker_path: Optional[str] = None,
        huge_pages: Optional[bool] = False,
        include_dashboard: Optional[bool] = None,
        dashboard_host: Optional[str] = ray_constants.DEFAULT_DASHBOARD_IP,
        # NOTE: annotation fixed from Optional[bool]; the default is a port
        # number, so this was an obvious typo.
        dashboard_port: Optional[int] = ray_constants.DEFAULT_DASHBOARD_PORT,
        dashboard_agent_listen_port: Optional[
            int
        ] = ray_constants.DEFAULT_DASHBOARD_AGENT_LISTEN_PORT,
        runtime_env_agent_port: Optional[int] = None,
        plasma_store_socket_name: Optional[str] = None,
        raylet_socket_name: Optional[str] = None,
        temp_dir: Optional[str] = None,
        runtime_env_dir_name: Optional[str] = None,
        # NOTE: annotation fixed from Optional[str]; documented as a boolean
        # flag ("If True, then start a log monitor ...").
        include_log_monitor: Optional[bool] = None,
        autoscaling_config: Optional[str] = None,
        ray_debugger_external: bool = False,
        _system_config: Optional[Dict[str, str]] = None,
        enable_object_reconstruction: Optional[bool] = False,
        metrics_agent_port: Optional[int] = None,
        metrics_export_port: Optional[int] = None,
        tracing_startup_hook=None,
        no_monitor: Optional[bool] = False,
        env_vars: Optional[Dict[str, str]] = None,
        session_name: Optional[str] = None,
        webui: Optional[str] = None,
        cluster_id: Optional[str] = None,
        node_id: Optional[str] = None,
        resource_isolation_config: Optional[ResourceIsolationConfig] = None,
    ):
        self.redis_address = redis_address
        self.gcs_address = gcs_address
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.memory = memory
        self.object_store_memory = object_store_memory
        self.resources = resources
        self.redis_port = redis_port
        self.redis_shard_ports = redis_shard_ports
        self.object_manager_port = object_manager_port
        self.node_manager_port = node_manager_port
        self.gcs_server_port = gcs_server_port
        self.node_ip_address = node_ip_address
        self.node_name = node_name
        self.min_worker_port = min_worker_port
        self.max_worker_port = max_worker_port
        self.worker_port_list = worker_port_list
        self.ray_client_server_port = ray_client_server_port
        self.driver_mode = driver_mode
        self.redirect_output = redirect_output
        self.external_addresses = external_addresses
        self.num_redis_shards = num_redis_shards
        self.redis_max_clients = redis_max_clients
        self.redis_username = redis_username
        self.redis_password = redis_password
        self.plasma_directory = plasma_directory
        self.object_spilling_directory = object_spilling_directory
        self.worker_path = worker_path
        self.setup_worker_path = setup_worker_path
        self.huge_pages = huge_pages
        self.include_dashboard = include_dashboard
        self.dashboard_host = dashboard_host
        self.dashboard_port = dashboard_port
        self.dashboard_agent_listen_port = dashboard_agent_listen_port
        self.runtime_env_agent_port = runtime_env_agent_port
        self.plasma_store_socket_name = plasma_store_socket_name
        self.raylet_socket_name = raylet_socket_name
        self.temp_dir = temp_dir
        self.runtime_env_dir_name = (
            runtime_env_dir_name or ray_constants.DEFAULT_RUNTIME_ENV_DIR_NAME
        )
        self.include_log_monitor = include_log_monitor
        self.autoscaling_config = autoscaling_config
        self.metrics_agent_port = metrics_agent_port
        self.metrics_export_port = metrics_export_port
        self.tracing_startup_hook = tracing_startup_hook
        self.no_monitor = no_monitor
        self.ray_debugger_external = ray_debugger_external
        self.env_vars = env_vars
        self.session_name = session_name
        self.webui = webui
        # `_system_config` is always a dict from here on.
        self._system_config = _system_config or {}
        self._enable_object_reconstruction = enable_object_reconstruction
        self.labels = labels
        self._check_usage()
        self.cluster_id = cluster_id
        self.node_id = node_id
        self.resource_isolation_config = resource_isolation_config
        if not self.resource_isolation_config:
            self.resource_isolation_config = ResourceIsolationConfig(
                enable_resource_isolation=False
            )

        # Object reconstruction requires lineage pinning so that task lineage
        # survives long enough to re-execute. (The original had a stray debug
        # print here and a dead `is None` re-check; `_system_config` is
        # guaranteed to be a dict by the `or {}` assignment above.)
        if enable_object_reconstruction:
            self._system_config["lineage_pinning_enabled"] = True

    def update(self, **kwargs):
        """Update the settings according to the keyword arguments.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: If a keyword does not name an existing attribute.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                setattr(self, arg, kwargs[arg])
            else:
                raise ValueError(f"Invalid RayParams parameter in update: {arg}")

        self._check_usage()

    def update_if_absent(self, **kwargs):
        """Update the settings when the target fields are None.

        Args:
            kwargs: The keyword arguments to set corresponding fields.

        Raises:
            ValueError: If a keyword does not name an existing attribute.
        """
        for arg in kwargs:
            if hasattr(self, arg):
                if getattr(self, arg) is None:
                    setattr(self, arg, kwargs[arg])
            else:
                raise ValueError(
                    f"Invalid RayParams parameter in update_if_absent: {arg}"
                )

        self._check_usage()

    def update_pre_selected_port(self):
        """Compute the pre-selected ports and record them in `self.reserved_ports`.

        Raises:
            ValueError: If the same port is assigned to two different
                components.
        """

        def wrap_port(port):
            # 0 port means select a random port for the grpc server.
            if port is None or port == 0:
                return []
            else:
                return [port]

        # Create a dictionary of the component -> port mapping.
        pre_selected_ports = {
            "gcs": wrap_port(self.redis_port),
            "object_manager": wrap_port(self.object_manager_port),
            "node_manager": wrap_port(self.node_manager_port),
            "gcs_server": wrap_port(self.gcs_server_port),
            "client_server": wrap_port(self.ray_client_server_port),
            "dashboard": wrap_port(self.dashboard_port),
            "dashboard_agent_grpc": wrap_port(self.metrics_agent_port),
            "dashboard_agent_http": wrap_port(self.dashboard_agent_listen_port),
            "runtime_env_agent": wrap_port(self.runtime_env_agent_port),
            "metrics_export": wrap_port(self.metrics_export_port),
        }
        redis_shard_ports = self.redis_shard_ports
        if redis_shard_ports is None:
            redis_shard_ports = []
        pre_selected_ports["redis_shards"] = redis_shard_ports
        if self.worker_port_list is None:
            if self.min_worker_port is not None and self.max_worker_port is not None:
                pre_selected_ports["worker_ports"] = list(
                    range(self.min_worker_port, self.max_worker_port + 1)
                )
            else:
                # The dict is not updated when it requires random ports.
                pre_selected_ports["worker_ports"] = []
        else:
            pre_selected_ports["worker_ports"] = [
                int(port) for port in self.worker_port_list.split(",")
            ]

        # Update the pre selected port set, rejecting duplicates across
        # components.
        self.reserved_ports = set()
        for comp, port_list in pre_selected_ports.items():
            for port in port_list:
                if port in self.reserved_ports:
                    raise ValueError(
                        f"Ray component {comp} is trying to use "
                        f"a port number {port} that is used by other components.\n"
                        f"Port information: {self._format_ports(pre_selected_ports)}\n"
                        "If you allocate ports, please make sure the same port "
                        "is not used by multiple components."
                    )
                self.reserved_ports.add(port)

    def _check_usage(self):
        """Validate the currently-set parameters, raising ValueError on misuse."""
        if self.worker_port_list is not None:
            for port_str in self.worker_port_list.split(","):
                try:
                    port = int(port_str)
                except ValueError as e:
                    raise ValueError(
                        "worker_port_list must be a comma-separated "
                        f"list of integers: {e}"
                    ) from None

                if port < 1024 or port > 65535:
                    raise ValueError(
                        "Ports in worker_port_list must be "
                        f"between 1024 and 65535. Got: {port}"
                    )

        # Used primarily for testing.
        # NOTE(review): any non-empty string (even "0") enables this branch —
        # presumably intentional for a test-only knob, but worth confirming.
        if os.environ.get("RAY_USE_RANDOM_PORTS", False):
            if self.min_worker_port is None and self.max_worker_port is None:
                self.min_worker_port = 0
                self.max_worker_port = 0

        if self.min_worker_port is not None:
            if self.min_worker_port != 0 and (
                self.min_worker_port < 1024 or self.min_worker_port > 65535
            ):
                raise ValueError(
                    "min_worker_port must be 0 or an integer between 1024 and 65535."
                )

        if self.max_worker_port is not None:
            if self.min_worker_port is None:
                raise ValueError(
                    "If max_worker_port is set, min_worker_port must also be set."
                )
            elif self.max_worker_port != 0:
                if self.max_worker_port < 1024 or self.max_worker_port > 65535:
                    raise ValueError(
                        "max_worker_port must be 0 or an integer between "
                        "1024 and 65535."
                    )
                elif self.max_worker_port <= self.min_worker_port:
                    raise ValueError(
                        "max_worker_port must be higher than min_worker_port."
                    )

        if self.ray_client_server_port is not None:
            if get_ray_client_dependency_error() is not None:
                raise ValueError(
                    "Ray Client requires pip package `ray[client]`. "
                    "If you installed the minimal Ray (e.g. `pip install ray`), "
                    "please reinstall by executing `pip install ray[client]`."
                )
            if (
                self.ray_client_server_port < 1024
                or self.ray_client_server_port > 65535
            ):
                raise ValueError(
                    "ray_client_server_port must be an integer "
                    "between 1024 and 65535."
                )

        if self.runtime_env_agent_port is not None:
            if (
                self.runtime_env_agent_port < 1024
                or self.runtime_env_agent_port > 65535
            ):
                raise ValueError(
                    "runtime_env_agent_port must be an integer "
                    "between 1024 and 65535."
                )

        if self.resources is not None:

            def build_error(resource, alternative):
                return (
                    f"{self.resources} -> `{resource}` cannot be a "
                    "custom resource because it is one of the default resources "
                    f"({ray_constants.DEFAULT_RESOURCES}). "
                    f"Use `{alternative}` instead. For example, use `ray start "
                    f"--{alternative.replace('_', '-')}=1` instead of "
                    f"`ray start --resources={{'{resource}': 1}}`"
                )

            assert "CPU" not in self.resources, build_error("CPU", "num_cpus")
            assert "GPU" not in self.resources, build_error("GPU", "num_gpus")
            assert "memory" not in self.resources, build_error("memory", "memory")
            assert "object_store_memory" not in self.resources, build_error(
                "object_store_memory", "object_store_memory"
            )

        if self.redirect_output is not None:
            raise DeprecationWarning("The redirect_output argument is deprecated.")

        if self.temp_dir is not None and not os.path.isabs(self.temp_dir):
            raise ValueError("temp_dir must be absolute path or None.")

        # Placing the temp dir inside an active virtualenv would let Ray's
        # session files be wiped out by virtualenv re-creation.
        if self.temp_dir is not None and os.getenv("VIRTUAL_ENV"):
            is_relative = True
            try:
                (
                    pathlib.Path(self.temp_dir)
                    .resolve()
                    .relative_to(pathlib.Path(os.getenv("VIRTUAL_ENV")).resolve())
                )
            except ValueError:
                is_relative = False
            if is_relative:
                raise ValueError(
                    "temp_dir must not be child directory of virtualenv root"
                )

    def _format_ports(self, pre_selected_ports):
        """Format the pre-selected ports information to be more human-readable."""
        ports = pre_selected_ports.copy()

        for comp, port_list in ports.items():
            if len(port_list) == 1:
                ports[comp] = port_list[0]
            elif len(port_list) == 0:
                # Nothing is selected, meaning it will be randomly selected.
                ports[comp] = "random"
            elif comp == "worker_ports":
                min_port = port_list[0]
                max_port = port_list[-1]
                if len(port_list) < 50:
                    port_range_str = str(port_list)
                else:
                    port_range_str = f"from {min_port} to {max_port}"
                ports[comp] = f"{len(port_list)} ports {port_range_str}"
        return ports
| RayParams |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 7298,
"end": 8458
class ____(Action):
    """Base class for Actions that operate on Automations and need to infer them from
    events"""

    source: Literal["selected", "inferred"] = Field(
        "selected",
        description=(
            "Whether this Action applies to a specific selected "
            "automation (given by `automation_id`), or to an automation that is "
            "inferred from the triggering event. If the source is 'inferred', "
            "the `automation_id` may not be set. If the source is 'selected', the "
            "`automation_id` must be set."
        ),
    )
    automation_id: Optional[UUID] = Field(
        None, description="The identifier of the automation to act on"
    )

    @model_validator(mode="after")
    def selected_automation_requires_id(self) -> Self:
        # Valid only when `source == "selected"` coincides with an id being
        # provided: selected requires an id, inferred forbids one.
        id_provided = bool(self.automation_id)
        if (self.source == "selected") == id_provided:
            return self
        raise ValueError(
            "automation_id is " + ("not allowed" if id_provided else "required")
        )
| AutomationAction |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_connector/file_path_data_connector.py | {
"start": 1611,
"end": 24065
} | class ____(DataConnector):
"""The base class for Data Connectors designed to access filesystem-like data.
This can include traditional, disk-based filesystems or object stores such as S3, GCS, or ABS.
See the `DataConnector` base class for more information on the role of Data Connectors.
Note that `FilePathDataConnector` is not meant to be used on its own, but extended.
Args:
datasource_name: The name of the Datasource associated with this DataConnector instance
data_asset_name: The name of the DataAsset using this DataConnector instance
"""
FILE_PATH_BATCH_SPEC_KEY = "path"
def __init__(
self,
datasource_name: str,
data_asset_name: str,
unnamed_regex_group_prefix: str = "batch_request_param_",
file_path_template_map_fn: Optional[Callable] = None,
whole_directory_path_override: PathStr | None = None,
) -> None:
super().__init__(
datasource_name=datasource_name,
data_asset_name=data_asset_name,
)
self._unnamed_regex_group_prefix: str = unnamed_regex_group_prefix
self._file_path_template_map_fn: Optional[Callable] = file_path_template_map_fn
# allow callers to always treat entire directory as single asset
self._whole_directory_path_override = whole_directory_path_override
# This is a dictionary which maps data_references onto batch_requests.
self._data_references_cache: DefaultDict[
re.Pattern, Dict[str, List[LegacyBatchDefinition] | None]
] = defaultdict(dict)
# Interface Method
@override
def get_batch_definition_list(self, batch_request: BatchRequest) -> List[LegacyBatchDefinition]:
"""
Retrieve batch_definitions and that match batch_request.
First retrieves all batch_definitions that match batch_request
- if batch_request also has a batch_filter, then select batch_definitions that match batch_filter.
Args:
batch_request (BatchRequest): BatchRequest (containing previously validated attributes) to process
Returns:
A list of BatchDefinition objects that match BatchRequest
""" # noqa: E501 # FIXME CoP
legacy_batch_definition_list: List[LegacyBatchDefinition] = (
self._get_unfiltered_batch_definition_list(batch_request=batch_request)
)
data_connector_query_dict: dict[str, dict | slice] = {}
if batch_request.options:
data_connector_query_dict.update(
{
"batch_filter_parameters": {
key: value
for key, value in batch_request.options.items()
if value is not None
}
}
)
data_connector_query_dict.update({"index": batch_request.batch_slice})
batch_filter_obj: BatchFilter = build_batch_filter(
data_connector_query_dict=data_connector_query_dict # type: ignore[arg-type] # FIXME CoP
)
legacy_batch_definition_list = batch_filter_obj.select_from_data_connector_query(
batch_definition_list=legacy_batch_definition_list
)
return legacy_batch_definition_list
@override
def build_batch_spec(self, batch_definition: LegacyBatchDefinition) -> PathBatchSpec:
"""
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (LegacyBatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
"""
batch_spec: BatchSpec = super().build_batch_spec(batch_definition=batch_definition)
return PathBatchSpec(batch_spec)
# Interface Method
@override
def get_data_reference_count(self) -> int:
# todo: in the world of BatchDefinition, this method must accept a BatchRequest.
# In the meantime, we fall back to a regex that matches everything.
regex = self._preprocess_batching_regex(MATCH_ALL_PATTERN)
data_references = self._get_data_references_cache(batching_regex=regex)
return len(data_references)
# Interface Method
@override
def get_matched_data_references(self, regex: re.Pattern | None = None) -> List[str]:
"""
Returns the list of data_references matched by configuration by looping through items in
_data_references_cache and returning data_references that have an associated data_asset.
Returns:
list of data_references that are matched by configuration.
"""
if regex:
regex = self._preprocess_batching_regex(regex)
return self._get_data_references(matched=True, regex=regex)
# Interface Method
@override
def get_matched_data_reference_count(self) -> int:
"""
Returns the list of matched data_references known by this DataConnector from its _data_references_cache
Returns:
number of matched data_references known by this DataConnector.
""" # noqa: E501 # FIXME CoP
return len(self.get_matched_data_references())
# Interface Method
@override
def get_unmatched_data_references(self) -> List[str]:
"""
Returns the list of data_references unmatched by configuration by looping through items in
_data_references_cache and returning data_references that do not have an associated data_asset.
Returns:
list of data_references that are not matched by configuration.
""" # noqa: E501 # FIXME CoP
return self._get_data_references(matched=False)
# Interface Method
@override
def get_unmatched_data_reference_count(self) -> int:
"""
Returns the list of unmatched data_references known by this DataConnector from its _data_references_cache
Returns:
number of unmatched data_references known by this DataConnector.
""" # noqa: E501 # FIXME CoP
return len(self.get_unmatched_data_references())
def _get_unfiltered_batch_definition_list(
self, batch_request: BatchRequest[FileNamePartitioner]
) -> list[LegacyBatchDefinition]:
"""Get all batch definitions for all files from a data connector
using the supplied batch request.
Args:
batch_request: Specifies which batch definitions to get from data connector.
Returns:
A list of batch definitions from the data connector based on the batch request.
"""
# this class is overloaded with two separate implementations:
if self._whole_directory_path_override:
return self._get_directory_batch_definition_list(batch_request=batch_request)
else:
return self._get_file_batch_definition_list(batch_request=batch_request)
def _get_file_batch_definition_list(
self, batch_request: BatchRequest
) -> list[LegacyBatchDefinition]:
# Use a combination of a list and set to preserve iteration order
batch_definition_list: list[LegacyBatchDefinition] = list()
batch_definition_set = set()
if batch_request.partitioner:
batching_regex = self._preprocess_batching_regex(batch_request.partitioner.regex)
else:
# all batch requests coming from the V1 API should have a regex; to support legacy code
# we fall back to the MATCH_ALL_PATTERN if it's missing.
batching_regex = self._preprocess_batching_regex(MATCH_ALL_PATTERN)
for batch_definition in self._get_batch_definitions(batching_regex=batching_regex):
if (
self._batch_definition_matches_batch_request(
batch_definition=batch_definition, batch_request=batch_request
)
and batch_definition not in batch_definition_set
):
batch_definition_list.append(batch_definition)
batch_definition_set.add(batch_definition)
return batch_definition_list
def _get_directory_batch_definition_list(
self, batch_request: BatchRequest
) -> list[LegacyBatchDefinition]:
data_directory = self._whole_directory_path_override
batch_definition = LegacyBatchDefinition(
datasource_name=self._datasource_name,
data_connector_name=_DATA_CONNECTOR_NAME,
data_asset_name=self._data_asset_name,
batch_identifiers=make_batch_identifier({"path": data_directory}),
)
return [batch_definition]
def _get_data_references(self, matched: bool, regex: re.Pattern | None = None) -> List[str]:
"""
Returns the list of data_references unmatched by configuration by looping through items in
_data_references_cache and returning data_references that do not have an associated data_asset.
Returns:
list of data_references that are not matched by configuration.
""" # noqa: E501 # FIXME CoP
if not regex:
regex = self._preprocess_batching_regex(MATCH_ALL_PATTERN)
def _matching_criterion(
batch_definition_list: Union[List[LegacyBatchDefinition], None],
) -> bool:
return (
(batch_definition_list is not None) if matched else (batch_definition_list is None)
)
data_reference_mapped_element: Tuple[str, Union[List[LegacyBatchDefinition], None]]
data_references = self._get_data_references_cache(batching_regex=regex)
unmatched_data_references: List[str] = list(
dict(
filter(
lambda data_reference_mapped_element: _matching_criterion(
batch_definition_list=data_reference_mapped_element[1]
),
data_references.items(),
)
).keys()
)
return unmatched_data_references
# Interface Method
@override
def _generate_batch_spec_parameters_from_batch_definition(
self, batch_definition: LegacyBatchDefinition
) -> dict:
"""
This interface method examines "BatchDefinition" object and converts it to exactly one "data_reference" handle,
based on partitioning behavior of given subclass (e.g., Regular Expressions for file path based DataConnector
implementations). Type of "data_reference" is storage dependent. This method is then used to create storage
system specific "BatchSpec" parameters for retrieving "Batch" of data.
Args:
batch_definition: input "BatchDefinition" object
Returns:
dict -- dictionary of "BatchSpec" properties
""" # noqa: E501 # FIXME CoP
# this class is overloaded with two separate implementations:
if self._whole_directory_path_override:
return self._get_batch_spec_params_directory(batch_definition=batch_definition)
else:
return self._get_batch_spec_params_file(batch_definition=batch_definition)
def _get_batch_spec_params_file(self, batch_definition: LegacyBatchDefinition) -> dict:
    """File specific implementation of batch spec parameters"""
    batching_regex = batch_definition.batching_regex
    if not batching_regex:
        raise RuntimeError("BatchDefinition must contain a batching_regex.")  # noqa: TRY003 # FIXME CoP

    group_names: List[str] = RegExParser(
        regex_pattern=batching_regex,
        unnamed_regex_group_prefix=self._unnamed_regex_group_prefix,
    ).group_names()

    # Re-render the data reference (relative path) from the batch identifiers.
    relative_path: str = map_batch_definition_to_data_reference_string_using_regex(
        batch_definition=batch_definition,
        regex_pattern=batching_regex,
        group_names=group_names,
    )
    if not relative_path:
        raise ValueError(  # noqa: TRY003 # FIXME CoP
            f"""No data reference for data asset name "{batch_definition.data_asset_name}" matches the given
batch identifiers {batch_definition.batch_identifiers} from batch definition {batch_definition}.
"""  # noqa: E501 # FIXME CoP
        )

    full_path = self._get_full_file_path(path=relative_path)
    return {FilePathDataConnector.FILE_PATH_BATCH_SPEC_KEY: full_path}
def _get_batch_spec_params_directory(self, batch_definition: LegacyBatchDefinition) -> dict:
    """Directory specific implementation of batch spec parameters"""
    # The entire directory is one batch, so the batch_definition argument is not consulted.
    directory_path = str(self._whole_directory_path_override)
    full_path = self._get_full_file_path(path=directory_path)
    return {FilePathDataConnector.FILE_PATH_BATCH_SPEC_KEY: full_path}
def _preprocess_batching_regex(self, regex: re.Pattern) -> re.Pattern:
    """Add the FILE_PATH_BATCH_SPEC_KEY group to regex if not already present."""
    parser = RegExParser(
        regex_pattern=regex,
        unnamed_regex_group_prefix=self._unnamed_regex_group_prefix,
    )
    if FilePathDataConnector.FILE_PATH_BATCH_SPEC_KEY in parser.group_names():
        return regex
    # Wrap the whole pattern in a named group that captures the full path.
    wrapped_pattern = f"(?P<{FilePathDataConnector.FILE_PATH_BATCH_SPEC_KEY}>{regex.pattern})"
    return re.compile(wrapped_pattern)
def _get_data_references_cache(
    self, batching_regex: re.Pattern
) -> Dict[str, List[LegacyBatchDefinition] | None]:
    """Access a map where keys are data references and values are LegacyBatchDefinitions."""
    cache = self._data_references_cache[batching_regex]
    if not cache:
        # Cache miss: build a BatchDefinition (or None) for every known data reference.
        for data_reference in self.get_data_references():
            built = self._build_batch_definition(
                data_reference=data_reference, batching_regex=batching_regex
            )
            # Stored as a single-element list for interface compatibility; in this
            # implementation there can only be one or zero BatchDefinitions per reference.
            cache[data_reference] = [built] if built else None
    return cache
def _get_batch_definitions(self, batching_regex: re.Pattern) -> List[LegacyBatchDefinition]:
    """Return every BatchDefinition whose data reference matched ``batching_regex``."""
    reference_map = self._get_data_references_cache(batching_regex=batching_regex)
    results: List[LegacyBatchDefinition] = []
    for definitions in reference_map.values():
        if definitions is not None:
            # Each non-None entry holds exactly one BatchDefinition.
            results.append(definitions[0])
    return results
def _build_batch_definition(
    self, data_reference: str, batching_regex: re.Pattern
) -> LegacyBatchDefinition | None:
    """Build a LegacyBatchDefinition for ``data_reference``, or None when the regex does not match."""
    identifiers = self._build_batch_identifiers(
        data_reference=data_reference, batching_regex=batching_regex
    )
    if identifiers is None:
        return None

    # Deferred import — presumably to avoid an import cycle at module load time; verify.
    from great_expectations.core.batch import LegacyBatchDefinition

    return LegacyBatchDefinition(
        datasource_name=self._datasource_name,
        data_connector_name=_DATA_CONNECTOR_NAME,
        data_asset_name=self._data_asset_name,
        batch_identifiers=identifiers,
        batching_regex=batching_regex,
    )
def _build_batch_identifiers(
    self, data_reference: str, batching_regex: re.Pattern
) -> Optional[IDDict]:
    """Extract batch identifiers from ``data_reference`` using ``batching_regex``.

    Named groups keep their declared names; unnamed groups are keyed as
    ``f"{unnamed_regex_group_prefix}{position}"`` using the 1-based group position.
    Returns None when the regex does not match the data reference.
    """
    parser = RegExParser(
        regex_pattern=batching_regex,
        unnamed_regex_group_prefix=self._unnamed_regex_group_prefix,
    )
    match: Optional[re.Match] = parser.get_matches(target=data_reference)
    if match is None:
        return None

    expected_group_count: int = parser.get_num_all_matched_group_values()
    # 1-based positions of groups declared with `(?P<name>...)` syntax.
    named_group_positions: Set[int] = set(
        parser.get_named_group_name_to_group_index_mapping().values()
    )

    # Seed the mapping with named-group values, then synthesize names for unnamed groups.
    identifier_mapping: Dict[str, str] = copy.deepcopy(match.groupdict())
    all_group_values: List[str] = list(match.groups())
    assert len(all_group_values) == expected_group_count

    for position, group_value in enumerate(all_group_values, start=1):
        if position not in named_group_positions:
            identifier_mapping[f"{self._unnamed_regex_group_prefix}{position}"] = group_value

    return make_batch_identifier(identifier_mapping)
@abstractmethod
def _get_full_file_path(self, path: str) -> str:
    """Resolve *path* (a relative data reference) to a full, storage-specific file path.

    Subclasses implement the storage-specific resolution (e.g. local filesystem,
    S3, GCS) — TODO confirm against concrete subclasses.
    """
    pass
def map_batch_definition_to_data_reference_string_using_regex(
    batch_definition: LegacyBatchDefinition,
    regex_pattern: re.Pattern,
    group_names: List[str],
) -> str:
    """Render the data reference string corresponding to ``batch_definition``.

    Raises:
        TypeError: when ``batch_definition`` is not a LegacyBatchDefinition.
    """
    if not isinstance(batch_definition, LegacyBatchDefinition):
        raise TypeError("batch_definition is not of an instance of type BatchDefinition")  # noqa: TRY003 # FIXME CoP
    # Delegate to the identifier-based converter, supplying the asset name for templating.
    return convert_batch_identifiers_to_data_reference_string_using_regex(
        batch_identifiers=batch_definition.batch_identifiers,
        regex_pattern=regex_pattern,
        group_names=group_names,
        data_asset_name=batch_definition.data_asset_name,
    )
def convert_batch_identifiers_to_data_reference_string_using_regex(
    batch_identifiers: IDDict,
    regex_pattern: re.Pattern,
    group_names: List[str],
    data_asset_name: Optional[str] = None,
) -> str:
    """Fill the template derived from ``regex_pattern`` with the given batch identifiers.

    Raises:
        TypeError: when ``batch_identifiers`` is not an IDDict.
    """
    if not isinstance(batch_identifiers, IDDict):
        raise TypeError("batch_identifiers is not an instance of type IDDict")  # noqa: TRY003 # FIXME CoP

    # Copy so the caller's identifiers are never mutated.
    template_arguments: dict = copy.deepcopy(batch_identifiers)
    if data_asset_name is not None:
        template_arguments["data_asset_name"] = data_asset_name

    template: str = _invert_regex_to_data_reference_template(
        regex_pattern=regex_pattern,
        group_names=group_names,
    )
    return template.format(**template_arguments)
def _invert_regex_to_data_reference_template(  # noqa: C901 # too complex
    regex_pattern: re.Pattern | str,
    group_names: List[str],
) -> str:
    r"""Create a string template based on a regex and corresponding list of group names.

    For example:

        filepath_template = _invert_regex_to_data_reference_template(
            regex_pattern=r"^(.+)_(\d+)_(\d+)\.csv$",
            group_names=["name", "timestamp", "price"],
        )
        filepath_template
        >> "{name}_{timestamp}_{price}.csv"

    Such templates are useful because they can be populated using string substitution:

        filepath_template.format(**{
            "name": "user_logs",
            "timestamp": "20200101",
            "price": "250",
        })
        >> "user_logs_20200101_250.csv"

    NOTE Abe 20201017: This method is almost certainly still brittle. I haven't exhaustively mapped the OPCODES in sre_constants
    """  # noqa: E501 # FIXME CoP
    data_reference_template = ""
    group_name_index: int = 0
    num_groups = len(group_names)

    if isinstance(regex_pattern, re.Pattern):
        regex_pattern = regex_pattern.pattern

    parsed_sre = sre_parse.parse(str(regex_pattern))
    # NOTE(review): pairing parsed tokens with raw pattern characters positionally is a
    # heuristic — the sequences need not align (e.g. multi-character tokens like "\d").
    for parsed_sre_tuple, char in zip(parsed_sre, str(regex_pattern), strict=False):  # type: ignore[call-overload] # FIXME CoP
        token, value = parsed_sre_tuple
        if token == sre_constants.LITERAL:
            # Transcribe the character directly into the template
            data_reference_template += chr(value)
        elif token == sre_constants.SUBPATTERN:
            if group_name_index >= num_groups:
                break
            # Replace the captured group with "{next_group_name}" in the template
            data_reference_template += f"{{{group_names[group_name_index]}}}"
            group_name_index += 1
        elif token in (
            sre_constants.MAX_REPEAT,
            sre_constants.IN,
            sre_constants.BRANCH,
            sre_constants.ANY,
        ):
            if group_names:
                # Replace the uncaptured group with a wildcard in the template
                data_reference_template += "*"
            else:
                # Don't assume that a `.` in a filename should be a star glob
                data_reference_template += char
        elif token in (
            sre_constants.AT,
            sre_constants.ASSERT_NOT,
            sre_constants.ASSERT,
        ):
            # Anchors and lookarounds contribute no characters to the data reference.
            pass
        else:
            raise ValueError(f"Unrecognized regex token {token} in regex pattern {regex_pattern}.")  # noqa: TRY003 # FIXME CoP

    # Collapse adjacent wildcards into a single wildcard
    return re.sub(r"\*+", "*", data_reference_template)
def sanitize_prefix_for_gcs_and_s3(text: str) -> str:
    """
    Takes in a given user-prefix and cleans it to work with file-system traversal methods
    (i.e. add '/' to the end of a string meant to represent a directory)

    Customized for S3 paths, ignoring the path separator used by the host OS
    """
    text = text.strip()
    if not text:
        return text

    # `str.split` always yields at least one element, so the last part is safe to inspect.
    # A "." in the final path segment means the prefix names a file, not a folder.
    if "." in text.split("/")[-1]:
        return text

    # Folder: normalize to exactly one trailing "/".
    return f"{text.rstrip('/')}/"
| FilePathDataConnector |
python | doocs__leetcode | solution/0300-0399/0309.Best Time to Buy and Sell Stock with Cooldown/Solution.py | {
"start": 0,
"end": 414
} | class ____:
def maxProfit(self, prices: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= len(prices):
return 0
ans = dfs(i + 1, j)
if j:
ans = max(ans, prices[i] + dfs(i + 2, 0))
else:
ans = max(ans, -prices[i] + dfs(i + 1, 1))
return ans
return dfs(0, 0)
| Solution |
python | marshmallow-code__marshmallow | tests/test_options.py | {
"start": 4356,
"end": 4752
} | class ____:
class ManySchema(Schema):
foo = fields.Str()
class Meta:
many = True
def test_many_by_default(self):
test = self.ManySchema()
assert test.load([{"foo": "bar"}]) == [{"foo": "bar"}]
def test_explicit_single(self):
test = self.ManySchema(many=False)
assert test.load({"foo": "bar"}) == {"foo": "bar"}
| TestManyOption |
python | django__django | tests/serializers/models/data.py | {
"start": 6081,
"end": 6159
} | class ____(models.Model):
data = models.UUIDField(primary_key=True)
| UUIDData |
python | django__django | tests/db_functions/text/test_upper.py | {
"start": 194,
"end": 1393
} | class ____(TestCase):
def test_basic(self):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(upper_name=Upper("name"))
self.assertQuerySetEqual(
authors.order_by("name"),
[
"JOHN SMITH",
"RHONDA",
],
lambda a: a.upper_name,
)
Author.objects.update(name=Upper("name"))
self.assertQuerySetEqual(
authors.order_by("name"),
[
("JOHN SMITH", "JOHN SMITH"),
("RHONDA", "RHONDA"),
],
lambda a: (a.upper_name, a.name),
)
def test_transform(self):
with register_lookup(CharField, Upper):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__exact="JOHN SMITH")
self.assertQuerySetEqual(
authors.order_by("name"),
[
"John Smith",
],
lambda a: a.name,
)
| UpperTests |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_indexing.py | {
"start": 3892,
"end": 4281
} | class ____:
def test_set_item(self, arr_data):
arr = SparseArray(arr_data).copy()
def setitem():
arr[5] = 3
def setslice():
arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
| TestSetitem |
python | huggingface__transformers | examples/modular-transformers/modular_multimodal2.py | {
"start": 757,
"end": 817
} | class ____(CLIPAttention):
pass
| Multimodal2VisionAttention |
python | openai__openai-python | src/openai/resources/beta/realtime/transcription_sessions.py | {
"start": 13811,
"end": 14124
} | class ____:
def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
self._transcription_sessions = transcription_sessions
self.create = async_to_streamed_response_wrapper(
transcription_sessions.create,
)
| AsyncTranscriptionSessionsWithStreamingResponse |
python | getlogbook__logbook | src/logbook/base.py | {
"start": 8108,
"end": 9692
} | class ____(StackedObject):
"""A nested setup can be used to configure multiple handlers
and processors at once.
"""
def __init__(self, objects=None):
self.objects = list(objects or ())
def push_application(self):
for obj in self.objects:
obj.push_application()
def pop_application(self):
for obj in reversed(self.objects):
obj.pop_application()
@deprecated("Use push_context instead")
def push_thread(self):
for obj in self.objects:
obj.push_context()
@deprecated("Use pop_context instead")
def pop_thread(self):
for obj in reversed(self.objects):
obj.pop_context()
@deprecated("Use push_context instead")
def push_greenlet(self):
for obj in self.objects:
obj.push_context()
@deprecated("Use pop_context instead")
def pop_greenlet(self):
for obj in reversed(self.objects):
obj.pop_context()
def push_context(self):
for obj in self.objects:
obj.push_context()
def pop_context(self):
for obj in reversed(self.objects):
obj.pop_context()
@deprecated("`with obj.greenletbound()` is deprecated, use `with obj:` instead")
def greenletbound(self):
return self
@deprecated("`with obj.contextbound()` is deprecated, use `with obj:` instead")
def contextbound(self):
return self
@deprecated("`with obj.threadbound()` is deprecated, use `with obj:` instead")
def threadbound(self):
return self
| NestedSetup |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/hooks/tpt.py | {
"start": 1608,
"end": 11646
} | class ____(TtuHook):
"""
Hook for executing Teradata Parallel Transporter (TPT) operations.
This hook provides methods to execute TPT operations both locally and remotely via SSH.
It supports DDL operations using tbuild utility. It extends the `TtuHook` and integrates
with Airflow's SSHHook for remote execution.
The TPT operations are used to interact with Teradata databases for DDL operations
such as creating, altering, or dropping tables.
Features:
- Supports both local and remote execution of TPT operations.
- Secure file encryption for remote transfers.
- Comprehensive error handling and logging.
- Resource cleanup and management.
.. seealso::
- :ref:`hook API connection <howto/connection:teradata>`
:param ssh_conn_id: SSH connection ID for remote execution. If None, executes locally.
"""
def __init__(self, ssh_conn_id: str | None = None, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.ssh_conn_id = ssh_conn_id
self.ssh_hook = SSHHook(ssh_conn_id=ssh_conn_id) if ssh_conn_id else None
def execute_ddl(
self,
tpt_script: str | list[str],
remote_working_dir: str,
) -> int:
"""
Execute a DDL statement using TPT.
Args:
tpt_script: TPT script content as string or list of strings
remote_working_dir: Remote working directory for SSH execution
Returns:
Exit code from the TPT operation
Raises:
ValueError: If tpt_script is empty or invalid
RuntimeError: Non-zero tbuild exit status or unexpected execution failure
ConnectionError: SSH connection not established or fails
TimeoutError: SSH connection/network timeout
FileNotFoundError: tbuild binary not found in PATH
"""
if not tpt_script:
raise ValueError("TPT script must not be empty.")
tpt_script_content = "\n".join(tpt_script) if isinstance(tpt_script, list) else tpt_script
# Validate script content
if not tpt_script_content.strip():
raise ValueError("TPT script content must not be empty after processing.")
if self.ssh_hook:
self.log.info("Executing DDL statements via SSH on remote host")
return self._execute_tbuild_via_ssh(tpt_script_content, remote_working_dir)
self.log.info("Executing DDL statements locally")
return self._execute_tbuild_locally(tpt_script_content)
def _execute_tbuild_via_ssh(
self,
tpt_script_content: str,
remote_working_dir: str,
) -> int:
"""Execute tbuild command via SSH."""
with self.preferred_temp_directory() as tmp_dir:
local_script_file = os.path.join(tmp_dir, f"tbuild_script_{uuid.uuid4().hex}.sql")
write_file(local_script_file, tpt_script_content)
encrypted_file_path = f"{local_script_file}.enc"
remote_encrypted_script_file = os.path.join(
remote_working_dir, os.path.basename(encrypted_file_path)
)
remote_script_file = os.path.join(remote_working_dir, os.path.basename(local_script_file))
job_name = f"tbuild_job_{uuid.uuid4().hex}"
try:
if not self.ssh_hook:
raise ConnectionError("SSH connection is not established. `ssh_hook` is None or invalid.")
with self.ssh_hook.get_conn() as ssh_client:
verify_tpt_utility_on_remote_host(ssh_client, "tbuild", logging.getLogger(__name__))
password = generate_random_password()
generate_encrypted_file_with_openssl(local_script_file, password, encrypted_file_path)
transfer_file_sftp(
ssh_client,
encrypted_file_path,
remote_encrypted_script_file,
logging.getLogger(__name__),
)
decrypt_remote_file(
ssh_client,
remote_encrypted_script_file,
remote_script_file,
password,
logging.getLogger(__name__),
)
set_remote_file_permissions(ssh_client, remote_script_file, logging.getLogger(__name__))
tbuild_cmd = ["tbuild", "-f", remote_script_file, job_name]
self.log.info("=" * 80)
self.log.info("Executing tbuild command on remote server: %s", " ".join(tbuild_cmd))
self.log.info("=" * 80)
exit_status, output, error = execute_remote_command(ssh_client, " ".join(tbuild_cmd))
self.log.info("tbuild command output:\n%s", output)
self.log.info("tbuild command exited with status %s", exit_status)
# Clean up remote files before checking exit status
remote_secure_delete(
ssh_client,
[remote_encrypted_script_file, remote_script_file],
logging.getLogger(__name__),
)
if exit_status != 0:
raise RuntimeError(f"tbuild command failed with exit code {exit_status}: {error}")
return exit_status
except ConnectionError:
# Re-raise ConnectionError as-is (don't convert to TimeoutError)
raise
except (OSError, socket.gaierror) as e:
self.log.error("SSH connection timed out: %s", str(e))
raise TimeoutError(
"SSH connection timed out. Please check the network or server availability."
) from e
except SSHException as e:
raise ConnectionError(f"SSH error during connection: {str(e)}") from e
except RuntimeError:
raise
except Exception as e:
raise RuntimeError(
f"Unexpected error while executing tbuild script on remote machine: {str(e)}"
) from e
finally:
# Clean up local files
secure_delete(encrypted_file_path, logging.getLogger(__name__))
secure_delete(local_script_file, logging.getLogger(__name__))
def _execute_tbuild_locally(
self,
tpt_script_content: str,
) -> int:
"""Execute tbuild command locally."""
with self.preferred_temp_directory() as tmp_dir:
local_script_file = os.path.join(tmp_dir, f"tbuild_script_{uuid.uuid4().hex}.sql")
write_file(local_script_file, tpt_script_content)
# Set file permission to read-only for the current user (no permissions for group/others)
set_local_file_permissions(local_script_file, logging.getLogger(__name__))
job_name = f"tbuild_job_{uuid.uuid4().hex}"
tbuild_cmd = ["tbuild", "-f", local_script_file, job_name]
if not shutil.which("tbuild"):
raise FileNotFoundError("tbuild binary not found in PATH.")
sp = None
try:
self.log.info("=" * 80)
self.log.info("Executing tbuild command: %s", " ".join(tbuild_cmd))
self.log.info("=" * 80)
sp = subprocess.Popen(
tbuild_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, start_new_session=True
)
error_lines = []
if sp.stdout is not None:
for line in iter(sp.stdout.readline, b""):
decoded_line = line.decode("UTF-8").strip()
self.log.info(decoded_line)
if "error" in decoded_line.lower():
error_lines.append(decoded_line)
sp.wait()
self.log.info("tbuild command exited with return code %s", sp.returncode)
if sp.returncode != 0:
error_msg = "\n".join(error_lines) if error_lines else "Unknown error"
raise RuntimeError(f"tbuild command failed with return code {sp.returncode}: {error_msg}")
return sp.returncode
except RuntimeError:
raise
except Exception as e:
self.log.error("Error executing tbuild command: %s", str(e))
raise RuntimeError(f"Error executing tbuild command: {str(e)}") from e
finally:
secure_delete(local_script_file, logging.getLogger(__name__))
terminate_subprocess(sp, logging.getLogger(__name__))
def on_kill(self) -> None:
"""
Handle cleanup when the task is killed.
This method is called when Airflow needs to terminate the hook,
typically during task cancellation or shutdown.
"""
self.log.info("TPT Hook cleanup initiated")
# Note: SSH connections are managed by context managers and will be cleaned up automatically
# Subprocesses are handled by terminate_subprocess in the finally blocks
# This method is available for future enhancements if needed
@contextmanager
def preferred_temp_directory(self, prefix: str = "tpt_") -> Generator[str, None, None]:
try:
temp_dir = tempfile.gettempdir()
if not os.path.isdir(temp_dir) or not os.access(temp_dir, os.W_OK):
raise OSError("OS temp dir not usable")
except Exception:
temp_dir = self.get_airflow_home_dir()
with tempfile.TemporaryDirectory(dir=temp_dir, prefix=prefix) as tmp:
yield tmp
def get_airflow_home_dir(self) -> str:
"""Return the Airflow home directory."""
return os.environ.get("AIRFLOW_HOME", os.path.expanduser("~/airflow"))
| TptHook |
python | getsentry__sentry | src/sentry/rules/actions/integrations/base.py | {
"start": 746,
"end": 5768
} | class ____(EventAction, abc.ABC):
"""Intermediate abstract class to help DRY some event actions code."""
@property
@abc.abstractmethod
def prompt(self) -> str:
pass
@property
@abc.abstractmethod
def provider(self) -> str:
pass
@property
@abc.abstractmethod
def integration_key(self) -> str:
pass
@override
def future(
self,
callback: Callable[[GroupEvent, Sequence[RuleFuture]], None],
key: str | None = None,
**kwargs: Any,
) -> CallbackFuture:
def wrapped_callback(event: GroupEvent, futures: Sequence[RuleFuture]) -> None:
with sentry_sdk.start_span(
op="IntegrationEventAction.future",
name=type(self).__name__,
):
callback(event, futures)
return super().future(wrapped_callback, key, **kwargs)
def is_enabled(self) -> bool:
enabled: bool = bool(self.get_integrations())
return enabled
def get_integration_name(self) -> str:
"""Get the integration's name for the label."""
integration = self.get_integration()
if not integration:
return "[removed]"
_name: str = integration.name
return _name
def get_integrations(self) -> list[RpcIntegration]:
return integration_service.get_integrations(
organization_id=self.project.organization_id,
status=OrganizationStatus.ACTIVE,
org_integration_status=OrganizationStatus.ACTIVE,
providers=[self.provider],
)
def get_integration_id(self) -> int:
integration_id: str | None = self.get_option(self.integration_key)
if integration_id:
return int(integration_id)
return 0
def get_integration(self) -> RpcIntegration | None:
"""
Uses the required class variables `provider` and `integration_key` with
RuleBase.get_option to get the integration object from DB.
"""
for integration in integration_service.get_integrations(
organization_id=self.project.organization_id,
status=OrganizationStatus.ACTIVE,
org_integration_status=OrganizationStatus.ACTIVE,
providers=[self.provider],
):
if integration.id == self.get_integration_id():
return integration
return None
def get_organization_integration(self) -> RpcOrganizationIntegration | None:
return integration_service.get_organization_integration(
integration_id=self.get_integration_id(), organization_id=self.project.organization_id
)
def record_notification_sent(
self,
event: GroupEvent,
external_id: str,
rule: Rule | None = None,
notification_uuid: str | None = None,
) -> None:
from sentry.integrations.discord.analytics import DiscordIntegrationNotificationSent
from sentry.integrations.msteams.analytics import MSTeamsIntegrationNotificationSent
from sentry.integrations.opsgenie.analytics import OpsgenieIntegrationNotificationSent
from sentry.integrations.pagerduty.analytics import PagerdutyIntegrationNotificationSent
from sentry.integrations.slack.analytics import SlackIntegrationNotificationSent
# Currently these actions can only be triggered by issue alerts
PROVIDER_TO_EVENT_CLASS = {
"discord": DiscordIntegrationNotificationSent,
"msteams": MSTeamsIntegrationNotificationSent,
"opsgenie": OpsgenieIntegrationNotificationSent,
"pagerduty": PagerdutyIntegrationNotificationSent,
"slack": SlackIntegrationNotificationSent,
"email": EmailNotificationSent,
}
try:
if event_class := PROVIDER_TO_EVENT_CLASS.get(self.provider):
analytics.record(
event_class(
organization_id=event.organization.id,
project_id=event.project_id,
group_id=event.group_id,
notification_uuid=notification_uuid if notification_uuid else "",
alert_id=rule.id if rule else None,
category="issue_alert",
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
try:
analytics.record(
AlertSentEvent(
provider=self.provider,
alert_id=rule.id if rule else "",
alert_type="issue_alert",
organization_id=event.organization.id,
project_id=event.project_id,
external_id=external_id,
notification_uuid=notification_uuid if notification_uuid else "",
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
| IntegrationEventAction |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 24853,
"end": 25350
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type.container)
members = [
# The meminfo data points to a ListPayload (shared with the
# original list object)
('meminfo', types.MemInfoPointer(payload_type)),
('index', types.EphemeralPointer(types.intp)),
]
super(ListIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetEntry)
| ListIterModel |
python | celery__celery | t/unit/app/test_amqp.py | {
"start": 4331,
"end": 5516
} | class ____:
@pytest.mark.parametrize('default_queue_type', ['classic', 'quorum'])
@pytest.mark.parametrize('name,exchange,rkey', [
('default', None, None),
('default', 'exchange', None),
('default', 'exchange', 'routing_key'),
('default', None, 'routing_key'),
])
def test_setting_default_queue(self, name, exchange, rkey, default_queue_type):
self.app.conf.task_queues = {}
self.app.conf.task_default_exchange = exchange
self.app.conf.task_default_routing_key = rkey
self.app.conf.task_default_queue = name
self.app.conf.task_default_queue_type = default_queue_type
assert self.app.amqp.queues.default_exchange.name == exchange or name
queues = dict(self.app.amqp.queues)
assert len(queues) == 1
queue = queues[name]
assert queue.exchange.name == exchange or name
assert queue.exchange.type == 'direct'
assert queue.routing_key == rkey or name
if default_queue_type == 'quorum':
assert queue.queue_arguments == {'x-queue-type': 'quorum'}
else:
assert queue.queue_arguments is None
| test_default_queues |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/custom_job.py | {
"start": 2229,
"end": 9856
} | class ____(GoogleCloudBaseOperator):
"""The base class for operators that launch Custom jobs on VertexAI."""
def __init__(
self,
*,
project_id: str,
region: str,
display_name: str,
container_uri: str,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
parent_model: str | None = None,
is_default_version: bool | None = None,
model_version_aliases: list[str] | None = None,
model_version_description: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# RUN
dataset_id: str | None = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
psc_interface_config: PscInterfaceConfig | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.display_name = display_name
# START Custom
self.container_uri = container_uri
self.model_serving_container_image_uri = model_serving_container_image_uri
self.model_serving_container_predict_route = model_serving_container_predict_route
self.model_serving_container_health_route = model_serving_container_health_route
self.model_serving_container_command = model_serving_container_command
self.model_serving_container_args = model_serving_container_args
self.model_serving_container_environment_variables = model_serving_container_environment_variables
self.model_serving_container_ports = model_serving_container_ports
self.model_description = model_description
self.model_instance_schema_uri = model_instance_schema_uri
self.model_parameters_schema_uri = model_parameters_schema_uri
self.model_prediction_schema_uri = model_prediction_schema_uri
self.labels = labels
self.parent_model = parent_model
self.is_default_version = is_default_version
self.model_version_aliases = model_version_aliases
self.model_version_description = model_version_description
self.training_encryption_spec_key_name = training_encryption_spec_key_name
self.model_encryption_spec_key_name = model_encryption_spec_key_name
self.staging_bucket = staging_bucket
# END Custom
# START Run param
self.dataset_id = dataset_id
self.annotation_schema_uri = annotation_schema_uri
self.model_display_name = model_display_name
self.model_labels = model_labels
self.base_output_dir = base_output_dir
self.service_account = service_account
self.network = network
self.bigquery_destination = bigquery_destination
self.args = args
self.environment_variables = environment_variables
self.replica_count = replica_count
self.machine_type = machine_type
self.accelerator_type = accelerator_type
self.accelerator_count = accelerator_count
self.boot_disk_type = boot_disk_type
self.boot_disk_size_gb = boot_disk_size_gb
self.training_fraction_split = training_fraction_split
self.validation_fraction_split = validation_fraction_split
self.test_fraction_split = test_fraction_split
self.training_filter_split = training_filter_split
self.validation_filter_split = validation_filter_split
self.test_filter_split = test_filter_split
self.predefined_split_column_name = predefined_split_column_name
self.timestamp_split_column_name = timestamp_split_column_name
self.tensorboard = tensorboard
self.psc_interface_config = psc_interface_config
# END Run param
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"project_id": self.project_id,
}
def execute_complete(self, context: Context, event: dict[str, Any]) -> dict[str, Any] | None:
if event["status"] == "error":
raise AirflowException(event["message"])
training_pipeline = event["job"]
custom_job_id = self.hook.extract_custom_job_id_from_training_pipeline(training_pipeline)
context["ti"].xcom_push(key="custom_job_id", value=custom_job_id)
try:
model = training_pipeline["model_to_upload"]
model_id = self.hook.extract_model_id(model)
context["ti"].xcom_push(key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, model_id=model_id)
return model
except KeyError:
self.log.warning(
"It is impossible to get the Model. "
"The Training Pipeline did not produce a Managed Model because it was not "
"configured to upload a Model. Please ensure that the 'model_serving_container_image_uri' "
"and 'model_display_name' parameters are passed in when creating a Training Pipeline, "
"and check that your training script saves the model to os.environ['AIP_MODEL_DIR']."
)
return None
@cached_property
def hook(self) -> CustomJobHook:
return CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
def on_kill(self) -> None:
"""Act as a callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_job()
| CustomTrainingJobBaseOperator |
python | apache__airflow | airflow-e2e-tests/tests/airflow_e2e_tests/e2e_test_utils/clients.py | {
"start": 1146,
"end": 4723
} | class ____:
"""Client for interacting with the Airflow REST API."""
def __init__(self):
self.session = requests.Session()
@cached_property
def token(self):
Retry.DEFAULT_BACKOFF_MAX = 32
retry = Retry(total=10, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
session = requests.Session()
session.mount("http://", HTTPAdapter(max_retries=retry))
session.mount("https://", HTTPAdapter(max_retries=retry))
api_server_url = DOCKER_COMPOSE_HOST_PORT
if not api_server_url.startswith(("http://", "https://")):
api_server_url = "http://" + DOCKER_COMPOSE_HOST_PORT
url = f"{api_server_url}/auth/token"
login_response = session.post(
url,
json={"username": AIRFLOW_WWW_USER_USERNAME, "password": AIRFLOW_WWW_USER_PASSWORD},
)
access_token = login_response.json().get("access_token")
assert access_token, (
f"Failed to get JWT token from redirect url {url} with status code {login_response}"
)
return access_token
def _make_request(
self,
method: str,
endpoint: str,
base_url: str = f"http://{DOCKER_COMPOSE_HOST_PORT}/api/v2",
**kwargs,
):
response = requests.request(
method=method,
url=f"{base_url}/{endpoint}",
headers={"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"},
**kwargs,
)
response.raise_for_status()
return response.json()
def un_pause_dag(self, dag_id: str):
return self._make_request(
method="PATCH",
endpoint=f"dags/{dag_id}",
json={"is_paused": False},
)
def trigger_dag(self, dag_id: str, json=None):
if json is None:
json = {}
return self._make_request(method="POST", endpoint=f"dags/{dag_id}/dagRuns", json=json)
def wait_for_dag_run(self, dag_id: str, run_id: str, timeout=300, check_interval=5):
start_time = time.time()
while time.time() - start_time < timeout:
response = self._make_request(
method="GET",
endpoint=f"dags/{dag_id}/dagRuns/{run_id}",
)
state = response.get("state")
if state in {"success", "failed"}:
return state
time.sleep(check_interval)
raise TimeoutError(f"DAG run {run_id} for DAG {dag_id} did not complete within {timeout} seconds.")
def get_xcom_value(self, dag_id: str, task_id: str, run_id: str, key: str, map_index=-1):
return self._make_request(
method="GET",
endpoint=f"dags/{dag_id}/dagRuns/{run_id}/taskInstances/{task_id}/xcomEntries/{key}?map_index={map_index}",
)
def trigger_dag_and_wait(self, dag_id: str, json=None):
"""Trigger a DAG and wait for it to complete."""
self.un_pause_dag(dag_id)
resp = self.trigger_dag(dag_id, json=json or {"logical_date": datetime.now(timezone.utc).isoformat()})
# Wait for the DAG run to complete
return self.wait_for_dag_run(
dag_id=dag_id,
run_id=resp["dag_run_id"],
)
def get_task_logs(self, dag_id: str, run_id: str, task_id: str, try_number: int = 1):
"""Get task logs via API."""
return self._make_request(
method="GET",
endpoint=f"dags/{dag_id}/dagRuns/{run_id}/taskInstances/{task_id}/logs/{try_number}",
)
| AirflowClient |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_stream.py | {
"start": 233,
"end": 3521
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | fluentpython__example-code | 21-class-metaprog/bulkfood/model_v6.py | {
"start": 802,
"end": 1004
} | class ____(Validated):
"""a number greater than zero"""
def validate(self, instance, value):
if value <= 0:
raise ValueError('value must be > 0')
return value
| Quantity |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/model_call_limit.py | {
"start": 1693,
"end": 2715
} | class ____(Exception):
"""Exception raised when model call limits are exceeded.
This exception is raised when the configured exit behavior is `'error'` and either
the thread or run model call limit has been exceeded.
"""
def __init__(
self,
thread_count: int,
run_count: int,
thread_limit: int | None,
run_limit: int | None,
) -> None:
"""Initialize the exception with call count information.
Args:
thread_count: Current thread model call count.
run_count: Current run model call count.
thread_limit: Thread model call limit (if set).
run_limit: Run model call limit (if set).
"""
self.thread_count = thread_count
self.run_count = run_count
self.thread_limit = thread_limit
self.run_limit = run_limit
msg = _build_limit_exceeded_message(thread_count, run_count, thread_limit, run_limit)
super().__init__(msg)
| ModelCallLimitExceededError |
python | tensorflow__tensorflow | tensorflow/compiler/tests/reduce_ops_test.py | {
"start": 7471,
"end": 9591
} | class ____(xla_test.XLATestCase):
def _testReduceSum(self,
expected_result,
dtype,
test_inputs,
rtol=1e-3,
atol=1e-4):
"""Tests reduce sum on a list of input arrays.
For each array in test_inputs, check that performing reduce sum on the array
produces a value that is close to the expected result.
Args:
expected_result: the expected result.
dtype: the data type of the reduce sum operation.
test_inputs: a list of input arrays for the reduce sum operation.
rtol: the relative error.
atol: the absolute error.
"""
for test_input in test_inputs:
with self.session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = math_ops.reduce_sum(a, index)
result = sess.run(out, {
a: np.array(test_input, dtype=dtype),
index: [0]
})
# Compare the results using float32 type.
self.assertAllClose(
np.float32(result),
np.float32(expected_result),
rtol=rtol,
atol=atol)
def testReduceSumF16(self):
"""Tests the reduce sum of float16 doesn't lose too much precision."""
if np.float16 not in self.all_types:
return
f16_max = np.finfo(np.float16).max
self._testReduceSum(
f16_max, np.float16,
itertools.permutations([f16_max, f16_max, f16_max * (-1.0)], 3))
def testReduceSumBF16(self):
"""Tests the reduce sum of bfloat16 doesn't lose too much precision."""
if dtypes.bfloat16.as_numpy_dtype not in self.all_types:
return
bf16_max = np.float32(dtypes.bfloat16.max)
f32_max = dtypes.float32.max
value = min(bf16_max, f32_max - bf16_max) / 2
self._testReduceSum(
dtypes.bfloat16.as_numpy_dtype(value), dtypes.bfloat16.as_numpy_dtype,
itertools.permutations([bf16_max, value, bf16_max * (-1.0)], 3))
if __name__ == '__main__':
googletest.main()
| ReduceOpPrecisionTest |
python | realpython__materials | django-todo-list/source_code_final/todo_app/models.py | {
"start": 207,
"end": 427
} | class ____(models.Model):
title = models.CharField(max_length=100, unique=True)
def get_absolute_url(self):
return reverse("list", args=[self.id])
def __str__(self):
return self.title
| ToDoList |
python | mlflow__mlflow | mlflow/llama_index/pyfunc_wrapper.py | {
"start": 3379,
"end": 5259
} | class ____(_LlamaIndexModelWrapperBase):
@property
def engine_type(self):
return CHAT_ENGINE_NAME
def _predict_single(self, *args, **kwargs) -> str:
return self._llama_model.chat(*args, **kwargs).response
@staticmethod
def _convert_chat_message_history_to_chat_message_objects(
data: dict[str, Any],
) -> dict[str, Any]:
from llama_index.core.llms import ChatMessage
if chat_message_history := data.get(_CHAT_MESSAGE_HISTORY_PARAMETER_NAME):
if isinstance(chat_message_history, list):
if all(isinstance(message, dict) for message in chat_message_history):
data[_CHAT_MESSAGE_HISTORY_PARAMETER_NAME] = [
ChatMessage(**message) for message in chat_message_history
]
else:
raise ValueError(
f"Unsupported input type: {type(chat_message_history)}. "
"It must be a list of dicts."
)
return data
def _format_predict_input(self, data) -> str | dict[str, Any] | list[Any]:
data = _convert_llm_input_data_with_unwrapping(data)
if isinstance(data, str):
return data
elif isinstance(data, dict):
return self._convert_chat_message_history_to_chat_message_objects(data)
elif isinstance(data, list):
# NB: handle pandas returning lists when there is a single row
prediction_input = [self._format_predict_input(d) for d in data]
return prediction_input if len(prediction_input) > 1 else prediction_input[0]
else:
raise ValueError(
f"Unsupported input type: {type(data)}. It must be one of "
"[str, dict, list, numpy.ndarray, pandas.DataFrame]"
)
| ChatEngineWrapper |
python | numba__numba | numba/core/types/scalars.py | {
"start": 2987,
"end": 3505
} | class ____(Number):
def __init__(self, *args, **kws):
super(Float, self).__init__(*args, **kws)
# Determine bitwidth
assert self.name.startswith('float')
bitwidth = int(self.name[5:])
self.bitwidth = bitwidth
def cast_python_value(self, value):
return getattr(np, self.name)(value)
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self.bitwidth < other.bitwidth
@total_ordering
| Float |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 86876,
"end": 87182
} | class ____(sgqlc.types.Enum):
"""Properties by which repository invitation connections can be
ordered.
Enumeration Choices:
* `CREATED_AT`: Order repository invitations by creation time
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| RepositoryInvitationOrderField |
python | pandas-dev__pandas | pandas/tests/scalar/period/test_asfreq.py | {
"start": 266,
"end": 38135
} | class ____:
"""Test frequency conversion of date objects"""
@pytest.mark.filterwarnings("ignore:Period with BDay:FutureWarning")
@pytest.mark.parametrize("freq", ["Y", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="Y", year=2007)
result1 = val.asfreq("5min")
result2 = val.asfreq("min")
expected = Period("2007-12-31 23:59", freq="min")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5min"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "min"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
msg = INVALID_FREQ_ERR_MSG
ival_A = Period(freq="Y", year=2007)
ival_AJAN = Period(freq="Y-JAN", year=2007)
ival_AJUN = Period(freq="Y-JUN", year=2007)
ival_ANOV = Period(freq="Y-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="h", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "s") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "s") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_A.asfreq("B", "s") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "s") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
with pytest.raises(ValueError, match=msg):
assert ival_A.asfreq("H", "s") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "s") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
with pytest.raises(ValueError, match=msg):
assert ival_A.asfreq("T", "s") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "s") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "s") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "s") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("Y") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="Y", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="h", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("Y") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("Y") == ival_Q_to_A
assert ival_Q.asfreq("M", "s") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "s") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_Q.asfreq("B", "s") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "s") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("h", "s") == ival_Q_to_H_start
assert ival_Q.asfreq("h", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "s") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("s", "s") == ival_Q_to_S_start
assert ival_Q.asfreq("s", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "s") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "s") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="Y", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="h", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("Y") == ival_M_to_A
assert ival_M_end_of_year.asfreq("Y") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "s") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_M.asfreq("B", "s") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "s") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("h", "s") == ival_M_to_H_start
assert ival_M.asfreq("h", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "s") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("s", "s") == ival_M_to_S_start
assert ival_M.asfreq("s", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="Y", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="Y", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="Y", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="h", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("Y") == ival_W_to_A
assert ival_W_end_of_year.asfreq("Y") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_W.asfreq("B", "s") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "s") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "s") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "s") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "s") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "s") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "s") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "s") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "s") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("h", "s") == ival_W_to_H_start
assert ival_W.asfreq("h", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "s") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("s", "s") == ival_W_to_S_start
assert ival_W.asfreq("s", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="Y", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("Y") == ival_B_to_A
assert ival_B_end_of_year.asfreq("Y") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("h", "s") == ival_B_to_H_start
assert ival_B.asfreq("h", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "s") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("s", "s") == ival_B_to_S_start
assert ival_B.asfreq("s", "E") == ival_B_to_S_end
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="Y", year=2007)
ival_Deoq_to_AJAN = Period(freq="Y-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="Y-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="Y-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("Y") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Y-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("Y-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("Y-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("Y") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "s") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "s") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("h", "s") == ival_D_to_H_start
assert ival_D.asfreq("h", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "s") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("s", "s") == ival_D_to_S_start
assert ival_D.asfreq("s", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="h", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="h", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="h", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="h", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day = Period(freq="h", year=2007, month=1, day=1, hour=23)
ival_H_end_of_bus = Period(freq="h", year=2007, month=1, day=1, hour=23)
ival_H_to_A = Period(freq="Y", year=2007)
ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_H_to_M = Period(freq="M", year=2007, month=1)
ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_H_to_D = Period(freq="D", year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_H_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_H_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_H_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_H_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_H_to_S_end = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
assert ival_H.asfreq("Y") == ival_H_to_A
assert ival_H_end_of_year.asfreq("Y") == ival_H_to_A
assert ival_H.asfreq("Q") == ival_H_to_Q
assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q
assert ival_H.asfreq("M") == ival_H_to_M
assert ival_H_end_of_month.asfreq("M") == ival_H_to_M
assert ival_H.asfreq("W") == ival_H_to_W
assert ival_H_end_of_week.asfreq("W") == ival_H_to_W
assert ival_H.asfreq("D") == ival_H_to_D
assert ival_H_end_of_day.asfreq("D") == ival_H_to_D
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_H.asfreq("B") == ival_H_to_B
assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B
assert ival_H.asfreq("Min", "s") == ival_H_to_T_start
assert ival_H.asfreq("Min", "E") == ival_H_to_T_end
assert ival_H.asfreq("s", "s") == ival_H_to_S_start
assert ival_H.asfreq("s", "E") == ival_H_to_S_end
assert ival_H.asfreq("h") == ival_H
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
ival_T_end_of_year = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_T_end_of_quarter = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_T_end_of_month = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_T_end_of_week = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_T_end_of_day = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_bus = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_T_end_of_hour = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_T_to_A = Period(freq="Y", year=2007)
ival_T_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_T_to_M = Period(freq="M", year=2007, month=1)
ival_T_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_T_to_D = Period(freq="D", year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_T_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_T_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_T_to_S_end = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
assert ival_T.asfreq("Y") == ival_T_to_A
assert ival_T_end_of_year.asfreq("Y") == ival_T_to_A
assert ival_T.asfreq("Q") == ival_T_to_Q
assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q
assert ival_T.asfreq("M") == ival_T_to_M
assert ival_T_end_of_month.asfreq("M") == ival_T_to_M
assert ival_T.asfreq("W") == ival_T_to_W
assert ival_T_end_of_week.asfreq("W") == ival_T_to_W
assert ival_T.asfreq("D") == ival_T_to_D
assert ival_T_end_of_day.asfreq("D") == ival_T_to_D
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_T.asfreq("B") == ival_T_to_B
assert ival_T_end_of_bus.asfreq("B") == ival_T_to_B
assert ival_T.asfreq("h") == ival_T_to_H
assert ival_T_end_of_hour.asfreq("h") == ival_T_to_H
assert ival_T.asfreq("s", "s") == ival_T_to_S_start
assert ival_T.asfreq("s", "E") == ival_T_to_S_end
assert ival_T.asfreq("Min") == ival_T
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0)
ival_S_end_of_year = Period(
freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_quarter = Period(
freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_month = Period(
freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
ival_S_end_of_week = Period(
freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
ival_S_end_of_day = Period(
freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_bus = Period(
freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
ival_S_end_of_hour = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
ival_S_end_of_minute = Period(
freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59
)
ival_S_to_A = Period(freq="Y", year=2007)
ival_S_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_S_to_M = Period(freq="M", year=2007, month=1)
ival_S_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_S_to_D = Period(freq="D", year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
ival_S_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_S_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0)
ival_S_to_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
assert ival_S.asfreq("Y") == ival_S_to_A
assert ival_S_end_of_year.asfreq("Y") == ival_S_to_A
assert ival_S.asfreq("Q") == ival_S_to_Q
assert ival_S_end_of_quarter.asfreq("Q") == ival_S_to_Q
assert ival_S.asfreq("M") == ival_S_to_M
assert ival_S_end_of_month.asfreq("M") == ival_S_to_M
assert ival_S.asfreq("W") == ival_S_to_W
assert ival_S_end_of_week.asfreq("W") == ival_S_to_W
assert ival_S.asfreq("D") == ival_S_to_D
assert ival_S_end_of_day.asfreq("D") == ival_S_to_D
with tm.assert_produces_warning(FutureWarning, match=bday_msg):
assert ival_S.asfreq("B") == ival_S_to_B
assert ival_S_end_of_bus.asfreq("B") == ival_S_to_B
assert ival_S.asfreq("h") == ival_S_to_H
assert ival_S_end_of_hour.asfreq("h") == ival_S_to_H
assert ival_S.asfreq("Min") == ival_S_to_T
assert ival_S_end_of_minute.asfreq("Min") == ival_S_to_T
assert ival_S.asfreq("s") == ival_S
def test_conv_microsecond(self):
# GH#31475 Avoid floating point errors dropping the start_time to
# before the beginning of the Period
per = Period("2020-01-30 15:57:27.576166", freq="us")
assert per.ordinal == 1580399847576166
start = per.start_time
expected = Timestamp("2020-01-30 15:57:27.576166")
assert start == expected
assert start._value == per.ordinal * 1000
per2 = Period("2300-01-01", "us")
msg = "2300-01-01"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per2.start_time
with pytest.raises(OutOfBoundsDatetime, match=msg):
per2.end_time
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq="Y", year=2007)
# ordinal will not change
for freq in ["3Y", offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period("2007", freq="3Y")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# ordinal will not change
for freq in ["3Y", offsets.YearEnd(3)]:
result = p.asfreq(freq, how="S")
expected = Period("2007", freq="3Y")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# mult freq to normal freq
p = Period(freq="3Y", year=2007)
# ordinal will change because how=E is the default
for freq in ["Y", offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period("2009", freq="Y")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# ordinal will not change
for freq in ["Y", offsets.YearEnd()]:
result = p.asfreq(freq, how="s")
expected = Period("2007", freq="Y")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
p = Period(freq="Y", year=2007)
for freq in ["2M", offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period("2007-12", freq="2M")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
for freq in ["2M", offsets.MonthEnd(2)]:
result = p.asfreq(freq, how="s")
expected = Period("2007-01", freq="2M")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
p = Period(freq="3Y", year=2007)
for freq in ["2M", offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period("2009-12", freq="2M")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
for freq in ["2M", offsets.MonthEnd(2)]:
result = p.asfreq(freq, how="s")
expected = Period("2007-01", freq="2M")
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
def test_asfreq_combined(self):
# normal freq to combined freq
p = Period("2007", freq="h")
# ordinal will not change
expected = Period("2007", freq="25h")
for freq, how in zip(["1D1h", "1h1D"], ["E", "S"]):
result = p.asfreq(freq, how=how)
assert result == expected
assert result.ordinal == expected.ordinal
assert result.freq == expected.freq
# combined freq to normal freq
p1 = Period(freq="1D1h", year=2007)
p2 = Period(freq="1h1D", year=2007)
# ordinal will change because how=E is the default
result1 = p1.asfreq("h")
result2 = p2.asfreq("h")
expected = Period("2007-01-02", freq="h")
assert result1 == expected
assert result1.ordinal == expected.ordinal
assert result1.freq == expected.freq
assert result2 == expected
assert result2.ordinal == expected.ordinal
assert result2.freq == expected.freq
# ordinal will not change
result1 = p1.asfreq("h", how="S")
result2 = p2.asfreq("h", how="S")
expected = Period("2007-01-01", freq="h")
assert result1 == expected
assert result1.ordinal == expected.ordinal
assert result1.freq == expected.freq
assert result2 == expected
assert result2.ordinal == expected.ordinal
assert result2.freq == expected.freq
def test_asfreq_MS(self):
initial = Period("2013")
assert initial.asfreq(freq="M", how="S") == Period("2013-01", "M")
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
initial.asfreq(freq="MS", how="S")
with pytest.raises(ValueError, match=msg):
Period("2013-01", "MS")
| TestFreqConversion |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/forms.py | {
"start": 816,
"end": 935
} | class ____(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
| Page3 |
python | dask__dask | dask/tests/test_context.py | {
"start": 677,
"end": 1176
} | class ____:
@globalmethod(key="f")
def f(): # type: ignore[misc]
return 1
g = globalmethod(foo, key="g", falsey=bar)
def test_globalmethod():
x = Foo()
assert x.f() == 1
with dask.config.set(f=lambda: 2):
assert x.f() == 2
with dask.config.set(f=foo):
assert x.f is foo
assert x.f() == "foo"
assert x.g is foo
assert x.g() == "foo"
with dask.config.set(g=False):
assert x.g is bar
assert x.g() == "bar"
| Foo |
python | readthedocs__readthedocs.org | readthedocs/api/v3/mixins.py | {
"start": 1565,
"end": 4506
} | class ____:
# Lookup names defined on ``readthedocs/api/v3/urls.py`` when defining the
# mapping between URLs and views through the router.
PROJECT_LOOKUP_NAMES = [
"project__slug",
"projects__slug",
"parent__slug",
"superprojects__parent__slug",
"main_language_project__slug",
]
VERSION_LOOKUP_NAMES = [
"version__slug",
]
ORGANIZATION_LOOKUP_NAMES = [
"organization__slug",
"organizations__slug",
]
BUILD_LOOKUP_NAMES = [
"build__id",
]
USER_LOOKUP_NAMES = [
"user__username",
]
def _get_parent_object_lookup(self, lookup_names):
query_dict = self.get_parents_query_dict()
for lookup in lookup_names:
value = query_dict.get(lookup)
if value:
return value
def _get_parent_project(self):
slug = self._get_parent_object_lookup(self.PROJECT_LOOKUP_NAMES)
# when hitting ``/projects/<slug>/`` we don't have a "parent" project
# because this endpoint is the base one, so we just get the project from
# ``project_slug`` kwargs
slug = slug or self.kwargs.get("project_slug")
return get_object_or_404(Project, slug=slug)
def _get_parent_build(self):
"""
Filter the build by the permissions of the current user.
Build permissions depend not only on the project, but also on
the version, Build.objects.api takes all that into consideration.
"""
project_slug = self._get_parent_object_lookup(self.PROJECT_LOOKUP_NAMES)
build_pk = self._get_parent_object_lookup(self.BUILD_LOOKUP_NAMES)
return get_object_or_404(
Build.objects.api(user=self.request.user),
pk=build_pk,
project__slug=project_slug,
)
def _get_parent_version(self):
project_slug = self._get_parent_object_lookup(self.PROJECT_LOOKUP_NAMES)
slug = self._get_parent_object_lookup(self.VERSION_LOOKUP_NAMES)
return get_object_or_404(
Version,
slug=slug,
project__slug=project_slug,
)
def _get_parent_organization(self):
slug = self._get_parent_object_lookup(self.ORGANIZATION_LOOKUP_NAMES)
# when hitting ``/organizations/<slug>/`` we don't have a "parent" organization
# because this endpoint is the base one, so we just get the organization from
# ``organization_slug`` kwargs
slug = slug or self.kwargs.get("organization_slug")
return get_object_or_404(
Organization,
slug=slug,
)
def _get_parent_user(self):
username = self._get_parent_object_lookup(self.USER_LOOKUP_NAMES)
username = username or self.kwargs.get("user_username")
return get_object_or_404(
User,
username=username,
)
| NestedParentObjectMixin |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-faiss/tests/test_vector_stores_faiss.py | {
"start": 2230,
"end": 10450
} | class ____:
def test_add_documents(self, node_embeddings: List[TextNode]) -> None:
"""Test adding documents to faiss map vector store."""
vector_store = FaissMapVectorStore(faiss_index=get_map_index())
# Add nodes to the faiss map vector
vector_store.add(node_embeddings)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
assert len(vector_store._faiss_id_to_node_id_map) == 3
assert len(vector_store._node_id_to_faiss_id_map) == 3
assert (
vector_store._faiss_id_to_node_id_map[0]
== "12c70eed-5779-4008-aba0-596e003f6443"
)
assert (
vector_store._faiss_id_to_node_id_map[1]
== "f7d81cb3-bb42-47e6-96f5-17db6860cd11"
)
assert (
vector_store._faiss_id_to_node_id_map[2]
== "469e9537-7bc5-4669-9ff6-baa0ed086236"
)
assert (
vector_store._node_id_to_faiss_id_map[
"12c70eed-5779-4008-aba0-596e003f6443"
]
== 0
)
assert (
vector_store._node_id_to_faiss_id_map[
"f7d81cb3-bb42-47e6-96f5-17db6860cd11"
]
== 1
)
assert (
vector_store._node_id_to_faiss_id_map[
"469e9537-7bc5-4669-9ff6-baa0ed086236"
]
== 2
)
def test_delete_nodes(self, node_embeddings: List[TextNode]) -> None:
"""Test deleting nodes from faiss map vector store."""
vector_store = FaissMapVectorStore(faiss_index=get_map_index())
# Add nodes to the faiss map vector
vector_store.add(node_embeddings)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
vector_store.delete_nodes(
node_ids=["469e9537-7bc5-4669-9ff6-baa0ed086236"],
)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
print(result)
assert result.similarities[0] == 64.0
assert result.ids[0] == "f7d81cb3-bb42-47e6-96f5-17db6860cd11"
assert len(vector_store._faiss_id_to_node_id_map) == 2
assert len(vector_store._node_id_to_faiss_id_map) == 2
assert (
vector_store._faiss_id_to_node_id_map[0]
== "12c70eed-5779-4008-aba0-596e003f6443"
)
assert (
vector_store._faiss_id_to_node_id_map[1]
== "f7d81cb3-bb42-47e6-96f5-17db6860cd11"
)
assert (
vector_store._node_id_to_faiss_id_map[
"12c70eed-5779-4008-aba0-596e003f6443"
]
== 0
)
assert (
vector_store._node_id_to_faiss_id_map[
"f7d81cb3-bb42-47e6-96f5-17db6860cd11"
]
== 1
)
def test_delete(self, node_embeddings: List[TextNode]) -> None:
"""Test deleting nodes from faiss map vector store."""
vector_store = FaissMapVectorStore(faiss_index=get_map_index())
# Add nodes to the faiss map vector
vector_store.add(node_embeddings)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
vector_store.delete(
ref_doc_id="469e9537-7bc5-4669-9ff6-baa0ed086236",
)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
print(result)
assert result.similarities[0] == 64.0
assert result.ids[0] == "f7d81cb3-bb42-47e6-96f5-17db6860cd11"
assert len(vector_store._faiss_id_to_node_id_map) == 2
assert len(vector_store._node_id_to_faiss_id_map) == 2
assert (
vector_store._faiss_id_to_node_id_map[0]
== "12c70eed-5779-4008-aba0-596e003f6443"
)
assert (
vector_store._faiss_id_to_node_id_map[1]
== "f7d81cb3-bb42-47e6-96f5-17db6860cd11"
)
assert (
vector_store._node_id_to_faiss_id_map[
"12c70eed-5779-4008-aba0-596e003f6443"
]
== 0
)
assert (
vector_store._node_id_to_faiss_id_map[
"f7d81cb3-bb42-47e6-96f5-17db6860cd11"
]
== 1
)
def test_delete_nodes_with_filters(self, node_embeddings: List[TextNode]) -> None:
"""Test deleting nodes from faiss map vector store with filters."""
vector_store = FaissMapVectorStore(faiss_index=get_map_index())
# Add nodes to the faiss map vector
vector_store.add(node_embeddings)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
try:
vector_store.delete_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="genre", value="Thriller", operator="=="),
MetadataFilter(key="pages", value=10, operator=">"),
]
),
)
raise AssertionError
except NotImplementedError:
# Metadata filters not implemented for Faiss yet.
pass
def test_persist_and_load(self, node_embeddings: List[TextNode]) -> None:
"""Test persisting and loading from faiss map vector store."""
vector_store = FaissMapVectorStore(faiss_index=get_map_index())
# Add nodes to the faiss map vector
vector_store.add(node_embeddings)
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
# Persist the vector store
vector_store.persist()
# Load the vector store
loaded_vector_store = FaissMapVectorStore.from_persist_dir()
assert (
loaded_vector_store._node_id_to_faiss_id_map
== vector_store._node_id_to_faiss_id_map
)
assert (
loaded_vector_store._faiss_id_to_node_id_map
== vector_store._faiss_id_to_node_id_map
)
# query from loaded vector store
query = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=1,
)
result = loaded_vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
# delete from original vector store
vector_store.delete(
ref_doc_id="469e9537-7bc5-4669-9ff6-baa0ed086236",
)
assert (
loaded_vector_store._node_id_to_faiss_id_map
!= vector_store._node_id_to_faiss_id_map
)
assert (
loaded_vector_store._faiss_id_to_node_id_map
!= vector_store._faiss_id_to_node_id_map
)
# query from loaded vector store without delete
result = loaded_vector_store.query(query)
assert result.similarities[0] == 0.0
assert result.ids[0] == "469e9537-7bc5-4669-9ff6-baa0ed086236"
| TestFaissMapVectorStore |
python | explosion__spaCy | spacy/lang/is/__init__.py | {
"start": 153,
"end": 255
} | class ____(Language):
lang = "is"
Defaults = IcelandicDefaults
__all__ = ["Icelandic"]
| Icelandic |
python | getsentry__sentry | src/sentry/api/endpoints/organization_on_demand_metrics_estimation_stats.py | {
"start": 1828,
"end": 9950
} | class ____(OrganizationEventsV2EndpointBase):
"""Gets the estimated volume of an organization's metric events."""
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
def get(self, request: Request, organization: Organization) -> Response:
measurement = request.GET.get("yAxis")
if measurement is None:
return Response({"detail": "missing required parameter yAxis"}, status=400)
with sentry_sdk.start_span(op="discover.metrics.endpoint", name="get_full_metrics") as span:
span.set_data("organization", organization)
try:
# the discover stats
discover_stats = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=True, remove_on_demand=False),
)
stats_quality = estimate_stats_quality(discover_stats["data"])
if _should_scale(measurement):
# we scale the indexed data with the ratio between indexed counts and metrics counts
# in order to get an estimate of the true volume of transactions
# the closest we have to the stats in discover that can also be queried in metrics
base_discover = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=True, remove_on_demand=True),
)
# the closest we have to the stats in metrics, with no on_demand metrics
base_metrics = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=False, remove_on_demand=True),
)
estimated_volume = estimate_volume(
discover_stats["data"], base_discover["data"], base_metrics["data"]
)
discover_stats["data"] = estimated_volume
# we can't find any data (not even in metrics))
if (
stats_quality == StatsQualityEstimation.NO_INDEXED_DATA
and _count_non_zero_intervals(base_discover["data"]) == 0
):
stats_quality = StatsQualityEstimation.NO_DATA
metrics.incr(
"metrics_estimation_stats.data_quality",
sample_rate=1.0,
tags={"data_quality": stats_quality.value},
)
except ValidationError:
return Response(
{"detail": "Comparison period is outside retention window"}, status=400
)
return Response(discover_stats, status=200)
def _count_non_zero_intervals(stats: list[MetricVolumeRow]) -> int:
"""
Counts the number of intervals with non-zero values
"""
non_zero_intervals = 0
for idx in range(len(stats)):
if _get_value(stats[idx]) != 0:
non_zero_intervals += 1
return non_zero_intervals
def estimate_stats_quality(stats: list[MetricVolumeRow]) -> StatsQualityEstimation:
"""
Estimates the quality of the stats estimation based on the number of intervals with no data
"""
if len(stats) == 0:
return StatsQualityEstimation.NO_DATA
data_intervals = _count_non_zero_intervals(stats)
data_ratio = data_intervals / len(stats)
if data_ratio >= 0.8:
return StatsQualityEstimation.GOOD_INDEXED_DATA
elif data_ratio > 0.4:
return StatsQualityEstimation.ACCEPTABLE_INDEXED_DATA
elif data_intervals > 0:
return StatsQualityEstimation.POOR_INDEXED_DATA
else:
return StatsQualityEstimation.NO_INDEXED_DATA
def get_stats_generator(
use_discover: bool, remove_on_demand: bool
) -> Callable[[Sequence[str], str, SnubaParams, int, bool, timedelta | None], SnubaTSResult]:
"""
Returns a get_stats function that can fetch from either metrics or discover and
with or without on_demand metrics.
"""
def get_discover_stats(
query_columns: Sequence[str],
query: str,
snuba_params: SnubaParams,
rollup: int,
zerofill_results: bool, # not used but required by get_event_stats_data
comparison_delta: timedelta | None, # not used but required by get_event_stats_data
) -> SnubaTSResult:
# use discover or metrics_performance depending on the dataset
if use_discover:
module: ModuleType = discover
else:
module = metrics_performance
if remove_on_demand:
query = to_standard_metrics_query(query)
return module.timeseries_query(
selected_columns=query_columns,
query=query,
snuba_params=snuba_params,
rollup=rollup,
referrer=Referrer.API_ORGANIZATION_METRICS_ESTIMATION_STATS.value,
zerofill_results=True,
has_metrics=True,
)
return get_discover_stats
def estimate_volume(
indexed_data: list[MetricVolumeRow],
base_index: list[MetricVolumeRow],
base_metrics: list[MetricVolumeRow],
) -> list[MetricVolumeRow]:
"""
Estimates the volume of an on-demand metric by scaling the counts of the indexed metric with an estimated
sampling rate deduced from the factor of base_indexed and base_metrics time series.
The idea is that if we could multiply the indexed data by the actual sampling rate at each interval we would
obtain a good estimate of the volume. To get the actual sampling rate at any time we query both the indexed and
the metric data for the base metric (not the derived metric) and the ratio would be the approximate sample rate
"""
assert _is_data_aligned(indexed_data, base_index)
assert _is_data_aligned(indexed_data, base_metrics)
index_total = 0.0
for elm in base_index:
index_total += _get_value(elm)
metrics_total = 0.0
for elm in base_metrics:
metrics_total += _get_value(elm)
if index_total == 0.0:
return indexed_data # there is no way to estimate the volume
avg_inverted_rate = metrics_total / index_total
for idx in range(len(indexed_data)):
indexed = _get_value(base_index[idx])
metrics = _get_value(base_metrics[idx])
if indexed != 0:
inverted_rate = metrics / indexed
else:
inverted_rate = avg_inverted_rate
_set_value(indexed_data[idx], _get_value(indexed_data[idx]) * inverted_rate)
return indexed_data
def _get_value(elm: MetricVolumeRow) -> float:
ret_val = cast(list[CountResult], elm[1])[0].get("count")
if ret_val is None:
return 0.0
return ret_val
def _set_value(elm: MetricVolumeRow, value: float) -> None:
cast(list[CountResult], elm[1])[0]["count"] = value
def _is_data_aligned(left: list[MetricVolumeRow], right: list[MetricVolumeRow]) -> bool:
"""
Checks if the two timeseries are aligned (represent the same time intervals).
Checks the length and the first and last timestamp (assumes they are correctly constructed, no
check for individual intervals)
"""
if len(left) != len(right):
return False
if len(left) == 0:
return True
return left[0][0] == right[0][0] and left[-1][0] == right[-1][0]
def _should_scale(metric: str) -> bool:
"""
Decides if the metric should be scaled ( based on the ratio between indexed and metrics data) or not
We can only scale counters ( percentiles and ratios cannot be scaled based on the ratio
between indexed and metrics data)
"""
if fields.is_function(metric):
function, params, alias = fields.parse_function(metric)
if function and function.lower() == "count":
return True
return False
| OrganizationOnDemandMetricsEstimationStatsEndpoint |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 792,
"end": 883
} | class ____(A1):
def m0(self, x):
self.m1(x)
def m1(self, x):
pass
| B1 |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr_resnet.py | {
"start": 2334,
"end": 4060
} | class ____(nn.Module):
"""
ResNet Embeddings (stem) composed of a deep aggressive convolution.
"""
def __init__(self, config: RTDetrResNetConfig):
super().__init__()
self.embedder = nn.Sequential(
*[
RTDetrResNetConvLayer(
config.num_channels,
config.embedding_size // 2,
kernel_size=3,
stride=2,
activation=config.hidden_act,
),
RTDetrResNetConvLayer(
config.embedding_size // 2,
config.embedding_size // 2,
kernel_size=3,
stride=1,
activation=config.hidden_act,
),
RTDetrResNetConvLayer(
config.embedding_size // 2,
config.embedding_size,
kernel_size=3,
stride=1,
activation=config.hidden_act,
),
]
)
self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.num_channels = config.num_channels
def forward(self, pixel_values: Tensor) -> Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embedding = self.embedder(pixel_values)
embedding = self.pooler(embedding)
return embedding
# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut -> RTDetrResNetChortCut
| RTDetrResNetEmbeddings |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/component/base.py | {
"start": 129,
"end": 602
} | class ____:
"""
Represents an abstract Discord message component.
Child classes should override the constructor with necessary fields for
the component type.
https://discord.com/developers/docs/interactions/message-components#component-object
"""
def __init__(self, type: int) -> None:
self.type = type
def build(self) -> DiscordMessageComponentDict:
return DiscordMessageComponentDict(type=self.type)
| DiscordMessageComponent |
python | realpython__materials | queue/src/thread_safe_queues.py | {
"start": 2266,
"end": 2478
} | class ____(Worker):
def run(self):
while True:
self.product = self.buffer.get()
self.simulate_work()
self.buffer.task_done()
self.simulate_idle()
| Consumer |
python | sqlalchemy__sqlalchemy | test/engine/test_pool.py | {
"start": 67885,
"end": 68314
} | class ____(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
| NullPoolTest |
python | keras-team__keras | keras/src/layers/rnn/gru_test.py | {
"start": 169,
"end": 11929
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.GRU,
init_kwargs={"units": 3, "dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.GRU,
init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.GRU,
init_kwargs={
"units": 3,
"return_sequences": True,
"bias_regularizer": "l1",
"kernel_regularizer": "l2",
"recurrent_regularizer": "l2",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 3),
expected_num_losses=3,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
@parameterized.parameters([1, 2])
def test_correctness(self, implementation):
sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.5217289, 0.5217289, 0.5217289],
[0.6371659, 0.6371659, 0.6371659],
[0.39384964, 0.39384964, 0.3938496],
]
),
output,
)
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.24406259, 0.24406259, 0.24406259],
[0.611516, 0.611516, 0.611516],
[0.3928808, 0.3928808, 0.3928808],
]
),
output,
)
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.5217289, 0.5217289, 0.5217289],
[0.6371659, 0.6371659, 0.6371659],
[0.39384964, 0.39384964, 0.3938496],
]
),
output,
)
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
reset_after=False,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.51447755, 0.51447755, 0.51447755],
[0.6426879, 0.6426879, 0.6426879],
[0.40208298, 0.40208298, 0.40208298],
]
),
output,
)
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
use_bias=False,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.49988455, 0.49988455, 0.49988455],
[0.64701194, 0.64701194, 0.64701194],
[0.4103359, 0.4103359, 0.4103359],
]
),
output,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.GRU(
4,
stateful=True,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.29542392, 0.29542392, 0.29542392, 0.29542392],
[0.5885018, 0.5885018, 0.5885018, 0.5885018],
]
),
output,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.29542392, 0.29542392, 0.29542392, 0.29542392],
[0.5885018, 0.5885018, 0.5885018, 0.5885018],
]
),
output,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = np.arange(4).reshape((2, 2)).astype("float32")
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.23774096, 0.33508456], [0.83659905, 1.0227708]]),
output,
)
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.13486053, 0.23261218], [0.78257304, 0.9691353]]),
output,
)
def test_pass_return_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = np.arange(4).reshape((2, 2)).astype("float32")
# Test with go_backwards=False
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_state=True,
)
output, state = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.23774096, 0.33508456], [0.83659905, 1.0227708]]),
output,
)
self.assertAllClose(output, state)
# Test with go_backwards=True
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_state=True,
go_backwards=True,
)
output, state = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.13486053, 0.23261218], [0.78257304, 0.9691353]]),
output,
)
self.assertAllClose(output, state)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
mask = np.array([[True, True, False, True], [True, False, False, True]])
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.19393763, 0.19393763], [0.30818558, 0.30818558]]),
output,
)
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.03606692, 0.03606692],
[0.09497581, 0.09497581],
[0.09497581, 0.09497581],
[0.19393763, 0.19393763],
],
),
output[0],
)
self.assertAllClose(
np.array(
[
[0.16051409, 0.16051409],
[0.16051409, 0.16051409],
[0.16051409, 0.16051409],
[0.30818558, 0.30818558],
],
),
output[1],
)
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
zero_output_for_mask=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.03606692, 0.03606692],
[0.09497581, 0.09497581],
[0.0, 0.0],
[0.19393763, 0.19393763],
],
),
output[0],
)
self.assertAllClose(
np.array(
[
[0.16051409, 0.16051409],
[0.0, 0.0],
[0.0, 0.0],
[0.30818558, 0.30818558],
],
),
output[1],
)
layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.11669192, 0.11669192], [0.28380975, 0.28380975]]),
output,
)
def test_legacy_implementation_argument(self):
sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
layer = layers.GRU(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
config = layer.get_config()
config["implementation"] = 0 # Add legacy argument
layer = layers.GRU.from_config(config)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.5217289, 0.5217289, 0.5217289],
[0.6371659, 0.6371659, 0.6371659],
[0.39384964, 0.39384964, 0.3938496],
]
),
output,
)
| GRUTest |
python | doocs__leetcode | solution/3500-3599/3579.Minimum Steps to Convert String with Operations/Solution.py | {
"start": 0,
"end": 803
} | class ____:
def minOperations(self, word1: str, word2: str) -> int:
def calc(l: int, r: int, rev: bool) -> int:
cnt = Counter()
res = 0
for i in range(l, r + 1):
j = r - (i - l) if rev else i
a, b = word1[j], word2[i]
if a != b:
if cnt[(b, a)] > 0:
cnt[(b, a)] -= 1
else:
cnt[(a, b)] += 1
res += 1
return res
n = len(word1)
f = [inf] * (n + 1)
f[0] = 0
for i in range(1, n + 1):
for j in range(i):
t = min(calc(j, i - 1, False), 1 + calc(j, i - 1, True))
f[i] = min(f[i], f[j] + t)
return f[n]
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/generator_test.py | {
"start": 15026,
"end": 16606
} | class ____(unittest.TestCase):
"""Checks that messages, enums and files are correctly registered."""
def testGetSymbol(self):
self.assertEqual(
unittest_pb2.TestAllTypes, symbol_database.Default().GetSymbol(
'proto2_unittest.TestAllTypes'))
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage,
symbol_database.Default().GetSymbol(
'proto2_unittest.TestAllTypes.NestedMessage'))
with self.assertRaises(KeyError):
symbol_database.Default().GetSymbol('proto2_unittest.NestedMessage')
self.assertEqual(
unittest_pb2.TestAllTypes.OptionalGroup,
symbol_database.Default().GetSymbol(
'proto2_unittest.TestAllTypes.OptionalGroup'))
self.assertEqual(
unittest_pb2.TestAllTypes.RepeatedGroup,
symbol_database.Default().GetSymbol(
'proto2_unittest.TestAllTypes.RepeatedGroup'))
def testEnums(self):
self.assertEqual(
'proto2_unittest.ForeignEnum',
symbol_database.Default().pool.FindEnumTypeByName(
'proto2_unittest.ForeignEnum').full_name)
self.assertEqual(
'proto2_unittest.TestAllTypes.NestedEnum',
symbol_database.Default().pool.FindEnumTypeByName(
'proto2_unittest.TestAllTypes.NestedEnum').full_name)
def testFindFileByName(self):
self.assertEqual(
'google/protobuf/unittest.proto',
symbol_database.Default().pool.FindFileByName(
'google/protobuf/unittest.proto').name)
if __name__ == '__main__':
unittest.main()
| SymbolDatabaseRegistrationTest |
python | wandb__wandb | wandb/automations/_generated/integrations_by_entity.py | {
"start": 461,
"end": 575
} | class ____(GQLResult):
integrations: Optional[IntegrationsByEntityEntityIntegrations]
| IntegrationsByEntityEntity |
python | pandas-dev__pandas | asv_bench/benchmarks/index_object.py | {
"start": 5818,
"end": 6756
} | class ____:
# GH 24813
params = [10**3, 10**5]
def setup(self, N):
left = np.append(np.arange(N), np.array(0))
right = np.append(np.arange(1, N + 1), np.array(1))
self.intv = IntervalIndex.from_arrays(left, right)
self.intv._engine
self.intv2 = IntervalIndex.from_arrays(left + 1, right + 1)
self.intv2._engine
self.left = IntervalIndex.from_breaks(np.arange(N))
self.right = IntervalIndex.from_breaks(np.arange(N - 3, 2 * N - 3))
def time_monotonic_inc(self, N):
self.intv.is_monotonic_increasing
def time_is_unique(self, N):
self.intv.is_unique
def time_intersection(self, N):
self.left.intersection(self.right)
def time_intersection_one_duplicate(self, N):
self.intv.intersection(self.right)
def time_intersection_both_duplicate(self, N):
self.intv.intersection(self.intv2)
| IntervalIndexMethod |
python | openai__openai-python | src/openai/types/realtime/realtime_mcp_protocol_error.py | {
"start": 201,
"end": 313
} | class ____(BaseModel):
code: int
message: str
type: Literal["protocol_error"]
| RealtimeMcpProtocolError |
python | pytorch__pytorch | torch/_dynamo/metrics_context.py | {
"start": 7390,
"end": 8889
} | class ____:
def __init__(self, on_exit: OnExitType):
"""
Similar to MetricsContext, but used to gather the runtime metrics that are
decoupled from compilation, where there's not a natural place to insert a
context manager.
"""
self._on_exit = on_exit
self._metrics: dict[str, Any] = {}
self._start_time_ns: int = 0
def increment(
self, metric: str, value: int, extra: Optional[dict[str, Any]] = None
) -> None:
"""
Increment a metric by a given amount.
"""
if not self._metrics:
# Start timing on the first entry
self._start_time_ns = time.time_ns()
if metric not in self._metrics:
self._metrics[metric] = 0
self._metrics[metric] += value
if extra:
for k, v in extra.items():
if k not in self._metrics and v is not None:
self._metrics[k] = v
def finish(self) -> None:
"""
Call the on_exit function with the metrics gathered so far and reset.
"""
if self._metrics:
try:
end_time_ns = time.time_ns()
self._on_exit(
self._start_time_ns, end_time_ns, self._metrics, None, None
)
except Exception:
log.exception("Unexpected exception logging runtime metrics")
finally:
self._metrics = {}
| RuntimeMetricsContext |
python | tensorflow__tensorflow | third_party/xla/xla/python/xla_client.py | {
"start": 9747,
"end": 14136
} | class ____:
"""Python representation of a xla.ConvolutionDimensionNumbers protobuf."""
__slots__ = (
'input_batch_dimension',
'input_feature_dimension',
'input_spatial_dimensions',
'kernel_input_feature_dimension',
'kernel_output_feature_dimension',
'kernel_spatial_dimensions',
'output_batch_dimension',
'output_feature_dimension',
'output_spatial_dimensions',
)
def __init__(self):
self.input_batch_dimension = 0
self.input_feature_dimension = 0
self.input_spatial_dimensions = []
self.kernel_input_feature_dimension = 0
self.kernel_output_feature_dimension = 0
self.kernel_spatial_dimensions = []
self.output_batch_dimension = 0
self.output_feature_dimension = 0
self.output_spatial_dimensions = []
def make_convolution_dimension_numbers(
dimension_numbers: (
None | ConvolutionDimensionNumbers | tuple[str, str, str]
),
num_spatial_dimensions: int,
) -> ConvolutionDimensionNumbers:
"""Builds a ConvolutionDimensionNumbers object from a specification.
Args:
dimension_numbers: optional, either a ConvolutionDimensionNumbers object or
a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of length
N+2 identifying by position: (1) batch dimensions in lhs, rhs, and the
output with the character 'N', (2) feature dimensions in lhs and the
output with the character 'C', (3) input and output feature dimensions in
rhs with the characters 'I' and 'O' respectively, and (4) spatial
dimension correspondences between lhs, rhs, and the output using any
distinct characters. For example, to indicate dimension numbers consistent
with the Conv operation with two spatial dimensions, one could use
('NCHW', 'OIHW', 'NCHW'). As another example, to indicate dimension
numbers consistent with the TensorFlow Conv2D operation, one could use
('NHWC', 'HWIO', 'NHWC'). When using the latter form of convolution
dimension specification, window strides are associated with spatial
dimension character labels according to the order in which the labels
appear in the rhs_spec string, so that window_strides[0] is matched with
the dimension corresponding to the first character appearing in rhs_spec
that is not 'I' or 'O'. By default, use the same dimension numbering as
Conv and ConvWithGeneralPadding.
num_spatial_dimensions: the number of spatial dimensions.
Returns:
A `ConvolutionDimensionNumbers` object.
"""
if dimension_numbers is None:
nd = num_spatial_dimensions
dimension_numbers = ConvolutionDimensionNumbers()
dimension_numbers.input_batch_dimension = 0
dimension_numbers.input_feature_dimension = 1
dimension_numbers.output_batch_dimension = 0
dimension_numbers.output_feature_dimension = 1
dimension_numbers.kernel_output_feature_dimension = 0
dimension_numbers.kernel_input_feature_dimension = 1
dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd))
dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + nd))
dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd))
elif isinstance(dimension_numbers, tuple):
lhs_spec, rhs_spec, out_spec = dimension_numbers
dimension_numbers = ConvolutionDimensionNumbers()
dimension_numbers.input_batch_dimension = lhs_spec.index('N')
dimension_numbers.input_feature_dimension = lhs_spec.index('C')
dimension_numbers.output_batch_dimension = out_spec.index('N')
dimension_numbers.output_feature_dimension = out_spec.index('C')
dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O')
dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I')
dimension_numbers.kernel_spatial_dimensions.extend(
i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'}
)
dimension_numbers.input_spatial_dimensions.extend(
sorted(
(i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}),
key=lambda i: rhs_spec.index(lhs_spec[i]),
)
)
dimension_numbers.output_spatial_dimensions.extend(
sorted(
(i for i, c in enumerate(out_spec) if c not in {'N', 'C'}),
key=lambda i: rhs_spec.index(out_spec[i]),
)
)
return dimension_numbers
| ConvolutionDimensionNumbers |
python | doocs__leetcode | solution/2300-2399/2316.Count Unreachable Pairs of Nodes in an Undirected Graph/Solution.py | {
"start": 0,
"end": 512
} | class ____:
def countPairs(self, n: int, edges: List[List[int]]) -> int:
def dfs(i: int) -> int:
if vis[i]:
return 0
vis[i] = True
return 1 + sum(dfs(j) for j in g[i])
g = [[] for _ in range(n)]
for a, b in edges:
g[a].append(b)
g[b].append(a)
vis = [False] * n
ans = s = 0
for i in range(n):
t = dfs(i)
ans += s * t
s += t
return ans
| Solution |
python | pypa__pip | src/pip/_vendor/tomli/_parser.py | {
"start": 2556,
"end": 7135
} | class ____(ValueError):
"""An error raised if a document is not valid TOML.
Adds the following attributes to ValueError:
msg: The unformatted error message
doc: The TOML document being parsed
pos: The index of doc where parsing failed
lineno: The line corresponding to pos
colno: The column corresponding to pos
"""
def __init__(
self,
msg: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
doc: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
pos: Pos | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
*args: Any,
):
if (
args
or not isinstance(msg, str)
or not isinstance(doc, str)
or not isinstance(pos, int)
):
import warnings
warnings.warn(
"Free-form arguments for TOMLDecodeError are deprecated. "
"Please set 'msg' (str), 'doc' (str) and 'pos' (int) arguments only.",
DeprecationWarning,
stacklevel=2,
)
if pos is not DEPRECATED_DEFAULT:
args = pos, *args
if doc is not DEPRECATED_DEFAULT:
args = doc, *args
if msg is not DEPRECATED_DEFAULT:
args = msg, *args
ValueError.__init__(self, *args)
return
lineno = doc.count("\n", 0, pos) + 1
if lineno == 1:
colno = pos + 1
else:
colno = pos - doc.rindex("\n", 0, pos)
if pos >= len(doc):
coord_repr = "end of document"
else:
coord_repr = f"line {lineno}, column {colno}"
errmsg = f"{msg} (at {coord_repr})"
ValueError.__init__(self, errmsg)
self.msg = msg
self.doc = doc
self.pos = pos
self.lineno = lineno
self.colno = colno
def load(__fp: IO[bytes], *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
b = __fp.read()
try:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
) from None
return loads(s, parse_float=parse_float)
def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
try:
src = __s.replace("\r\n", "\n")
except (AttributeError, TypeError):
raise TypeError(
f"Expected str object, not '{type(__s).__qualname__}'"
) from None
pos = 0
out = Output()
header: Key = ()
parse_float = make_safe_parse_float(parse_float)
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise TOMLDecodeError("Invalid statement", src, pos)
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise TOMLDecodeError(
"Expected newline or end of document after a statement", src, pos
)
pos += 1
return out.data.dict
| TOMLDecodeError |
python | django-compressor__django-compressor | compressor/exceptions.py | {
"start": 232,
"end": 339
} | class ____(Exception):
"""
This exception is raised when a filter fails
"""
pass
| FilterError |
python | wandb__wandb | wandb/sdk/launch/inputs/internal.py | {
"start": 1586,
"end": 2089
} | class ____:
"""Arguments for the publish_job_input of Interface."""
def __init__(
self,
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
schema: Optional[dict] = None,
file_path: Optional[str] = None,
run_config: Optional[bool] = None,
):
self.include = include
self.exclude = exclude
self.schema = schema
self.file_path = file_path
self.run_config = run_config
| JobInputArguments |
python | facebookresearch__faiss | benchs/distributed_ondisk/combined_index.py | {
"start": 4914,
"end": 6376
} | class ____(CombinedIndex):
""" loads a CombinedIndex with the data from the big photodna index """
def __init__(self):
# set some paths
workdir = "/checkpoint/matthijs/ondisk_distributed/"
# empty index with the proper quantizer
indexfname = workdir + 'trained.faissindex'
# index that has some invlists that override the big one
masked_index_fname = None
invlist_fnames = [
'%s/hslices/slice%d.faissindex' % (workdir, i)
for i in range(50)
]
CombinedIndex.__init__(self, invlist_fnames, indexfname, masked_index_fname)
def ivecs_read(fname):
a = np.fromfile(fname, dtype='int32')
d = a[0]
return a.reshape(-1, d + 1)[:, 1:].copy()
def fvecs_read(fname):
return ivecs_read(fname).view('float32')
if __name__ == '__main__':
import time
ci = CombinedIndexDeep1B()
print('loaded index of size ', ci.index.ntotal)
deep1bdir = "/datasets01_101/simsearch/041218/deep1b/"
xq = fvecs_read(deep1bdir + "deep1B_queries.fvecs")
gt_fname = deep1bdir + "deep1B_groundtruth.ivecs"
gt = ivecs_read(gt_fname)
for nprobe in 1, 10, 100, 1000:
ci.set_nprobe(nprobe)
t0 = time.time()
D, I = ci.search(xq, 100)
t1 = time.time()
print('nprobe=%d 1-recall@1=%.4f t=%.2fs' % (
nprobe, (I[:, 0] == gt[:, 0]).sum() / len(xq),
t1 - t0
))
| CombinedIndexDeep1B |
python | Textualize__textual | examples/five_by_five.py | {
"start": 3320,
"end": 4028
} | class ____(Button):
"""Individual playable cell in the game."""
@staticmethod
def at(row: int, col: int) -> str:
"""Get the ID of the cell at the given location.
Args:
row (int): The row of the cell.
col (int): The column of the cell.
Returns:
str: A string ID for the cell.
"""
return f"cell-{row}-{col}"
def __init__(self, row: int, col: int) -> None:
"""Initialise the game cell.
Args:
row (int): The row of the cell.
col (int): The column of the cell.
"""
super().__init__("", id=self.at(row, col))
self.row = row
self.col = col
| GameCell |
python | jazzband__django-oauth-toolkit | tests/test_oidc_views.py | {
"start": 7160,
"end": 31173
} | class ____(TestCase):
def test_get_jwks_info(self):
self.oauth2_settings.OIDC_RSA_PRIVATE_KEYS_INACTIVE = []
expected_response = {
"keys": [
{
"alg": "RS256",
"use": "sig",
"kid": "s4a1o8mFEd1tATAIH96caMlu4hOxzBUaI2QTqbYNBHs",
"e": "AQAB",
"kty": "RSA",
"n": "mwmIeYdjZkLgalTuhvvwjvnB5vVQc7G9DHgOm20Hw524bLVTk49IXJ2Scw42HOmowWWX-oMVT_ca3ZvVIeffVSN1-TxVy2zB65s0wDMwhiMoPv35z9IKHGMZgl9vlyso_2b7daVF_FQDdgIayUn8TQylBxEU1RFfW0QSYOBdAt8", # noqa
}
]
}
response = self.client.get(reverse("oauth2_provider:jwks-info"))
self.assertEqual(response.status_code, 200)
assert response.json() == expected_response
def test_get_jwks_info_no_rsa_key(self):
self.oauth2_settings.OIDC_RSA_PRIVATE_KEY = None
response = self.client.get(reverse("oauth2_provider:jwks-info"))
self.assertEqual(response.status_code, 200)
assert response.json() == {"keys": []}
def test_get_jwks_info_multiple_rsa_keys(self):
expected_response = {
"keys": [
{
"alg": "RS256",
"e": "AQAB",
"kid": "s4a1o8mFEd1tATAIH96caMlu4hOxzBUaI2QTqbYNBHs",
"kty": "RSA",
"n": "mwmIeYdjZkLgalTuhvvwjvnB5vVQc7G9DHgOm20Hw524bLVTk49IXJ2Scw42HOmowWWX-oMVT_ca3ZvVIeffVSN1-TxVy2zB65s0wDMwhiMoPv35z9IKHGMZgl9vlyso_2b7daVF_FQDdgIayUn8TQylBxEU1RFfW0QSYOBdAt8", # noqa
"use": "sig",
},
{
"alg": "RS256",
"e": "AQAB",
"kid": "AJ_IkYJUFWqiKKE2FvPIESroTvownbaj0OzL939oIIE",
"kty": "RSA",
"n": "0qVzbcWg_fgygZ0liTaFeodD2bkinhj8gPJ9P2rPzvqG6ImI9YKkEk8Dxcc7eWcudnw5iEL8wx_tgooaRiHiYfUrFBBXfA15D_15PdX_5gG8rQbJ7XMxQrYoRUcVm2wQDB4fIuR7sTPqx9p8OR4f--BixOfM5Oa7SEUtQ8kvrlE", # noqa
"use": "sig",
},
]
}
response = self.client.get(reverse("oauth2_provider:jwks-info"))
self.assertEqual(response.status_code, 200)
assert response.json() == expected_response
def mock_request():
"""
Dummy request with an AnonymousUser attached.
"""
return mock_request_for(AnonymousUser())
def mock_request_for(user):
"""
Dummy request with the `user` attached.
"""
request = RequestFactory().get("")
request.user = user
return request
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_validate_logout_request(oidc_tokens, public_application, rp_settings):
oidc_tokens = oidc_tokens
application = oidc_tokens.application
client_id = application.client_id
id_token = oidc_tokens.id_token
view = RPInitiatedLogoutView()
view.request = mock_request_for(oidc_tokens.user)
assert view.validate_logout_request(
id_token_hint=None,
client_id=None,
post_logout_redirect_uri=None,
) == (None, None)
assert view.validate_logout_request(
id_token_hint=None,
client_id=client_id,
post_logout_redirect_uri=None,
) == (application, None)
assert view.validate_logout_request(
id_token_hint=None,
client_id=client_id,
post_logout_redirect_uri="http://example.org",
) == (application, None)
assert view.validate_logout_request(
id_token_hint=id_token,
client_id=None,
post_logout_redirect_uri="http://example.org",
) == (application, oidc_tokens.user)
assert view.validate_logout_request(
id_token_hint=id_token,
client_id=client_id,
post_logout_redirect_uri="http://example.org",
) == (application, oidc_tokens.user)
with pytest.raises(InvalidIDTokenError):
view.validate_logout_request(
id_token_hint="111",
client_id=public_application.client_id,
post_logout_redirect_uri="http://other.org",
)
with pytest.raises(ClientIdMissmatch):
view.validate_logout_request(
id_token_hint=id_token,
client_id=public_application.client_id,
post_logout_redirect_uri="http://other.org",
)
with pytest.raises(InvalidOIDCClientError):
view.validate_logout_request(
id_token_hint=None,
client_id=None,
post_logout_redirect_uri="http://example.org",
)
with pytest.raises(InvalidOIDCRedirectURIError):
view.validate_logout_request(
id_token_hint=None,
client_id=client_id,
post_logout_redirect_uri="example.org",
)
with pytest.raises(InvalidOIDCRedirectURIError):
view.validate_logout_request(
id_token_hint=None,
client_id=client_id,
post_logout_redirect_uri="imap://example.org",
)
with pytest.raises(InvalidOIDCRedirectURIError):
view.validate_logout_request(
id_token_hint=None,
client_id=client_id,
post_logout_redirect_uri="http://other.org",
)
with pytest.raises(InvalidOIDCRedirectURIError):
rp_settings.OIDC_RP_INITIATED_LOGOUT_STRICT_REDIRECT_URIS = True
view.validate_logout_request(
id_token_hint=None,
client_id=public_application.client_id,
post_logout_redirect_uri="http://other.org",
)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.parametrize("ALWAYS_PROMPT", [True, False])
def test_must_prompt(oidc_tokens, other_user, rp_settings, ALWAYS_PROMPT):
rp_settings.OIDC_RP_INITIATED_LOGOUT_ALWAYS_PROMPT = ALWAYS_PROMPT
oidc_tokens = oidc_tokens
assert RPInitiatedLogoutView(request=mock_request_for(oidc_tokens.user)).must_prompt(None) is True
assert (
RPInitiatedLogoutView(request=mock_request_for(oidc_tokens.user)).must_prompt(oidc_tokens.user)
== ALWAYS_PROMPT
)
assert RPInitiatedLogoutView(request=mock_request_for(other_user)).must_prompt(oidc_tokens.user) is True
assert (
RPInitiatedLogoutView(request=mock_request_for(AnonymousUser())).must_prompt(oidc_tokens.user)
is False
)
def test__load_id_token():
assert _load_id_token("Not a Valid ID Token.") == (None, None)
def is_logged_in(client):
return get_user(client).is_authenticated
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get(logged_in_client, rp_settings):
rsp = logged_in_client.get(reverse("oauth2_provider:rp-initiated-logout"), data={})
assert rsp.status_code == 200
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_id_token(logged_in_client, oidc_tokens, rp_settings):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"), data={"id_token_hint": oidc_tokens.id_token}
)
assert rsp.status_code == 302
assert rsp["Location"] == "http://testserver/"
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_revoked_id_token(logged_in_client, oidc_tokens, rp_settings):
validator = oauth2_settings.OAUTH2_VALIDATOR_CLASS()
validator._load_id_token(oidc_tokens.id_token).revoke()
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"), data={"id_token_hint": oidc_tokens.id_token}
)
assert rsp.status_code == 400
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_id_token_redirect(logged_in_client, oidc_tokens, rp_settings):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={"id_token_hint": oidc_tokens.id_token, "post_logout_redirect_uri": "http://example.org"},
)
assert rsp.status_code == 302
assert rsp["Location"] == "http://example.org"
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_id_token_redirect_with_state(logged_in_client, oidc_tokens, rp_settings):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_tokens.id_token,
"post_logout_redirect_uri": "http://example.org",
"state": "987654321",
},
)
assert rsp.status_code == 302
assert rsp["Location"] == "http://example.org?state=987654321"
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_id_token_missmatch_client_id(
logged_in_client, oidc_tokens, public_application, rp_settings
):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={"id_token_hint": oidc_tokens.id_token, "client_id": public_application.client_id},
)
assert rsp.status_code == 400
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_public_client_redirect_client_id(
logged_in_client, oidc_non_confidential_tokens, public_application, rp_settings
):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_non_confidential_tokens.id_token,
"client_id": public_application.client_id,
"post_logout_redirect_uri": "http://other.org",
},
)
assert rsp.status_code == 302
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_public_client_strict_redirect_client_id(
logged_in_client, oidc_non_confidential_tokens, public_application, oauth2_settings
):
oauth2_settings.update(presets.OIDC_SETTINGS_RP_LOGOUT_STRICT_REDIRECT_URI)
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_non_confidential_tokens.id_token,
"client_id": public_application.client_id,
"post_logout_redirect_uri": "http://other.org",
},
)
assert rsp.status_code == 400
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_get_client_id(logged_in_client, oidc_tokens, rp_settings):
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"), data={"client_id": oidc_tokens.application.client_id}
)
assert rsp.status_code == 200
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_post(logged_in_client, oidc_tokens, rp_settings):
form_data = {
"client_id": oidc_tokens.application.client_id,
}
rsp = logged_in_client.post(reverse("oauth2_provider:rp-initiated-logout"), form_data)
assert rsp.status_code == 400
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_post_allowed(logged_in_client, oidc_tokens, rp_settings):
form_data = {"client_id": oidc_tokens.application.client_id, "allow": True}
rsp = logged_in_client.post(reverse("oauth2_provider:rp-initiated-logout"), form_data)
assert rsp.status_code == 302
assert rsp["Location"] == "http://testserver/"
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_rp_initiated_logout_post_no_session(client, oidc_tokens, rp_settings):
form_data = {"client_id": oidc_tokens.application.client_id, "allow": True}
rsp = client.post(reverse("oauth2_provider:rp-initiated-logout"), form_data)
assert rsp.status_code == 302
assert rsp["Location"] == "http://testserver/"
assert not is_logged_in(client)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_rp_initiated_logout_expired_tokens_accept(logged_in_client, application, expired_id_token):
# Accepting expired (but otherwise valid and signed by us) tokens is enabled. Logout should go through.
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": expired_id_token,
"client_id": application.client_id,
},
)
assert rsp.status_code == 302
assert not is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT_DENY_EXPIRED)
def test_rp_initiated_logout_expired_tokens_deny(logged_in_client, application, expired_id_token):
# Expired tokens should not be accepted by default.
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": expired_id_token,
"client_id": application.client_id,
},
)
assert rsp.status_code == 400
assert is_logged_in(logged_in_client)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_load_id_token_accept_expired(expired_id_token):
id_token, _ = _load_id_token(expired_id_token)
assert isinstance(id_token, get_id_token_model())
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_load_id_token_wrong_aud(id_token_wrong_aud):
id_token, claims = _load_id_token(id_token_wrong_aud)
assert id_token is None
assert claims is None
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT_DENY_EXPIRED)
def test_load_id_token_deny_expired(expired_id_token):
id_token, claims = _load_id_token(expired_id_token)
assert id_token is None
assert claims is None
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_validate_claims_wrong_iss(id_token_wrong_iss):
id_token, claims = _load_id_token(id_token_wrong_iss)
assert id_token is not None
assert claims is not None
assert not _validate_claims(mock_request(), claims)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_validate_claims(oidc_tokens):
id_token, claims = _load_id_token(oidc_tokens.id_token)
assert claims is not None
assert _validate_claims(mock_request_for(oidc_tokens.user), claims)
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.parametrize("method", ["get", "post"])
def test_userinfo_endpoint(oidc_tokens, client, method):
auth_header = "Bearer %s" % oidc_tokens.access_token
rsp = getattr(client, method)(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION=auth_header,
)
data = rsp.json()
assert "sub" in data
assert data["sub"] == str(oidc_tokens.user.pk)
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_userinfo_endpoint_bad_token(oidc_tokens, client):
# No access token
rsp = client.get(reverse("oauth2_provider:user-info"))
assert rsp.status_code == 401
# Bad access token
rsp = client.get(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION="Bearer not-a-real-token",
)
assert rsp.status_code == 401
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_token_deletion_on_logout(oidc_tokens, logged_in_client, rp_settings):
AccessToken = get_access_token_model()
IDToken = get_id_token_model()
RefreshToken = get_refresh_token_model()
assert AccessToken.objects.count() == 1
assert IDToken.objects.count() == 1
assert RefreshToken.objects.count() == 1
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_tokens.id_token,
"client_id": oidc_tokens.application.client_id,
},
)
assert rsp.status_code == 302
assert not is_logged_in(logged_in_client)
# Check that all tokens have either been deleted or expired.
assert all([token.is_expired() for token in AccessToken.objects.all()])
assert all([token.is_expired() for token in IDToken.objects.all()])
assert all([token.revoked <= timezone.now() for token in RefreshToken.objects.all()])
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_token_deletion_on_logout_without_op_session_get(oidc_tokens, client, rp_settings):
AccessToken = get_access_token_model()
IDToken = get_id_token_model()
RefreshToken = get_refresh_token_model()
assert AccessToken.objects.count() == 1
assert IDToken.objects.count() == 1
assert RefreshToken.objects.count() == 1
rsp = client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_tokens.id_token,
"client_id": oidc_tokens.application.client_id,
},
)
assert rsp.status_code == 302
assert not is_logged_in(client)
# Check that all tokens are active.
assert AccessToken.objects.count() == 0
assert IDToken.objects.count() == 0
assert RefreshToken.objects.count() == 1
with pytest.raises(AccessToken.DoesNotExist):
AccessToken.objects.get()
with pytest.raises(IDToken.DoesNotExist):
IDToken.objects.get()
refresh_token = RefreshToken.objects.get()
assert refresh_token.revoked is not None
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_token_deletion_on_logout_without_op_session_post(oidc_tokens, client, rp_settings):
AccessToken = get_access_token_model()
IDToken = get_id_token_model()
RefreshToken = get_refresh_token_model()
assert AccessToken.objects.count() == 1
assert IDToken.objects.count() == 1
assert RefreshToken.objects.count() == 1
rsp = client.post(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_tokens.id_token,
"client_id": oidc_tokens.application.client_id,
"allow": True,
},
)
assertRedirects(rsp, "http://testserver/", fetch_redirect_response=False)
assert not is_logged_in(client)
# Check that all tokens have either been deleted or expired.
assert all(token.is_expired() for token in AccessToken.objects.all())
assert all(token.is_expired() for token in IDToken.objects.all())
assert all(token.revoked <= timezone.now() for token in RefreshToken.objects.all())
@pytest.mark.django_db(databases=retrieve_current_databases())
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT_KEEP_TOKENS)
def test_token_deletion_on_logout_disabled(oidc_tokens, logged_in_client, rp_settings):
rp_settings.OIDC_RP_INITIATED_LOGOUT_DELETE_TOKENS = False
AccessToken = get_access_token_model()
IDToken = get_id_token_model()
RefreshToken = get_refresh_token_model()
assert AccessToken.objects.count() == 1
assert IDToken.objects.count() == 1
assert RefreshToken.objects.count() == 1
rsp = logged_in_client.get(
reverse("oauth2_provider:rp-initiated-logout"),
data={
"id_token_hint": oidc_tokens.id_token,
"client_id": oidc_tokens.application.client_id,
},
)
assert rsp.status_code == 302
assert not is_logged_in(logged_in_client)
# Check that the tokens have not been expired or deleted.
assert AccessToken.objects.count() == 1
assert not any([token.is_expired() for token in AccessToken.objects.all()])
assert IDToken.objects.count() == 1
assert not any([token.is_expired() for token in IDToken.objects.all()])
assert RefreshToken.objects.count() == 1
assert not any([token.revoked is not None for token in RefreshToken.objects.all()])
EXAMPLE_EMAIL = "example.email@example.com"
def claim_user_email(request):
return EXAMPLE_EMAIL
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_userinfo_endpoint_custom_claims_callable(oidc_tokens, client, oauth2_settings):
class CustomValidator(OAuth2Validator):
oidc_claim_scope = None
def get_additional_claims(self):
return {
"username": claim_user_email,
"email": claim_user_email,
}
oidc_tokens.oauth2_settings.OAUTH2_VALIDATOR_CLASS = CustomValidator
auth_header = "Bearer %s" % oidc_tokens.access_token
rsp = client.get(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION=auth_header,
)
data = rsp.json()
assert "sub" in data
assert data["sub"] == str(oidc_tokens.user.pk)
assert "username" in data
assert data["username"] == EXAMPLE_EMAIL
assert "email" in data
assert data["email"] == EXAMPLE_EMAIL
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_userinfo_endpoint_custom_claims_email_scope_callable(
oidc_email_scope_tokens, client, oauth2_settings
):
class CustomValidator(OAuth2Validator):
def get_additional_claims(self):
return {
"username": claim_user_email,
"email": claim_user_email,
}
oidc_email_scope_tokens.oauth2_settings.OAUTH2_VALIDATOR_CLASS = CustomValidator
auth_header = "Bearer %s" % oidc_email_scope_tokens.access_token
rsp = client.get(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION=auth_header,
)
data = rsp.json()
assert "sub" in data
assert data["sub"] == str(oidc_email_scope_tokens.user.pk)
assert "username" not in data
assert "email" in data
assert data["email"] == EXAMPLE_EMAIL
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_userinfo_endpoint_custom_claims_plain(oidc_tokens, client, oauth2_settings):
class CustomValidator(OAuth2Validator):
oidc_claim_scope = None
def get_additional_claims(self, request):
return {
"username": EXAMPLE_EMAIL,
"email": EXAMPLE_EMAIL,
}
oidc_tokens.oauth2_settings.OAUTH2_VALIDATOR_CLASS = CustomValidator
auth_header = "Bearer %s" % oidc_tokens.access_token
rsp = client.get(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION=auth_header,
)
data = rsp.json()
assert "sub" in data
assert data["sub"] == str(oidc_tokens.user.pk)
assert "username" in data
assert data["username"] == EXAMPLE_EMAIL
assert "email" in data
assert data["email"] == EXAMPLE_EMAIL
@pytest.mark.django_db(databases=retrieve_current_databases())
def test_userinfo_endpoint_custom_claims_email_scopeplain(oidc_email_scope_tokens, client, oauth2_settings):
class CustomValidator(OAuth2Validator):
def get_additional_claims(self, request):
return {
"username": EXAMPLE_EMAIL,
"email": EXAMPLE_EMAIL,
}
oidc_email_scope_tokens.oauth2_settings.OAUTH2_VALIDATOR_CLASS = CustomValidator
auth_header = "Bearer %s" % oidc_email_scope_tokens.access_token
rsp = client.get(
reverse("oauth2_provider:user-info"),
HTTP_AUTHORIZATION=auth_header,
)
data = rsp.json()
assert "sub" in data
assert data["sub"] == str(oidc_email_scope_tokens.user.pk)
assert "username" not in data
assert "email" in data
assert data["email"] == EXAMPLE_EMAIL
| TestJwksInfoView |
python | pyca__cryptography | src/cryptography/hazmat/_oid.py | {
"start": 7938,
"end": 8758
} | class ____:
SHA1 = ObjectIdentifier("1.3.14.3.2.26")
SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.2.4")
SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.2.1")
SHA384 = ObjectIdentifier("2.16.840.1.101.3.4.2.2")
SHA512 = ObjectIdentifier("2.16.840.1.101.3.4.2.3")
SHA3_224 = ObjectIdentifier("1.3.6.1.4.1.37476.3.2.1.99.7.224")
SHA3_256 = ObjectIdentifier("1.3.6.1.4.1.37476.3.2.1.99.7.256")
SHA3_384 = ObjectIdentifier("1.3.6.1.4.1.37476.3.2.1.99.7.384")
SHA3_512 = ObjectIdentifier("1.3.6.1.4.1.37476.3.2.1.99.7.512")
SHA3_224_NIST = ObjectIdentifier("2.16.840.1.101.3.4.2.7")
SHA3_256_NIST = ObjectIdentifier("2.16.840.1.101.3.4.2.8")
SHA3_384_NIST = ObjectIdentifier("2.16.840.1.101.3.4.2.9")
SHA3_512_NIST = ObjectIdentifier("2.16.840.1.101.3.4.2.10")
| HashAlgorithmOID |
python | bokeh__bokeh | src/bokeh/util/dataclasses.py | {
"start": 1569,
"end": 2552
} | class ____:
def __repr__(self) -> str:
return "Unspecified"
Unspecified = _UnspecifiedType()
_T = TypeVar("_T")
NotRequired: TypeAlias = _UnspecifiedType | _T
def entries(obj: Any) -> Iterable[tuple[str, Any]]:
""" Iterate over a dataclass' fields and their values. """
if is_dataclass(obj):
for f in fields(obj):
value = getattr(obj, f.name)
if value is not Unspecified:
yield (f.name, value)
else:
raise TypeError(f"expected a dataclass, got {type(obj)}")
def is_dataclass(obj: Any) -> bool:
return hasattr(type(obj), "__dataclass_fields__")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| _UnspecifiedType |
python | davidhalter__jedi | test/completion/docstring.py | {
"start": 2669,
"end": 3005
} | class ____(object):
def __init__(self):
self.teststr = ""
"""
# jedi issue #210
"""
def test(self):
#? ['teststr']
self.teststr
# -----------------
# statement docstrings
# -----------------
d = ''
""" bsdf """
#? str()
d.upper()
# -----------------
# class docstrings
# -----------------
| Test |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 21441,
"end": 24852
} | class ____(Numeric):
"""
The base class for floating-point datatypes.
"""
default = np.nan
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Numeric.__init__(self, field, config, pos)
precision = field.precision
width = field.width
if precision is None:
format_parts = ["{!s:>"]
else:
format_parts = ["{:"]
if width is not None:
format_parts.append(str(width))
if precision is not None:
if precision.startswith("E"):
format_parts.append(f".{int(precision[1:]):d}g")
elif precision.startswith("F"):
format_parts.append(f".{int(precision[1:]):d}f")
else:
format_parts.append(f".{int(precision):d}f")
format_parts.append("}")
self._output_format = "".join(format_parts)
self.nan = np.array(np.nan, self.format)
if self.null is None:
self._null_output = "NaN"
self._null_binoutput = self.binoutput(self.nan, False)
self.filter_array = self._filter_nan
else:
self._null_output = self.output(np.asarray(self.null), False)
self._null_binoutput = self.binoutput(np.asarray(self.null), False)
self.filter_array = self._filter_null
if config.get("verify", "ignore") == "exception":
self.parse = self._parse_pedantic
else:
self.parse = self._parse_permissive
def supports_empty_values(self, config):
return True
def _parse_pedantic(self, value, config=None, pos=None):
if value.strip() == "":
return self.null, True
f = float(value)
return f, self.is_null(f)
def _parse_permissive(self, value, config=None, pos=None):
try:
f = float(value)
return f, self.is_null(f)
except ValueError:
# IRSA VOTables use the word 'null' to specify empty values,
# but this is not defined in the VOTable spec.
if value.strip() != "":
vo_warn(W30, value, config, pos)
return self.null, True
@property
def output_format(self):
return self._output_format
def output(self, value, mask):
if mask:
return self._null_output
if np.isfinite(value):
if not np.isscalar(value):
value = value.dtype.type(value)
result = self._output_format.format(value)
if result.startswith("array"):
raise RuntimeError()
if self._output_format[2] == "s" and result.endswith(".0"):
result = result[:-2]
return result
elif np.isnan(value):
return "NaN"
elif np.isposinf(value):
return "+InF"
elif np.isneginf(value):
return "-InF"
# Should never raise
vo_raise(f"Invalid floating point value '{value}'")
def binoutput(self, value, mask):
if mask:
return self._null_binoutput
value = _ensure_bigendian(value)
return value.tobytes()
def _filter_nan(self, value, mask):
return np.where(mask, np.nan, value)
def _filter_null(self, value, mask):
return np.where(mask, self.null, value)
| FloatingPoint |
python | google__jax | tests/lax_numpy_operators_test.py | {
"start": 22182,
"end": 23575
} | class ____:
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
try:
lax.broadcast_shapes(*(() if s in scalar_shapes else s for s in shapes))
except ValueError:
return False
else:
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _get_testcase_name(index, params):
dtypes = "_".join(str(dt.__name__) for dt in params['dtypes'])
name = params['op_name'] if "op_name" in params else params["name"]
return f"{index}_{name}_{dtypes}"
def _create_named_parameters(iter_params):
for i, params in enumerate(iter_params):
yield dict(params, **{'testcase_name': _get_testcase_name(i, params)})
| _OverrideNothing |
python | spyder-ide__spyder | spyder/utils/qthelpers.py | {
"start": 18066,
"end": 19427
} | class ____(QObject):
"""
Object that keep references to non-modal dialog boxes for another QObject,
typically a QMainWindow or any kind of QWidget
"""
def __init__(self):
QObject.__init__(self)
self.dialogs = {}
def show(self, dialog):
"""Generic method to show a non-modal dialog and keep reference
to the Qt C++ object"""
for dlg in list(self.dialogs.values()):
if str(dlg.windowTitle()) == str(dialog.windowTitle()):
dlg.show()
dlg.raise_()
break
else:
dialog.show()
self.dialogs[id(dialog)] = dialog
dialog.accepted.connect(
lambda eid=id(dialog): self.dialog_finished(eid))
dialog.rejected.connect(
lambda eid=id(dialog): self.dialog_finished(eid))
def dialog_finished(self, dialog_id):
"""Manage non-modal dialog boxes"""
return self.dialogs.pop(dialog_id)
def close_all(self):
"""Close all opened dialog boxes"""
for dlg in list(self.dialogs.values()):
dlg.reject()
def get_filetype_icon(fname):
"""Return file type icon"""
ext = osp.splitext(fname)[1]
if ext.startswith('.'):
ext = ext[1:]
return ima.get_icon("%s.png" % ext, ima.icon('FileIcon'))
| DialogManager |
python | pandas-dev__pandas | scripts/check_for_inconsistent_pandas_namespace.py | {
"start": 1175,
"end": 4349
} | class ____(ast.NodeVisitor):
def __init__(self) -> None:
self.pandas_namespace: MutableMapping[OffsetWithNamespace, str] = {}
self.imported_from_pandas: set[str] = set()
def visit_Attribute(self, node: ast.Attribute) -> None:
if isinstance(node.value, ast.Name) and node.value.id in {"pandas", "pd"}:
offset_with_namespace = OffsetWithNamespace(
node.lineno, node.col_offset, node.value.id
)
self.pandas_namespace[offset_with_namespace] = node.attr
self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module is not None and "pandas" in node.module:
self.imported_from_pandas.update(name.name for name in node.names)
self.generic_visit(node)
def replace_inconsistent_pandas_namespace(visitor: Visitor, content: str) -> str:
from tokenize_rt import (
reversed_enumerate,
src_to_tokens,
tokens_to_src,
)
tokens = src_to_tokens(content)
for n, i in reversed_enumerate(tokens):
offset_with_namespace = OffsetWithNamespace(i.offset[0], i.offset[1], i.src)
if (
offset_with_namespace in visitor.pandas_namespace
and visitor.pandas_namespace[offset_with_namespace]
in visitor.imported_from_pandas
):
# Replace `pd`
tokens[n] = i._replace(src="")
# Replace `.`
tokens[n + 1] = tokens[n + 1]._replace(src="")
new_src: str = tokens_to_src(tokens)
return new_src
def check_for_inconsistent_pandas_namespace(
content: str, path: str, *, replace: bool
) -> str | None:
tree = ast.parse(content)
visitor = Visitor()
visitor.visit(tree)
inconsistencies = visitor.imported_from_pandas.intersection(
visitor.pandas_namespace.values()
)
if not inconsistencies:
# No inconsistent namespace usage, nothing to replace.
return None
if not replace:
inconsistency = inconsistencies.pop()
lineno, col_offset, prefix = next(
key for key, val in visitor.pandas_namespace.items() if val == inconsistency
)
msg = ERROR_MESSAGE.format(
lineno=lineno,
col_offset=col_offset,
prefix=prefix,
name=inconsistency,
path=path,
)
sys.stdout.write(msg)
sys.exit(1)
return replace_inconsistent_pandas_namespace(visitor, content)
def main(argv: Sequence[str] | None = None) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="*")
parser.add_argument("--replace", action="store_true")
args = parser.parse_args(argv)
for path in args.paths:
with open(path, encoding="utf-8") as fd:
content = fd.read()
new_content = check_for_inconsistent_pandas_namespace(
content, path, replace=args.replace
)
if not args.replace or new_content is None:
continue
with open(path, "w", encoding="utf-8") as fd:
fd.write(new_content)
if __name__ == "__main__":
main()
| Visitor |
python | imageio__imageio | tests/test_core.py | {
"start": 658,
"end": 849
} | class ____:
"""A dummy plugin to test plugin resultion and dynamic loading"""
def __init__(self, request):
raise InitializationError("Can not read anything")
| UselessDummyPlugin |
python | coleifer__peewee | tests/sql.py | {
"start": 54580,
"end": 72000
} | class ____(BaseTestCase):
def test_partition_unordered(self):
partition = [Register.category]
query = (Register
.select(
Register.category,
Register.value,
fn.AVG(Register.value).over(partition_by=partition))
.order_by(Register.id))
self.assertSQL(query, (
'SELECT "t1"."category", "t1"."value", AVG("t1"."value") '
'OVER (PARTITION BY "t1"."category") '
'FROM "register" AS "t1" ORDER BY "t1"."id"'), [])
def test_ordered_unpartitioned(self):
query = (Register
.select(
Register.value,
fn.RANK().over(order_by=[Register.value])))
self.assertSQL(query, (
'SELECT "t1"."value", RANK() OVER (ORDER BY "t1"."value") '
'FROM "register" AS "t1"'), [])
def test_ordered_partitioned(self):
query = Register.select(
Register.value,
fn.SUM(Register.value).over(
order_by=Register.id,
partition_by=Register.category).alias('rsum'))
self.assertSQL(query, (
'SELECT "t1"."value", SUM("t1"."value") '
'OVER (PARTITION BY "t1"."category" ORDER BY "t1"."id") AS "rsum" '
'FROM "register" AS "t1"'), [])
def test_empty_over(self):
query = (Register
.select(Register.value, fn.LAG(Register.value, 1).over())
.order_by(Register.value))
self.assertSQL(query, (
'SELECT "t1"."value", LAG("t1"."value", ?) OVER () '
'FROM "register" AS "t1" '
'ORDER BY "t1"."value"'), [1])
def test_frame(self):
query = (Register
.select(
Register.value,
fn.AVG(Register.value).over(
partition_by=[Register.category],
start=Window.preceding(),
end=Window.following(2))))
self.assertSQL(query, (
'SELECT "t1"."value", AVG("t1"."value") '
'OVER (PARTITION BY "t1"."category" '
'ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) '
'FROM "register" AS "t1"'), [])
query = (Register
.select(Register.value, fn.AVG(Register.value).over(
partition_by=[Register.category],
order_by=[Register.value],
start=Window.CURRENT_ROW,
end=Window.following())))
self.assertSQL(query, (
'SELECT "t1"."value", AVG("t1"."value") '
'OVER (PARTITION BY "t1"."category" '
'ORDER BY "t1"."value" '
'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) '
'FROM "register" AS "t1"'), [])
def test_frame_types(self):
def assertFrame(over_kwargs, expected):
query = Register.select(
Register.value,
fn.SUM(Register.value).over(**over_kwargs))
sql, params = __sql__(query)
match_obj = re.search(r'OVER \((.*?)\) FROM', sql)
self.assertTrue(match_obj is not None)
self.assertEqual(match_obj.groups()[0], expected)
self.assertEqual(params, [])
# No parameters -- empty OVER().
assertFrame({}, (''))
# Explicitly specify RANGE / ROWS frame-types.
assertFrame({'frame_type': Window.RANGE}, 'RANGE UNBOUNDED PRECEDING')
assertFrame({'frame_type': Window.ROWS}, 'ROWS UNBOUNDED PRECEDING')
# Start and end boundaries.
assertFrame({'start': Window.preceding(), 'end': Window.following()},
'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING')
assertFrame({
'start': Window.preceding(),
'end': Window.following(),
'frame_type': Window.RANGE,
}, 'RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING')
assertFrame({
'start': Window.preceding(),
'end': Window.following(),
'frame_type': Window.ROWS,
}, 'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING')
# Start boundary.
assertFrame({'start': Window.preceding()}, 'ROWS UNBOUNDED PRECEDING')
assertFrame({'start': Window.preceding(), 'frame_type': Window.RANGE},
'RANGE UNBOUNDED PRECEDING')
assertFrame({'start': Window.preceding(), 'frame_type': Window.ROWS},
'ROWS UNBOUNDED PRECEDING')
# Ordered or partitioned.
assertFrame({'order_by': Register.value}, 'ORDER BY "t1"."value"')
assertFrame({'frame_type': Window.RANGE, 'order_by': Register.value},
'ORDER BY "t1"."value" RANGE UNBOUNDED PRECEDING')
assertFrame({'frame_type': Window.ROWS, 'order_by': Register.value},
'ORDER BY "t1"."value" ROWS UNBOUNDED PRECEDING')
assertFrame({'partition_by': Register.category},
'PARTITION BY "t1"."category"')
assertFrame({
'frame_type': Window.RANGE,
'partition_by': Register.category,
}, 'PARTITION BY "t1"."category" RANGE UNBOUNDED PRECEDING')
assertFrame({
'frame_type': Window.ROWS,
'partition_by': Register.category,
}, 'PARTITION BY "t1"."category" ROWS UNBOUNDED PRECEDING')
# Ordering and boundaries.
assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW,
'end': Window.following()},
('ORDER BY "t1"."value" '
'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING'))
assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW,
'end': Window.following(), 'frame_type': Window.RANGE},
('ORDER BY "t1"."value" '
'RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING'))
assertFrame({'order_by': Register.value, 'start': Window.CURRENT_ROW,
'end': Window.following(), 'frame_type': Window.ROWS},
('ORDER BY "t1"."value" '
'ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING'))
def test_running_total(self):
EventLog = Table('evtlog', ('id', 'timestamp', 'data'))
w = fn.SUM(EventLog.timestamp).over(order_by=[EventLog.timestamp])
query = (EventLog
.select(EventLog.timestamp, EventLog.data, w.alias('elapsed'))
.order_by(EventLog.timestamp))
self.assertSQL(query, (
'SELECT "t1"."timestamp", "t1"."data", '
'SUM("t1"."timestamp") OVER (ORDER BY "t1"."timestamp") '
'AS "elapsed" '
'FROM "evtlog" AS "t1" ORDER BY "t1"."timestamp"'), [])
w = fn.SUM(EventLog.timestamp).over(
order_by=[EventLog.timestamp],
partition_by=[EventLog.data])
query = (EventLog
.select(EventLog.timestamp, EventLog.data, w.alias('elapsed'))
.order_by(EventLog.timestamp))
self.assertSQL(query, (
'SELECT "t1"."timestamp", "t1"."data", '
'SUM("t1"."timestamp") OVER '
'(PARTITION BY "t1"."data" ORDER BY "t1"."timestamp") AS "elapsed"'
' FROM "evtlog" AS "t1" ORDER BY "t1"."timestamp"'), [])
def test_named_window(self):
window = Window(partition_by=[Register.category])
query = (Register
.select(
Register.category,
Register.value,
fn.AVG(Register.value).over(window))
.window(window))
self.assertSQL(query, (
'SELECT "t1"."category", "t1"."value", AVG("t1"."value") '
'OVER w '
'FROM "register" AS "t1" '
'WINDOW w AS (PARTITION BY "t1"."category")'), [])
window = Window(
partition_by=[Register.category],
order_by=[Register.value.desc()])
query = (Register
.select(
Register.value,
fn.RANK().over(window))
.window(window))
self.assertSQL(query, (
'SELECT "t1"."value", RANK() OVER w '
'FROM "register" AS "t1" '
'WINDOW w AS ('
'PARTITION BY "t1"."category" '
'ORDER BY "t1"."value" DESC)'), [])
def test_multiple_windows(self):
w1 = Window(partition_by=[Register.category]).alias('w1')
w2 = Window(order_by=[Register.value]).alias('w2')
query = (Register
.select(
Register.value,
fn.AVG(Register.value).over(w1),
fn.RANK().over(w2))
.window(w1, w2))
self.assertSQL(query, (
'SELECT "t1"."value", AVG("t1"."value") OVER w1, RANK() OVER w2 '
'FROM "register" AS "t1" '
'WINDOW w1 AS (PARTITION BY "t1"."category"), '
'w2 AS (ORDER BY "t1"."value")'), [])
def test_alias_window(self):
w = Window(order_by=Register.value).alias('wx')
query = Register.select(Register.value, fn.RANK().over(w)).window(w)
# We can re-alias the window and it's updated alias is reflected
# correctly in the final query.
w.alias('wz')
self.assertSQL(query, (
'SELECT "t1"."value", RANK() OVER wz '
'FROM "register" AS "t1" '
'WINDOW wz AS (ORDER BY "t1"."value")'), [])
def test_reuse_window(self):
EventLog = Table('evt', ('id', 'timestamp', 'key'))
window = Window(partition_by=[EventLog.key],
order_by=[EventLog.timestamp])
query = (EventLog
.select(EventLog.timestamp, EventLog.key,
fn.NTILE(4).over(window).alias('quartile'),
fn.NTILE(5).over(window).alias('quintile'),
fn.NTILE(100).over(window).alias('percentile'))
.order_by(EventLog.timestamp)
.window(window))
self.assertSQL(query, (
'SELECT "t1"."timestamp", "t1"."key", '
'NTILE(?) OVER w AS "quartile", '
'NTILE(?) OVER w AS "quintile", '
'NTILE(?) OVER w AS "percentile" '
'FROM "evt" AS "t1" '
'WINDOW w AS ('
'PARTITION BY "t1"."key" ORDER BY "t1"."timestamp") '
'ORDER BY "t1"."timestamp"'), [4, 5, 100])
def test_filter_clause(self):
condsum = fn.SUM(Register.value).filter(Register.value > 1).over(
order_by=[Register.id], partition_by=[Register.category],
start=Window.preceding(1))
query = (Register
.select(Register.category, Register.value, condsum)
.order_by(Register.category))
self.assertSQL(query, (
'SELECT "t1"."category", "t1"."value", SUM("t1"."value") FILTER ('
'WHERE ("t1"."value" > ?)) OVER (PARTITION BY "t1"."category" '
'ORDER BY "t1"."id" ROWS 1 PRECEDING) '
'FROM "register" AS "t1" '
'ORDER BY "t1"."category"'), [1])
def test_window_in_orderby(self):
Register = Table('register', ['id', 'value'])
w = Window(partition_by=[Register.value], order_by=[Register.id])
query = (Register
.select()
.window(w)
.order_by(fn.FIRST_VALUE(Register.id).over(w)))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" '
'WINDOW w AS (PARTITION BY "t1"."value" ORDER BY "t1"."id") '
'ORDER BY FIRST_VALUE("t1"."id") OVER w'), [])
fv = fn.FIRST_VALUE(Register.id).over(
partition_by=[Register.value],
order_by=[Register.id])
query = Register.select().order_by(fv)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" '
'ORDER BY FIRST_VALUE("t1"."id") '
'OVER (PARTITION BY "t1"."value" ORDER BY "t1"."id")'), [])
def test_window_extends(self):
Tbl = Table('tbl', ('b', 'c'))
w1 = Window(partition_by=[Tbl.b], alias='win1')
w2 = Window(extends=w1, order_by=[Tbl.c], alias='win2')
query = Tbl.select(fn.GROUP_CONCAT(Tbl.c).over(w2)).window(w1, w2)
self.assertSQL(query, (
'SELECT GROUP_CONCAT("t1"."c") OVER win2 FROM "tbl" AS "t1" '
'WINDOW win1 AS (PARTITION BY "t1"."b"), '
'win2 AS (win1 ORDER BY "t1"."c")'), [])
w1 = Window(partition_by=[Tbl.b], alias='w1')
w2 = Window(extends=w1).alias('w2')
w3 = Window(extends=w2).alias('w3')
w4 = Window(extends=w3, order_by=[Tbl.c]).alias('w4')
query = (Tbl
.select(fn.GROUP_CONCAT(Tbl.c).over(w4))
.window(w1, w2, w3, w4))
self.assertSQL(query, (
'SELECT GROUP_CONCAT("t1"."c") OVER w4 FROM "tbl" AS "t1" '
'WINDOW w1 AS (PARTITION BY "t1"."b"), w2 AS (w1), w3 AS (w2), '
'w4 AS (w3 ORDER BY "t1"."c")'), [])
def test_window_ranged(self):
Tbl = Table('tbl', ('a', 'b'))
query = (Tbl
.select(Tbl.a, fn.SUM(Tbl.b).over(
order_by=[Tbl.a.desc()],
frame_type=Window.RANGE,
start=Window.preceding(1),
end=Window.following(2)))
.order_by(Tbl.a.asc()))
self.assertSQL(query, (
'SELECT "t1"."a", SUM("t1"."b") OVER ('
'ORDER BY "t1"."a" DESC RANGE BETWEEN 1 PRECEDING AND 2 FOLLOWING)'
' FROM "tbl" AS "t1" ORDER BY "t1"."a" ASC'), [])
query = (Tbl
.select(Tbl.a, fn.SUM(Tbl.b).over(
order_by=[Tbl.a],
frame_type=Window.GROUPS,
start=Window.preceding(3),
end=Window.preceding(1))))
self.assertSQL(query, (
'SELECT "t1"."a", SUM("t1"."b") OVER ('
'ORDER BY "t1"."a" GROUPS BETWEEN 3 PRECEDING AND 1 PRECEDING) '
'FROM "tbl" AS "t1"'), [])
query = (Tbl
.select(Tbl.a, fn.SUM(Tbl.b).over(
order_by=[Tbl.a],
frame_type=Window.GROUPS,
start=Window.following(1),
end=Window.following(5))))
self.assertSQL(query, (
'SELECT "t1"."a", SUM("t1"."b") OVER ('
'ORDER BY "t1"."a" GROUPS BETWEEN 1 FOLLOWING AND 5 FOLLOWING) '
'FROM "tbl" AS "t1"'), [])
def test_window_frametypes(self):
Tbl = Table('tbl', ('b', 'c'))
fts = (('as_range', Window.RANGE, 'RANGE'),
('as_rows', Window.ROWS, 'ROWS'),
('as_groups', Window.GROUPS, 'GROUPS'))
for method, arg, sql in fts:
w = getattr(Window(order_by=[Tbl.b + 1]), method)()
self.assertSQL(Tbl.select(fn.SUM(Tbl.c).over(w)).window(w), (
'SELECT SUM("t1"."c") OVER w FROM "tbl" AS "t1" '
'WINDOW w AS (ORDER BY ("t1"."b" + ?) '
'%s UNBOUNDED PRECEDING)') % sql, [1])
query = Tbl.select(fn.SUM(Tbl.c)
.over(order_by=[Tbl.b + 1], frame_type=arg))
self.assertSQL(query, (
'SELECT SUM("t1"."c") OVER (ORDER BY ("t1"."b" + ?) '
'%s UNBOUNDED PRECEDING) FROM "tbl" AS "t1"') % sql, [1])
def test_window_frame_exclusion(self):
Tbl = Table('tbl', ('b', 'c'))
fts = ((Window.CURRENT_ROW, 'CURRENT ROW'),
(Window.TIES, 'TIES'),
(Window.NO_OTHERS, 'NO OTHERS'),
(Window.GROUP, 'GROUP'))
for arg, sql in fts:
query = Tbl.select(fn.MAX(Tbl.b).over(
order_by=[Tbl.c],
start=Window.preceding(4),
end=Window.following(),
frame_type=Window.ROWS,
exclude=arg))
self.assertSQL(query, (
'SELECT MAX("t1"."b") OVER (ORDER BY "t1"."c" '
'ROWS BETWEEN 4 PRECEDING AND UNBOUNDED FOLLOWING '
'EXCLUDE %s) FROM "tbl" AS "t1"') % sql, [])
def test_filter_window(self):
# Example derived from sqlite window test 5.1.3.2.
Tbl = Table('tbl', ('a', 'c'))
win = Window(partition_by=fn.COALESCE(Tbl.a, ''),
frame_type=Window.RANGE,
start=Window.CURRENT_ROW,
end=Window.following(),
exclude=Window.NO_OTHERS)
query = (Tbl
.select(fn.SUM(Tbl.c).filter(Tbl.c < 5).over(win),
fn.RANK().over(win),
fn.DENSE_RANK().over(win))
.window(win))
self.assertSQL(query, (
'SELECT SUM("t1"."c") FILTER (WHERE ("t1"."c" < ?)) OVER w, '
'RANK() OVER w, DENSE_RANK() OVER w '
'FROM "tbl" AS "t1" '
'WINDOW w AS (PARTITION BY COALESCE("t1"."a", ?) '
'RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING '
'EXCLUDE NO OTHERS)'), [5, ''])
| TestWindowFunctions |
python | getsentry__sentry | src/sentry/audit_log/events.py | {
"start": 3398,
"end": 3831
} | class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=8, name="MEMBER_PENDING", api_name="member.pending")
def render(self, audit_log_entry: AuditLogEntry) -> str:
user_display_name = _get_member_display(
audit_log_entry.data.get("email"), audit_log_entry.target_user
)
return f"required member {user_display_name} to setup 2FA"
| MemberPendingAuditLogEvent |
python | kamyu104__LeetCode-Solutions | Python/find-the-closest-marked-node.py | {
"start": 227,
"end": 1193
} | class ____(object):
def minimumDistance(self, n, edges, s, marked):
"""
:type n: int
:type edges: List[List[int]]
:type s: int
:type marked: List[int]
:rtype: int
"""
def dijkstra(start):
best = [float("inf")]*len(adj)
best[start] = 0
min_heap = [(0, start)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr > best[u]:
continue
if u in target:
return curr
for v, w in adj[u]:
if curr+w >= best[v]:
continue
best[v] = curr+w
heapq.heappush(min_heap, (best[v], v))
return -1
target = set(marked)
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
adj[u].append((v, w))
return dijkstra(s)
| Solution |
python | PrefectHQ__prefect | src/prefect/cli/transfer/_migratable_resources/concurrency_limits.py | {
"start": 538,
"end": 3032
} | class ____(
MigratableResource[GlobalConcurrencyLimitResponse]
):
_instances: dict[uuid.UUID, Self] = {}
def __init__(self, global_concurrency_limit: GlobalConcurrencyLimitResponse):
self.source_global_concurrency_limit = global_concurrency_limit
self.destination_global_concurrency_limit: (
GlobalConcurrencyLimitResponse | None
) = None
@property
def source_id(self) -> uuid.UUID:
return self.source_global_concurrency_limit.id
@property
def destination_id(self) -> uuid.UUID | None:
return (
self.destination_global_concurrency_limit.id
if self.destination_global_concurrency_limit
else None
)
@classmethod
async def construct(cls, obj: GlobalConcurrencyLimitResponse) -> Self:
if obj.id in cls._instances:
return cls._instances[obj.id]
instance = cls(obj)
cls._instances[obj.id] = instance
return instance
@classmethod
async def get_instance(
cls, id: uuid.UUID
) -> "MigratableResource[GlobalConcurrencyLimitResponse] | None":
if id in cls._instances:
return cls._instances[id]
return None
async def get_dependencies(self) -> "list[MigratableProtocol]":
return []
async def migrate(self) -> None:
async with get_client() as client:
try:
await client.create_global_concurrency_limit(
concurrency_limit=GlobalConcurrencyLimitCreate(
name=self.source_global_concurrency_limit.name,
limit=self.source_global_concurrency_limit.limit,
active=self.source_global_concurrency_limit.active,
active_slots=self.source_global_concurrency_limit.active_slots,
),
)
self.destination_global_concurrency_limit = (
await client.read_global_concurrency_limit_by_name(
self.source_global_concurrency_limit.name
)
)
except ObjectAlreadyExists:
self.destination_global_concurrency_limit = (
await client.read_global_concurrency_limit_by_name(
self.source_global_concurrency_limit.name
)
)
raise TransferSkipped("Already exists")
| MigratableGlobalConcurrencyLimit |
python | run-llama__llama_index | llama-dev/llama_dev/utils.py | {
"start": 232,
"end": 7278
} | class ____(str, Enum):
MAJOR = "major"
MINOR = "minor"
PATCH = "patch"
def bump_version(current_version: str, bump_type: BumpType) -> str:
"""Bump a version string according to semver rules."""
v = Version(current_version)
# Parse the version components
release = v.release
major = release[0] if len(release) > 0 else 0
minor = release[1] if len(release) > 1 else 0
micro = release[2] if len(release) > 2 else 0
version_str = ""
if bump_type == BumpType.MAJOR:
version_str = f"{major + 1}.0.0"
elif bump_type == BumpType.MINOR:
version_str = f"{major}.{minor + 1}.0"
elif bump_type == BumpType.PATCH:
version_str = f"{major}.{minor}.{micro + 1}"
return version_str
def update_pyproject_version(package_path: Path, new_version: str) -> None:
"""Update the version in a pyproject.toml file."""
pyproject_path = package_path / "pyproject.toml"
# Read the file content
with open(pyproject_path, "r") as f:
content = f.read()
pattern = r'^version = "[^"]+"'
new_content = re.sub(
pattern, f'version = "{new_version}"', content, flags=re.MULTILINE
)
# Write the updated content back
with open(pyproject_path, "w") as f:
f.write(new_content)
def package_has_tests(package_path: Path) -> bool:
"""Returns whether a package folder contains a 'tests' subfolder."""
tests_folder = package_path / "tests"
return package_path.is_dir() and tests_folder.exists() and tests_folder.is_dir()
def is_llama_index_package(package_path: Path) -> bool:
"""Returns whether a folder contains a 'pyproject.toml' file."""
pyproject = package_path / "pyproject.toml"
return package_path.is_dir() and pyproject.exists() and pyproject.is_file()
def load_pyproject(package_path: Path) -> dict:
"""Thin wrapper around tomli.load()."""
pyproject_path = package_path / "pyproject.toml"
with open(pyproject_path, "rb") as f:
return tomli.load(f)
def find_integrations(root_path: Path, recursive=False) -> list[Path]:
"""Find all integrations packages in the repo."""
package_roots: list[Path] = []
integrations_root = root_path
if not recursive:
integrations_root = integrations_root / "llama-index-integrations"
for category_path in integrations_root.iterdir():
if not category_path.is_dir():
continue
if category_path.name == "storage":
# The "storage" category has sub-folders
package_roots += find_integrations(category_path, recursive=True)
continue
for package_name in category_path.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_packs(root_path: Path) -> list[Path]:
"""Find all llama-index-packs packages in the repo."""
package_roots: list[Path] = []
packs_root = root_path / "llama-index-packs"
for package_name in packs_root.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_utils(root_path: Path) -> list[Path]:
"""Find all llama-index-utils packages in the repo."""
package_roots: list[Path] = []
utils_root = root_path / "llama-index-utils"
for package_name in utils_root.iterdir():
if is_llama_index_package(package_name):
package_roots.append(package_name)
return package_roots
def find_all_packages(root_path: Path) -> list[Path]:
"""Returns a list of all the package folders in the monorepo."""
return [
root_path / "llama-index-core",
*find_integrations(root_path),
*find_packs(root_path),
*find_utils(root_path),
root_path / "llama-index-instrumentation",
]
def get_changed_files(repo_root: Path, base_ref: str = "main") -> list[Path]:
"""Use git to get the list of files changed compared to the base branch."""
try:
cmd = ["git", "diff", "--name-only", f"{base_ref}...HEAD"]
result = subprocess.run(cmd, cwd=repo_root, text=True, capture_output=True)
if result.returncode != 0:
raise RuntimeError(f"Git command failed: {result.stderr}")
return [repo_root / Path(f) for f in result.stdout.splitlines() if f.strip()]
except Exception as e:
print(f"Exception occurred: {e!s}")
raise
def get_changed_packages(
changed_files: list[Path], all_packages: list[Path]
) -> set[Path]:
"""Get the list of package folders containing the path in 'changed_files'."""
changed_packages: set[Path] = set()
for file_path in changed_files:
# Find the package containing this file
for pkg_dir in all_packages:
if file_path.absolute().is_relative_to(pkg_dir.absolute()):
changed_packages.add(pkg_dir)
break
return changed_packages
def get_dep_names(pyproject_data: dict) -> set[str]:
"""Load dependencies from pyproject.toml."""
dependencies: set[str] = set()
for dep in pyproject_data["project"]["dependencies"]:
matches = DEP_NAME_REGEX.findall(dep)
if not matches:
continue
dependencies.add(matches[0])
return dependencies
def is_python_version_compatible(pyproject_data: dict) -> bool:
"""Check if the package is compatible with the current Python version using packaging."""
# Get current Python version
current_version = version.Version(
f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
)
# Get Python version requirements if they exist
requires_python = pyproject_data.get("project", {}).get("requires-python")
if requires_python is None:
# If no Python version is specified, assume it's compatible
return True
try:
# Parse the version specifier
spec = specifiers.SpecifierSet(requires_python)
# Check if the current version satisfies the specifier
return spec.contains(str(current_version))
except Exception as e:
# If there's any error in parsing, log it and assume compatibility
print(
f"Warning: Could not parse Python version specifier '{requires_python}': {e}"
)
return True
def get_dependants_packages(
changed_packages: set[Path], all_packages: list[Path]
) -> set[Path]:
"""Get packages containing the files in the changeset."""
changed_packages_names: set[str] = set()
for pkg_path in changed_packages:
pyproject_data = load_pyproject(pkg_path)
changed_packages_names.add(pyproject_data["project"]["name"])
dependants_packages: set[Path] = set()
for pkg_path in all_packages:
pyproject_data = load_pyproject(pkg_path)
for dep_name in get_dep_names(pyproject_data):
if dep_name in changed_packages_names:
dependants_packages.add(pkg_path)
return dependants_packages
| BumpType |
python | pypa__setuptools | setuptools/_vendor/zipp/__init__.py | {
"start": 6768,
"end": 13380
} | class ____:
"""
A :class:`importlib.resources.abc.Traversable` interface for zip files.
Implements many of the features users enjoy from
:class:`pathlib.Path`.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> path = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = path.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text(encoding='utf-8')
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile.
>>> str(path)
'mem/abcde.zip/'
>>> path.name
'abcde.zip'
>>> path.filename == pathlib.Path('mem/abcde.zip')
True
>>> str(path.parent)
'mem'
If the zipfile has no filename, such attributes are not
valid and accessing them will raise an Exception.
>>> zf.filename = None
>>> path.name
Traceback (most recent call last):
...
TypeError: ...
>>> path.filename
Traceback (most recent call last):
...
TypeError: ...
>>> path.parent
Traceback (most recent call last):
...
TypeError: ...
# workaround python/cpython#106763
>>> pass
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def __eq__(self, other):
"""
>>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo'
False
"""
if self.__class__ is not other.__class__:
return NotImplemented
return (self.root, self.at) == (other.root, other.at)
def __hash__(self):
return hash((self.root, self.at))
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
# Text mode:
encoding, args, kwargs = _extract_text_encoding(*args, **kwargs)
return io.TextIOWrapper(stream, encoding, *args, **kwargs)
def _base(self):
return pathlib.PurePosixPath(self.at or self.root.filename)
@property
def name(self):
return self._base().name
@property
def suffix(self):
return self._base().suffix
@property
def suffixes(self):
return self._base().suffixes
@property
def stem(self):
return self._base().stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
encoding, args, kwargs = _extract_text_encoding(*args, **kwargs)
with self.open('r', encoding, *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def match(self, path_pattern):
return pathlib.PurePosixPath(self.at).match(path_pattern)
def is_symlink(self):
"""
Return whether this path is a symlink.
"""
info = self.root.getinfo(self.at)
mode = info.external_attr >> 16
return stat.S_ISLNK(mode)
def glob(self, pattern):
if not pattern:
raise ValueError(f"Unacceptable pattern: {pattern!r}")
prefix = re.escape(self.at)
tr = Translator(seps='/')
matches = re.compile(prefix + tr.translate(pattern)).fullmatch
names = (data.filename for data in self.root.filelist)
return map(self._next, filter(matches, names))
def rglob(self, pattern):
return self.glob(f'**/{pattern}')
def relative_to(self, other, *extra):
return posixpath.relpath(str(self), str(other.joinpath(*extra)))
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *other)
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
| Path |
python | kamyu104__LeetCode-Solutions | Python/transform-array-to-all-equal-elements.py | {
"start": 38,
"end": 637
} | class ____(object):
def canMakeEqual(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
def check(target):
cnt = 0
sign = 1
for i in xrange(len(nums)):
if nums[i]*sign == target:
sign = 1
continue
cnt += 1
if i+1 == len(nums) or cnt > k:
return False
sign = -1
return True
return check(1) or check(-1)
# Time: O(n)
# Space: O(1)
# greedy
| Solution |
python | python__mypy | mypy/build.py | {
"start": 19280,
"end": 72405
} | class ____:
"""This class holds shared state for building a mypy program.
It is used to coordinate parsing, import processing, semantic
analysis and type checking. The actual build steps are carried
out by dispatch().
Attributes:
data_dir: Mypy data directory (contains stubs)
search_paths: SearchPaths instance indicating where to look for modules
modules: Mapping of module ID to MypyFile (shared by the passes)
semantic_analyzer:
Semantic analyzer, pass 2
all_types: Map {Expression: Type} from all modules (enabled by export_types)
options: Build options
missing_modules: Set of modules that could not be imported encountered so far
stale_modules: Set of modules that needed to be rechecked (only used by tests)
fg_deps_meta: Metadata for fine-grained dependencies caches associated with modules
fg_deps: A fine-grained dependency map
version_id: The current mypy version (based on commit id when possible)
plugin: Active mypy plugin(s)
plugins_snapshot:
Snapshot of currently active user plugins (versions and hashes)
old_plugins_snapshot:
Plugins snapshot from previous incremental run (or None in
non-incremental mode and if cache was not found)
errors: Used for reporting all errors
flush_errors: A function for processing errors after each SCC
cache_enabled: Whether cache is being read. This is set based on options,
but is disabled if fine-grained cache loading fails
and after an initial fine-grained load. This doesn't
determine whether we write cache files or not.
quickstart_state:
A cache of filename -> mtime/size/hash info used to avoid
needing to hash source files when using a cache with mismatching mtimes
stats: Dict with various instrumentation numbers, it is used
not only for debugging, but also required for correctness,
in particular to check consistency of the fine-grained dependency cache.
fscache: A file system cacher
ast_cache: AST cache to speed up mypy daemon
"""
def __init__(
self,
data_dir: str,
search_paths: SearchPaths,
ignore_prefix: str,
source_set: BuildSourceSet,
reports: Reports | None,
options: Options,
version_id: str,
plugin: Plugin,
plugins_snapshot: dict[str, str],
errors: Errors,
flush_errors: Callable[[str | None, list[str], bool], None],
fscache: FileSystemCache,
stdout: TextIO,
stderr: TextIO,
error_formatter: ErrorFormatter | None = None,
) -> None:
self.stats: dict[str, Any] = {} # Values are ints or floats
self.stdout = stdout
self.stderr = stderr
self.start_time = time.time()
self.data_dir = data_dir
self.errors = errors
self.errors.set_ignore_prefix(ignore_prefix)
self.error_formatter = error_formatter
self.search_paths = search_paths
self.source_set = source_set
self.reports = reports
self.options = options
self.version_id = version_id
self.modules: dict[str, MypyFile] = {}
self.import_map: dict[str, set[str]] = {}
self.missing_modules: set[str] = set()
self.fg_deps_meta: dict[str, FgDepMeta] = {}
# fg_deps holds the dependencies of every module that has been
# processed. We store this in BuildManager so that we can compute
# dependencies as we go, which allows us to free ASTs and type information,
# saving a ton of memory on net.
self.fg_deps: dict[str, set[str]] = {}
# Always convert the plugin to a ChainedPlugin so that it can be manipulated if needed
if not isinstance(plugin, ChainedPlugin):
plugin = ChainedPlugin(options, [plugin])
self.plugin = plugin
# Set of namespaces (module or class) that are being populated during semantic
# analysis and may have missing definitions.
self.incomplete_namespaces: set[str] = set()
self.semantic_analyzer = SemanticAnalyzer(
self.modules,
self.missing_modules,
self.incomplete_namespaces,
self.errors,
self.plugin,
self.import_map,
)
self.all_types: dict[Expression, Type] = {} # Enabled by export_types
self.indirection_detector = TypeIndirectionVisitor()
self.stale_modules: set[str] = set()
self.rechecked_modules: set[str] = set()
self.flush_errors = flush_errors
has_reporters = reports is not None and reports.reporters
self.cache_enabled = (
options.incremental
and (not options.fine_grained_incremental or options.use_fine_grained_cache)
and not has_reporters
)
self.fscache = fscache
self.find_module_cache = FindModuleCache(
self.search_paths, self.fscache, self.options, source_set=self.source_set
)
for module in CORE_BUILTIN_MODULES:
if options.use_builtins_fixtures:
continue
path = self.find_module_cache.find_module(module, fast_path=True)
if not isinstance(path, str):
raise CompileError(
[f"Failed to find builtin module {module}, perhaps typeshed is broken?"]
)
if is_typeshed_file(options.abs_custom_typeshed_dir, path) or is_stub_package_file(
path
):
continue
raise CompileError(
[
f'mypy: "{os.path.relpath(path)}" shadows library module "{module}"',
f'note: A user-defined top-level module with name "{module}" is not supported',
]
)
self.metastore = create_metastore(options)
# a mapping from source files to their corresponding shadow files
# for efficient lookup
self.shadow_map: dict[str, str] = {}
if self.options.shadow_file is not None:
self.shadow_map = dict(self.options.shadow_file)
# a mapping from each file being typechecked to its possible shadow file
self.shadow_equivalence_map: dict[str, str | None] = {}
self.plugin = plugin
self.plugins_snapshot = plugins_snapshot
self.old_plugins_snapshot = read_plugins_snapshot(self)
self.quickstart_state = read_quickstart_file(options, self.stdout)
# Fine grained targets (module top levels and top level functions) processed by
# the semantic analyzer, used only for testing. Currently used only by the new
# semantic analyzer. Tuple of module and target name.
self.processed_targets: list[tuple[str, str]] = []
# Missing stub packages encountered.
self.missing_stub_packages: set[str] = set()
# Cache for mypy ASTs that have completed semantic analysis
# pass 1. When multiple files are added to the build in a
# single daemon increment, only one of the files gets added
# per step and the others are discarded. This gets repeated
# until all the files have been added. This means that a
# new file can be processed O(n**2) times. This cache
# avoids most of this redundant work.
self.ast_cache: dict[str, tuple[MypyFile, list[ErrorInfo]]] = {}
# Number of times we used GC optimization hack for fresh SCCs.
self.gc_freeze_cycles = 0
# Mapping from SCC id to corresponding SCC instance. This is populated
# in process_graph().
self.scc_by_id: dict[int, SCC] = {}
# Global topological order for SCCs. This exists to make order of processing
# SCCs more predictable.
self.top_order: list[int] = []
# Stale SCCs that are queued for processing. Note that as of now we have just
# one worker, that is the same process. In the future, we will support multiple
# parallel worker processes.
self.scc_queue: list[SCC] = []
# SCCs that have been fully processed.
self.done_sccs: set[int] = set()
def dump_stats(self) -> None:
if self.options.dump_build_stats:
print("Stats:")
for key, value in sorted(self.stats_summary().items()):
print(f"{key + ':':24}{value}")
def use_fine_grained_cache(self) -> bool:
return self.cache_enabled and self.options.use_fine_grained_cache
def maybe_swap_for_shadow_path(self, path: str) -> str:
if not self.shadow_map:
return path
path = normpath(path, self.options)
previously_checked = path in self.shadow_equivalence_map
if not previously_checked:
for source, shadow in self.shadow_map.items():
if self.fscache.samefile(path, source):
self.shadow_equivalence_map[path] = shadow
break
else:
self.shadow_equivalence_map[path] = None
shadow_file = self.shadow_equivalence_map.get(path)
return shadow_file if shadow_file else path
def get_stat(self, path: str) -> os.stat_result | None:
return self.fscache.stat_or_none(self.maybe_swap_for_shadow_path(path))
def getmtime(self, path: str) -> int:
"""Return a file's mtime; but 0 in bazel mode.
(Bazel's distributed cache doesn't like filesystem metadata to
end up in output files.)
"""
if self.options.bazel:
return 0
else:
return int(self.metastore.getmtime(path))
def correct_rel_imp(self, file: MypyFile, imp: ImportFrom | ImportAll) -> str:
"""Function to correct for relative imports."""
file_id = file.fullname
rel = imp.relative
if rel == 0:
return imp.id
if os.path.basename(file.path).startswith("__init__."):
rel -= 1
if rel != 0:
file_id = ".".join(file_id.split(".")[:-rel])
new_id = file_id + "." + imp.id if imp.id else file_id
if not new_id:
self.errors.set_file(file.path, file.name, self.options)
self.errors.report(
imp.line, 0, "No parent module -- cannot perform relative import", blocker=True
)
return new_id
def all_imported_modules_in_file(self, file: MypyFile) -> list[tuple[int, str, int]]:
"""Find all reachable import statements in a file.
Return list of tuples (priority, module id, import line number)
for all modules imported in file; lower numbers == higher priority.
Can generate blocking errors on bogus relative imports.
"""
res: list[tuple[int, str, int]] = []
for imp in file.imports:
if not imp.is_unreachable:
if isinstance(imp, Import):
pri = import_priority(imp, PRI_MED)
ancestor_pri = import_priority(imp, PRI_LOW)
for id, _ in imp.ids:
res.append((pri, id, imp.line))
ancestor_parts = id.split(".")[:-1]
ancestors = []
for part in ancestor_parts:
ancestors.append(part)
res.append((ancestor_pri, ".".join(ancestors), imp.line))
elif isinstance(imp, ImportFrom):
cur_id = self.correct_rel_imp(file, imp)
all_are_submodules = True
# Also add any imported names that are submodules.
pri = import_priority(imp, PRI_MED)
for name, __ in imp.names:
sub_id = cur_id + "." + name
if self.is_module(sub_id):
res.append((pri, sub_id, imp.line))
else:
all_are_submodules = False
# Add cur_id as a dependency, even if all the
# imports are submodules. Processing import from will try
# to look through cur_id, so we should depend on it.
# As a workaround for some bugs in cycle handling (#4498),
# if all the imports are submodules, do the import at a lower
# priority.
pri = import_priority(imp, PRI_HIGH if not all_are_submodules else PRI_LOW)
res.append((pri, cur_id, imp.line))
elif isinstance(imp, ImportAll):
pri = import_priority(imp, PRI_HIGH)
res.append((pri, self.correct_rel_imp(file, imp), imp.line))
# Sort such that module (e.g. foo.bar.baz) comes before its ancestors (e.g. foo
# and foo.bar) so that, if FindModuleCache finds the target module in a
# package marked with py.typed underneath a namespace package installed in
# site-packages, (gasp), that cache's knowledge of the ancestors
# (aka FindModuleCache.ns_ancestors) can be primed when it is asked to find
# the parent.
res.sort(key=lambda x: -x[1].count("."))
return res
def is_module(self, id: str) -> bool:
"""Is there a file in the file system corresponding to module id?"""
return find_module_simple(id, self) is not None
def parse_file(
self, id: str, path: str, source: str, ignore_errors: bool, options: Options
) -> MypyFile:
"""Parse the source of a file with the given name.
Raise CompileError if there is a parse error.
"""
t0 = time.time()
if ignore_errors:
self.errors.ignored_files.add(path)
tree = parse(source, path, id, self.errors, options=options)
tree._fullname = id
self.add_stats(
files_parsed=1,
modules_parsed=int(not tree.is_stub),
stubs_parsed=int(tree.is_stub),
parse_time=time.time() - t0,
)
if self.errors.is_blockers():
self.log("Bailing due to parse errors")
self.errors.raise_error()
self.errors.set_file_ignored_lines(path, tree.ignored_lines, ignore_errors)
return tree
def load_fine_grained_deps(self, id: str) -> dict[str, set[str]]:
t0 = time.time()
if id in self.fg_deps_meta:
# TODO: Assert deps file wasn't changed.
deps = json_loads(self.metastore.read(self.fg_deps_meta[id]["path"]))
else:
deps = {}
val = {k: set(v) for k, v in deps.items()}
self.add_stats(load_fg_deps_time=time.time() - t0)
return val
def report_file(
self, file: MypyFile, type_map: dict[Expression, Type], options: Options
) -> None:
if self.reports is not None and self.source_set.is_source(file):
self.reports.file(file, self.modules, type_map, options)
def verbosity(self) -> int:
return self.options.verbosity
def log(self, *message: str) -> None:
if self.verbosity() >= 1:
if message:
print("LOG: ", *message, file=self.stderr)
else:
print(file=self.stderr)
self.stderr.flush()
def log_fine_grained(self, *message: str) -> None:
if self.verbosity() >= 1:
self.log("fine-grained:", *message)
elif mypy.build.DEBUG_FINE_GRAINED:
# Output log in a simplified format that is quick to browse.
if message:
print(*message, file=self.stderr)
else:
print(file=self.stderr)
self.stderr.flush()
def trace(self, *message: str) -> None:
if self.verbosity() >= 2:
print("TRACE:", *message, file=self.stderr)
self.stderr.flush()
def add_stats(self, **kwds: Any) -> None:
for key, value in kwds.items():
if key in self.stats:
self.stats[key] += value
else:
self.stats[key] = value
def stats_summary(self) -> Mapping[str, object]:
return self.stats
def submit(self, sccs: list[SCC]) -> None:
"""Submit a stale SCC for processing in current process."""
self.scc_queue.extend(sccs)
def wait_for_done(self, graph: Graph) -> tuple[list[SCC], bool]:
"""Wait for a stale SCC processing (in process) to finish.
Return next processed SCC and whether we have more in the queue.
This emulates the API we will have for parallel processing
in multiple worker processes.
"""
if not self.scc_queue:
return [], False
next_scc = self.scc_queue.pop(0)
process_stale_scc(graph, next_scc, self)
return [next_scc], bool(self.scc_queue)
def deps_to_json(x: dict[str, set[str]]) -> bytes:
    """Serialize a fine-grained dependency map to JSON bytes (sets become lists)."""
    serializable = {trigger: list(targets) for trigger, targets in x.items()}
    return json_dumps(serializable)
# File for storing metadata about all the fine-grained dependency caches
DEPS_META_FILE: Final = "@deps.meta.json"
# File for storing fine-grained dependencies that didn't have a parent in the build
DEPS_ROOT_FILE: Final = "@root.deps.json"
# The name of the fake module used to store fine-grained dependencies that
# have no other place to go.
FAKE_ROOT_MODULE: Final = "@root"
def write_deps_cache(
    rdeps: dict[str, dict[str, set[str]]], manager: BuildManager, graph: Graph
) -> None:
    """Write cache files for fine-grained dependencies.

    Serialize fine-grained dependencies map for fine-grained mode.

    Dependencies on some module 'm' is stored in the dependency cache
    file m.deps.json. This entails some spooky action at a distance:
    if module 'n' depends on 'm', that produces entries in m.deps.json.
    When there is a dependency on a module that does not exist in the
    build, it is stored with its first existing parent module. If no
    such module exists, it is stored with the fake module FAKE_ROOT_MODULE.

    This means that the validity of the fine-grained dependency caches
    are a global property, so we store validity checking information for
    fine-grained dependencies in a global cache file:
     * We take a snapshot of current sources to later check consistency
       between the fine-grained dependency cache and module cache metadata
     * We store the mtime of all the dependency files to verify they
       haven't changed
    """
    metastore = manager.metastore
    # Write failures are accumulated instead of aborting, so one blocker
    # error is reported at the end covering all of them.
    error = False
    fg_deps_meta = manager.fg_deps_meta.copy()
    # Write one deps file per module that owns dependencies.
    for id in rdeps:
        if id != FAKE_ROOT_MODULE:
            _, _, deps_json = get_cache_names(id, graph[id].xpath, manager.options)
        else:
            deps_json = DEPS_ROOT_FILE
        assert deps_json
        manager.log("Writing deps cache", deps_json)
        if not manager.metastore.write(deps_json, deps_to_json(rdeps[id])):
            manager.log(f"Error writing fine-grained deps JSON file {deps_json}")
            error = True
        else:
            # Record the mtime so read_deps_cache() can detect later tampering.
            fg_deps_meta[id] = {"path": deps_json, "mtime": manager.getmtime(deps_json)}
    # Snapshot the source hashes of all modules for global validity checking.
    meta_snapshot: dict[str, str] = {}
    for id, st in graph.items():
        # If we didn't parse a file (so it doesn't have a
        # source_hash), then it must be a module with a fresh cache,
        # so use the hash from that.
        if st.source_hash:
            hash = st.source_hash
        else:
            if st.meta:
                hash = st.meta.hash
            else:
                hash = ""
        meta_snapshot[id] = hash
    meta = {"snapshot": meta_snapshot, "deps_meta": fg_deps_meta}
    if not metastore.write(DEPS_META_FILE, json_dumps(meta)):
        manager.log(f"Error writing fine-grained deps meta JSON file {DEPS_META_FILE}")
        error = True
    if error:
        # Attribute the blocker error to the cache directory itself.
        manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
        manager.errors.report(0, 0, "Error writing fine-grained dependencies cache", blocker=True)
def invert_deps(deps: dict[str, set[str]], graph: Graph) -> dict[str, dict[str, set[str]]]:
    """Splits fine-grained dependencies based on the module of the trigger.

    Returns a dictionary from module ids to all dependencies on that
    module. Dependencies not associated with a module in the build will be
    associated with the nearest parent module that is in the build, or the
    fake module FAKE_ROOT_MODULE if none are.
    """
    # Lazy import to speed up startup
    from mypy.server.target import trigger_to_target

    # Prepopulate the map for all the modules that have been processed,
    # so that we always generate files for processed modules (even if
    # there aren't any dependencies to them.)
    rdeps: dict[str, dict[str, set[str]]] = {}
    for module_id, state in graph.items():
        if state.tree:
            rdeps[module_id] = {}
    for trigger, targets in deps.items():
        owner = module_prefix(graph, trigger_to_target(trigger))
        if not owner or not graph[owner].tree:
            owner = FAKE_ROOT_MODULE
        rdeps.setdefault(owner, {}).setdefault(trigger, set()).update(targets)
    return rdeps
def generate_deps_for_cache(manager: BuildManager, graph: Graph) -> dict[str, dict[str, set[str]]]:
    """Generate fine-grained dependencies into a form suitable for serializing.

    This does a couple things:
    1. Splits fine-grained deps based on the module of the trigger
    2. For each module we generated fine-grained deps for, load any previous
       deps and merge them in.

    Returns a dictionary from module ids to all dependencies on that
    module. Dependencies not associated with a module in the build will be
    associated with the nearest parent module that is in the build, or the
    fake module FAKE_ROOT_MODULE if none are.
    """
    from mypy.server.deps import merge_dependencies  # Lazy import to speed up startup

    # Split the dependencies out based on the module that is depended on.
    rdeps = invert_deps(manager.fg_deps, graph)

    # We can't just clobber existing dependency information, so we
    # load the deps for every module we've generated new dependencies
    # to and merge the new deps into them.
    for module in rdeps:
        previous = manager.load_fine_grained_deps(module)
        merge_dependencies(previous, rdeps[module])
    return rdeps
# File for storing a snapshot of versions and hashes of the active plugins
PLUGIN_SNAPSHOT_FILE: Final = "@plugins_snapshot.json"
def write_plugins_snapshot(manager: BuildManager) -> None:
    """Write snapshot of versions and hashes of currently active plugins."""
    payload = json_dumps(manager.plugins_snapshot)
    wrote = manager.metastore.write(PLUGIN_SNAPSHOT_FILE, payload)
    if wrote or manager.options.cache_dir == os.devnull:
        return
    manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options)
    manager.errors.report(0, 0, "Error writing plugins snapshot", blocker=True)
def read_plugins_snapshot(manager: BuildManager) -> dict[str, str] | None:
    """Read cached snapshot of versions and hashes of plugins from previous run."""
    snapshot = _load_json_file(
        PLUGIN_SNAPSHOT_FILE,
        manager,
        log_success="Plugins snapshot ",
        log_error="Could not load plugins snapshot: ",
    )
    if snapshot is None:
        return None
    if isinstance(snapshot, dict):
        return snapshot
    manager.log(f"Could not load plugins snapshot: cache is not a dict: {type(snapshot)}")  # type: ignore[unreachable]
    return None
def read_quickstart_file(
    options: Options, stdout: TextIO
) -> dict[str, tuple[float, int, str]] | None:
    """Load the quickstart file, mapping path -> (mtime, size, hash).

    Returns None when no quickstart file is configured or loading failed
    before any entries were read.
    """
    if not options.quickstart_file:
        return None
    # This is very "best effort". If the file is missing or malformed,
    # just ignore it.
    quickstart: dict[str, tuple[float, int, str]] | None = None
    try:
        with open(options.quickstart_file, "rb") as f:
            raw_quickstart: dict[str, Any] = json_loads(f.read())
        quickstart = {}
        for file, (x, y, z) in raw_quickstart.items():
            quickstart[file] = (x, y, z)
    except Exception as e:
        print(f"Warning: Failed to load quickstart file: {str(e)}\n", file=stdout)
    return quickstart
def read_deps_cache(manager: BuildManager, graph: Graph) -> dict[str, FgDepMeta] | None:
    """Read and validate the fine-grained dependencies cache.

    See the write_deps_cache documentation for more information on
    the details of the cache.

    Returns None if the cache was invalid in some way.
    """
    deps_meta = _load_json_file(
        DEPS_META_FILE,
        manager,
        log_success="Deps meta ",
        log_error="Could not load fine-grained dependency metadata: ",
    )
    if deps_meta is None:
        return None
    meta_snapshot = deps_meta["snapshot"]
    # Take a snapshot of the source hashes from all the metas we found.
    # (Including the ones we rejected because they were out of date.)
    # We use this to verify that they match up with the proto_deps.
    current_meta_snapshot = {
        id: st.meta_source_hash for id, st in graph.items() if st.meta_source_hash is not None
    }
    # Only modules present in both snapshots can be compared; any mismatch
    # means the deps cache and the module caches diverged.
    common = set(meta_snapshot.keys()) & set(current_meta_snapshot.keys())
    if any(meta_snapshot[id] != current_meta_snapshot[id] for id in common):
        # TODO: invalidate also if options changed (like --strict-optional)?
        manager.log("Fine-grained dependencies cache inconsistent, ignoring")
        return None
    module_deps_metas = deps_meta["deps_meta"]
    assert isinstance(module_deps_metas, dict)
    if not manager.options.skip_cache_mtime_checks:
        # Verify no per-module deps file changed since the meta was written.
        for meta in module_deps_metas.values():
            try:
                matched = manager.getmtime(meta["path"]) == meta["mtime"]
            except FileNotFoundError:
                matched = False
            if not matched:
                manager.log(f"Invalid or missing fine-grained deps cache: {meta['path']}")
                return None
    return module_deps_metas
def _load_ff_file(file: str, manager: BuildManager, log_error: str) -> bytes | None:
    """Read a fixed-format cache file, returning None (and logging) on OSError."""
    start = time.time()
    try:
        contents = manager.metastore.read(file)
    except OSError:
        manager.log(log_error + file)
        return None
    manager.add_stats(metastore_read_time=time.time() - start)
    return contents
def _load_json_file(
    file: str, manager: BuildManager, log_success: str, log_error: str
) -> dict[str, Any] | None:
    """A simple helper to read a JSON file with logging.

    Returns the parsed dict, or None when the file cannot be read or
    decoded. A decode error additionally reports a blocker error, since
    it most likely indicates a corrupted cache.
    """
    t0 = time.time()
    try:
        data = manager.metastore.read(file)
    except OSError:
        manager.log(log_error + file)
        return None
    manager.add_stats(metastore_read_time=time.time() - t0)
    # Only bother to compute the log message if we are logging it, since it could be big
    if manager.verbosity() >= 2:
        manager.trace(log_success + data.rstrip().decode())
    try:
        t1 = time.time()
        result = json_loads(data)
        manager.add_stats(data_file_load_time=time.time() - t1)
    except json.JSONDecodeError:
        # Report against the cache file itself so the user knows what to delete.
        manager.errors.set_file(file, None, manager.options)
        manager.errors.report(
            -1,
            -1,
            "Error reading JSON file;"
            " you likely have a bad cache.\n"
            "Try removing the {cache_dir} directory"
            " and run mypy again.".format(cache_dir=manager.options.cache_dir),
            blocker=True,
        )
        return None
    else:
        assert isinstance(result, dict)
        return result
def _cache_dir_prefix(options: Options) -> str:
"""Get current cache directory (or file if id is given)."""
if options.bazel:
# This is needed so the cache map works.
return os.curdir
cache_dir = options.cache_dir
pyversion = options.python_version
base = os.path.join(cache_dir, "%d.%d" % pyversion)
return base
def add_catch_all_gitignore(target_dir: str) -> None:
    """Add catch-all .gitignore to an existing directory.

    No-op if the .gitignore already exists.
    """
    gitignore_path = os.path.join(target_dir, ".gitignore")
    try:
        # "x" mode fails atomically if the file already exists.
        with open(gitignore_path, "x") as f:
            f.write("# Automatically created by mypy\n*\n")
    except FileExistsError:
        pass
def exclude_from_backups(target_dir: str) -> None:
    """Exclude the directory from various archives and backups supporting CACHEDIR.TAG.

    If the CACHEDIR.TAG file exists the function is a no-op.
    """
    tag_path = os.path.join(target_dir, "CACHEDIR.TAG")
    tag_contents = (
        "Signature: 8a477f597d28d172789f06886806bc55\n"
        "# This file is a cache directory tag automatically created by mypy.\n"
        "# For information about cache directory tags see https://bford.info/cachedir/\n"
    )
    try:
        # "x" mode fails atomically if the tag already exists.
        with open(tag_path, "x") as f:
            f.write(tag_contents)
    except FileExistsError:
        pass
def create_metastore(options: Options) -> MetadataStore:
    """Create the appropriate metadata store (sqlite- or filesystem-backed)."""
    prefix = _cache_dir_prefix(options)
    if options.sqlite_cache:
        return SqliteMetadataStore(prefix)
    return FilesystemMetadataStore(prefix)
def get_cache_names(id: str, path: str, options: Options) -> tuple[str, str, str | None]:
    """Return the file names for the cache files.

    Args:
      id: module ID
      path: module path
      options: build options

    Returns:
      A tuple with the file names to be used for the meta file, the
      data file, and the fine-grained deps JSON, respectively.
    """
    pair = options.cache_map.get(normpath(path, options)) if options.cache_map else None
    if pair is not None:
        # The cache map paths were specified relative to the base directory,
        # but the filesystem metastore APIs operates relative to the cache
        # prefix directory.
        # Solve this by rewriting the paths as relative to the root dir.
        # This only makes sense when using the filesystem backed cache.
        root = _cache_dir_prefix(options)
        return os.path.relpath(pair[0], root), os.path.relpath(pair[1], root), None
    prefix = os.path.join(*id.split("."))
    if os.path.basename(path).startswith("__init__.py"):
        prefix = os.path.join(prefix, "__init__")
    deps_json = prefix + ".deps.json" if options.cache_fine_grained else None
    suffix = ".ff" if options.fixed_format_cache else ".json"
    return prefix + ".meta" + suffix, prefix + ".data" + suffix, deps_json
def options_snapshot(id: str, manager: BuildManager) -> dict[str, object]:
    """Make compact snapshot of options for a module.

    Separately store only the options we may compare individually, and take a hash
    of everything else. If --debug-cache is specified, fall back to full snapshot.
    """
    full = manager.options.clone_for_module(id).select_options_affecting_cache()
    if manager.options.debug_cache:
        return full
    platform_opt = full.pop("platform")
    return {"platform": platform_opt, "other_options": hash_digest(json_dumps(full))}
def find_cache_meta(id: str, path: str, manager: BuildManager) -> CacheMeta | None:
    """Find cache data for a module.

    Args:
      id: module ID
      path: module path
      manager: the build manager (for pyversion, log/trace, and build options)

    Returns:
      A CacheMeta instance if the cache data was found and appears
      valid; otherwise None.
    """
    # TODO: May need to take more build options into account
    meta_file, data_file, _ = get_cache_names(id, path, manager.options)
    manager.trace(f"Looking for {id} at {meta_file}")
    # Raw bytes for the fixed (binary) cache format, a parsed dict for JSON.
    meta: bytes | dict[str, Any] | None
    t0 = time.time()
    if manager.options.fixed_format_cache:
        meta = _load_ff_file(meta_file, manager, log_error=f"Could not load cache for {id}: ")
        if meta is None:
            return None
    else:
        meta = _load_json_file(
            meta_file,
            manager,
            log_success=f"Meta {id} ",
            log_error=f"Could not load cache for {id}: ",
        )
        if meta is None:
            return None
        if not isinstance(meta, dict):
            manager.log(  # type: ignore[unreachable]
                f"Could not load cache for {id}: meta cache is not a dict: {repr(meta)}"
            )
            return None
    t1 = time.time()
    if isinstance(meta, bytes):
        # If either low-level buffer format or high-level cache layout changed, we
        # cannot use the cache files, even with --skip-version-check.
        # TODO: switch to something like librt.internal.read_byte() if this is slow.
        if meta[0] != cache_version() or meta[1] != CACHE_VERSION:
            manager.log(f"Metadata abandoned for {id}: incompatible cache format")
            return None
        data_io = ReadBuffer(meta[2:])
        m = CacheMeta.read(data_io, data_file)
    else:
        m = CacheMeta.deserialize(meta, data_file)
        if m is None:
            manager.log(f"Metadata abandoned for {id}: cannot deserialize data")
            return None
    t2 = time.time()
    manager.add_stats(
        load_meta_time=t2 - t0, load_meta_load_time=t1 - t0, load_meta_from_dict_time=t2 - t1
    )
    # Ignore cache if generated by an older mypy version.
    if m.version_id != manager.version_id and not manager.options.skip_version_check:
        manager.log(f"Metadata abandoned for {id}: different mypy version")
        return None
    # dep_prios/dep_lines are parallel arrays covering both real and
    # suppressed dependencies; a length mismatch means a corrupt entry.
    total_deps = len(m.dependencies) + len(m.suppressed)
    if len(m.dep_prios) != total_deps or len(m.dep_lines) != total_deps:
        manager.log(f"Metadata abandoned for {id}: broken dependencies")
        return None
    # Ignore cache if (relevant) options aren't the same.
    # Note that it's fine to mutilate cached_options since it's only used here.
    cached_options = m.options
    current_options = options_snapshot(id, manager)
    if manager.options.skip_version_check:
        # When we're lax about version we're also lax about platform.
        cached_options["platform"] = current_options["platform"]
    if "debug_cache" in cached_options:
        # Older versions included debug_cache, but it's silly to compare it.
        del cached_options["debug_cache"]
    if cached_options != current_options:
        manager.log(f"Metadata abandoned for {id}: options differ")
        if manager.options.verbosity >= 2:
            # At high verbosity, show exactly which option values diverged.
            for key in sorted(set(cached_options) | set(current_options)):
                if cached_options.get(key) != current_options.get(key):
                    manager.trace(
                        " {}: {} != {}".format(
                            key, cached_options.get(key), current_options.get(key)
                        )
                    )
        return None
    if manager.old_plugins_snapshot and manager.plugins_snapshot:
        # Check if plugins are still the same.
        if manager.plugins_snapshot != manager.old_plugins_snapshot:
            manager.log(f"Metadata abandoned for {id}: plugins differ")
            return None
    # So that plugins can return data with tuples in it without
    # things silently always invalidating modules, we round-trip
    # the config data. This isn't beautiful.
    plugin_data = json_loads(
        json_dumps(manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=True)))
    )
    if m.plugin_data != plugin_data:
        manager.log(f"Metadata abandoned for {id}: plugin configuration differs")
        return None
    manager.add_stats(fresh_metas=1)
    return m
def validate_meta(
    meta: CacheMeta | None, id: str, path: str | None, ignore_all: bool, manager: BuildManager
) -> CacheMeta | None:
    """Checks whether the cached AST of this module can be used.

    Returns:
      None, if the cached AST is unusable.
      Original meta, if mtime/size matched.
      Meta with mtime updated to match source file, if hash/size matched but mtime/path didn't.
    """
    # This requires two steps. The first one is obvious: we check that the module source file
    # contents is the same as it was when the cache data file was created. The second one is not
    # too obvious: we check that the cache data file mtime has not changed; it is needed because
    # we use cache data file mtime to propagate information about changes in the dependencies.
    if meta is None:
        manager.log(f"Metadata not found for {id}")
        return None
    if meta.ignore_all and not ignore_all:
        manager.log(f"Metadata abandoned for {id}: errors were previously ignored")
        return None
    t0 = time.time()
    bazel = manager.options.bazel
    assert path is not None, "Internal error: meta was provided without a path"
    if not manager.options.skip_cache_mtime_checks:
        # Check data_file; assume if its mtime matches it's good.
        try:
            data_mtime = manager.getmtime(meta.data_file)
        except OSError:
            manager.log(f"Metadata abandoned for {id}: failed to stat data_file")
            return None
        if data_mtime != meta.data_mtime:
            manager.log(f"Metadata abandoned for {id}: data cache is modified")
            return None
    if bazel:
        # Normalize path under bazel to make sure it isn't absolute
        path = normpath(path, manager.options)
    st = manager.get_stat(path)
    if st is None:
        return None
    if not stat.S_ISDIR(st.st_mode) and not stat.S_ISREG(st.st_mode):
        manager.log(f"Metadata abandoned for {id}: file or directory {path} does not exist")
        return None
    manager.add_stats(validate_stat_time=time.time() - t0)
    # When we are using a fine-grained cache, we want our initial
    # build() to load all of the cache information and then do a
    # fine-grained incremental update to catch anything that has
    # changed since the cache was generated. We *don't* want to do a
    # coarse-grained incremental rebuild, so we accept the cache
    # metadata even if it doesn't match the source file.
    #
    # We still *do* the mtime/hash checks, however, to enable
    # fine-grained mode to take advantage of the mtime-updating
    # optimization when mtimes differ but hashes match. There is
    # essentially no extra time cost to computing the hash here, since
    # it will be cached and will be needed for finding changed files
    # later anyways.
    fine_grained_cache = manager.use_fine_grained_cache()
    size = st.st_size
    # Bazel ensures the cache is valid.
    if size != meta.size and not bazel and not fine_grained_cache:
        manager.log(f"Metadata abandoned for {id}: file {path} has different size")
        return None
    # Bazel ensures the cache is valid.
    mtime = 0 if bazel else int(st.st_mtime)
    if not bazel and (mtime != meta.mtime or path != meta.path):
        if manager.quickstart_state and path in manager.quickstart_state:
            # If the mtime and the size of the file recorded in the quickstart dump matches
            # what we see on disk, we know (assume) that the hash matches the quickstart
            # data as well. If that hash matches the hash in the metadata, then we know
            # the file is up to date even though the mtime is wrong, without needing to hash it.
            qmtime, qsize, qhash = manager.quickstart_state[path]
            if int(qmtime) == mtime and qsize == size and qhash == meta.hash:
                manager.log(f"Metadata fresh (by quickstart) for {id}: file {path}")
                meta.mtime = mtime
                meta.path = path
                return meta
        t0 = time.time()
        try:
            # dir means it is a namespace package
            if stat.S_ISDIR(st.st_mode):
                source_hash = ""
            else:
                source_hash = manager.fscache.hash_digest(path)
        except (OSError, UnicodeDecodeError, DecodeError):
            return None
        manager.add_stats(validate_hash_time=time.time() - t0)
        if source_hash != meta.hash:
            if fine_grained_cache:
                # See the fine-grained note above: accept the stale meta anyway.
                manager.log(f"Using stale metadata for {id}: file {path}")
                return meta
            else:
                manager.log(f"Metadata abandoned for {id}: file {path} has different hash")
                return None
        else:
            t0 = time.time()
            # Optimization: update mtime and path (otherwise, this mismatch will reappear).
            meta.mtime = mtime
            meta.path = path
            meta.size = size
            meta.options = options_snapshot(id, manager)
            meta_file, _, _ = get_cache_names(id, path, manager.options)
            manager.log(
                "Updating mtime for {}: file {}, meta {}, mtime {}".format(
                    id, path, meta_file, meta.mtime
                )
            )
            write_cache_meta(meta, manager, meta_file)
            t1 = time.time()
            manager.add_stats(validate_update_time=time.time() - t1, validate_munging_time=t1 - t0)
            return meta
    # It's a match on (id, path, size, hash, mtime).
    manager.log(f"Metadata fresh for {id}: file {path}")
    return meta
def compute_hash(text: str) -> str:
    """Return a stable cryptographic digest of *text*.

    We use a crypto hash instead of the builtin hash(...) function
    because the output of hash(...) can differ between runs due to
    hash randomization (enabled by default in Python 3.3). See the
    note in
    https://docs.python.org/3/reference/datamodel.html#object.__hash__.
    """
    return hash_digest(text.encode("utf-8"))
def write_cache(
    id: str,
    path: str,
    tree: MypyFile,
    dependencies: list[str],
    suppressed: list[str],
    dep_prios: list[int],
    dep_lines: list[int],
    old_interface_hash: bytes,
    source_hash: str,
    ignore_all: bool,
    manager: BuildManager,
) -> tuple[bytes, tuple[CacheMeta, str] | None]:
    """Write cache files for a module.

    Note that this mypy's behavior is still correct when any given
    write_cache() call is replaced with a no-op, so error handling
    code that bails without writing anything is okay.

    Args:
      id: module ID
      path: module path
      tree: the fully checked module data
      dependencies: module IDs on which this module depends
      suppressed: module IDs which were suppressed as dependencies
      dep_prios: priorities (parallel array to dependencies)
      dep_lines: import line locations (parallel array to dependencies)
      old_interface_hash: the hash from the previous version of the data cache file
      source_hash: the hash of the source code
      ignore_all: the ignore_all flag for this module
      manager: the build manager (for pyversion, log/trace)

    Returns:
      A tuple containing the interface hash and inner tuple with CacheMeta
      that should be written and path to cache file (inner tuple may be None,
      if the cache data could not be written).
    """
    metastore = manager.metastore
    # For Bazel we use relative paths and zero mtimes.
    bazel = manager.options.bazel
    # Obtain file paths.
    meta_file, data_file, _ = get_cache_names(id, path, manager.options)
    manager.log(f"Writing {id} {path} {meta_file} {data_file}")
    # Update tree.path so that in bazel mode it's made relative (since
    # sometimes paths leak out).
    if bazel:
        tree.path = path
    plugin_data = manager.plugin.report_config_data(ReportConfigContext(id, path, is_check=False))
    # Serialize data and analyze interface
    if manager.options.fixed_format_cache:
        data_io = WriteBuffer()
        tree.write(data_io)
        data_bytes = data_io.getvalue()
    else:
        data = tree.serialize()
        data_bytes = json_dumps(data, manager.options.debug_cache)
    # The interface hash covers the serialized tree plus plugin config, so a
    # plugin configuration change also invalidates dependents.
    interface_hash = hash_digest_bytes(data_bytes + json_dumps(plugin_data))
    # Obtain and set up metadata
    st = manager.get_stat(path)
    if st is None:
        manager.log(f"Cannot get stat for {path}")
        # Remove apparently-invalid cache files.
        # (This is purely an optimization.)
        for filename in [data_file, meta_file]:
            try:
                os.remove(filename)
            except OSError:
                pass
        # Still return the interface hash we computed.
        return interface_hash, None
    # Write data cache file, if applicable
    # Note that for Bazel we don't record the data file's mtime.
    if old_interface_hash == interface_hash:
        # Unchanged interface: the data file on disk is still valid; skip rewriting it.
        manager.trace(f"Interface for {id} is unchanged")
    else:
        manager.trace(f"Interface for {id} has changed")
        if not metastore.write(data_file, data_bytes):
            # Most likely the error is the replace() call
            # (see https://github.com/python/mypy/issues/3215).
            manager.log(f"Error writing cache data file {data_file}")
            # Let's continue without writing the meta file. Analysis:
            # If the replace failed, we've changed nothing except left
            # behind an extraneous temporary file; if the replace
            # worked but the getmtime() call failed, the meta file
            # will be considered invalid on the next run because the
            # data_mtime field won't match the data file's mtime.
            # Both have the effect of slowing down the next run a
            # little bit due to an out-of-date cache file.
            return interface_hash, None
    try:
        data_mtime = manager.getmtime(data_file)
    except OSError:
        manager.log(f"Error in os.stat({data_file!r}), skipping cache write")
        return interface_hash, None
    mtime = 0 if bazel else int(st.st_mtime)
    size = st.st_size
    # Note that the options we store in the cache are the options as
    # specified by the command line/config file and *don't* reflect
    # updates made by inline config directives in the file. This is
    # important, or otherwise the options would never match when
    # verifying the cache.
    assert source_hash is not None
    meta = CacheMeta(
        id=id,
        path=path,
        mtime=mtime,
        size=size,
        hash=source_hash,
        dependencies=dependencies,
        data_mtime=data_mtime,
        data_file=data_file,
        suppressed=suppressed,
        options=options_snapshot(id, manager),
        dep_prios=dep_prios,
        dep_lines=dep_lines,
        interface_hash=interface_hash,
        version_id=manager.version_id,
        ignore_all=ignore_all,
        plugin_data=plugin_data,
        # These two will be filled by the caller.
        dep_hashes=[],
        error_lines=[],
    )
    return interface_hash, (meta, meta_file)
def write_cache_meta(meta: CacheMeta, manager: BuildManager, meta_file: str) -> None:
    """Serialize and write a module's meta cache file.

    Uses the fixed (binary) format when --fixed-format-cache is enabled,
    otherwise JSON. Write failures are logged but are not fatal.
    """
    # Write meta cache file
    metastore = manager.metastore
    if manager.options.fixed_format_cache:
        data_io = WriteBuffer()
        meta.write(data_io)
        # Prefix with both low- and high-level cache format versions for future validation.
        # TODO: switch to something like librt.internal.write_byte() if this is slow.
        meta_bytes = bytes([cache_version(), CACHE_VERSION]) + data_io.getvalue()
    else:
        meta_dict = meta.serialize()
        meta_bytes = json_dumps(meta_dict, manager.options.debug_cache)
    if not metastore.write(meta_file, meta_bytes):
        # Most likely the error is the replace() call
        # (see https://github.com/python/mypy/issues/3215).
        # The next run will simply find the cache entry out of date.
        manager.log(f"Error writing cache meta file {meta_file}")
"""Dependency manager.
Design
======
Ideally
-------
A. Collapse cycles (each SCC -- strongly connected component --
becomes one "supernode").
B. Topologically sort nodes based on dependencies.
C. Process from leaves towards roots.
Wrinkles
--------
a. Need to parse source modules to determine dependencies.
b. Processing order for modules within an SCC.
c. Must order mtimes of files to decide whether to re-process; depends
on clock never resetting.
d. from P import M; must check the filesystem to determine whether
   module P.M exists.
e. Race conditions, where somebody modifies a file while we're
processing. Solved by using a FileSystemCache.
Steps
-----
1. For each explicitly given module find the source file location.
2. For each such module load and check the cache metadata, and decide
whether it's valid.
3. Now recursively (or iteratively) find dependencies and add those to
the graph:
- for cached nodes use the list of dependencies from the cache
metadata (this will be valid even if we later end up re-parsing
the same source);
- for uncached nodes parse the file and process all imports found,
taking care of (a) above.
Step 3 should also address (d) above.
Once step 3 terminates we have the entire dependency graph, and for
each module we've either loaded the cache metadata or parsed the
source code. (However, we may still need to parse those modules for
which we have cache metadata but that depend, directly or indirectly,
on at least one module for which the cache metadata is stale.)
Now we can execute steps A-C from the first section. Finding SCCs for
step A shouldn't be hard; there's a recipe here:
https://code.activestate.com/recipes/578507/. There's also a plethora
of topsort recipes, e.g. https://code.activestate.com/recipes/577413/.
For single nodes, processing is simple. If the node was cached, we
deserialize the cache data and fix up cross-references. Otherwise, we
do semantic analysis followed by type checking. Once we (re-)processed
an SCC we check whether its interface (symbol table) is still fresh
(matches previous cached value). If it is not, we consider dependent SCCs
stale so that they need to be re-parsed as well.
Note on indirect dependencies: normally dependencies are determined from
imports, but since our interfaces are "opaque" (i.e. symbol tables can
contain cross-references as well as types identified by name), these are not
enough. We *must* also add "indirect" dependencies from symbols and types to
their definitions. For this purpose, we record all accessed symbols during
semantic analysis, and after we finished processing a module, we traverse its
type map, and for each type we find (transitively) on which named types it
depends.
Import cycles
-------------
Finally we have to decide how to handle (b), import cycles. Here
we'll need a modified version of the original state machine
(build.py), but we only need to do this per SCC, and we won't have to
deal with changes to the list of nodes while we're processing it.
If all nodes in the SCC have valid cache metadata and all dependencies
outside the SCC are still valid, we can proceed as follows:
1. Load cache data for all nodes in the SCC.
2. Fix up cross-references for all nodes in the SCC.
Otherwise, the simplest (but potentially slow) way to proceed is to
invalidate all cache data in the SCC and re-parse all nodes in the SCC
from source. We can do this as follows:
1. Parse source for all nodes in the SCC.
2. Semantic analysis for all nodes in the SCC.
3. Type check all nodes in the SCC.
(If there are more passes the process is the same -- each pass should
be done for all nodes before starting the next pass for any nodes in
the SCC.)
We could process the nodes in the SCC in any order. For sentimental
reasons, I've decided to process them in the reverse order in which we
encountered them when originally constructing the graph. That's how
the old build.py deals with cycles, and at least this reproduces the
previous implementation more accurately.
Can we do better than re-parsing all nodes in the SCC when any of its
dependencies are out of date? It's doubtful. The optimization
mentioned at the end of the previous section would require re-parsing
and type-checking a node and then comparing its symbol table to the
cached data; but because the node is part of a cycle we can't
technically type-check it until the semantic analysis of all other
nodes in the cycle has completed. (This is an important issue because
Dropbox has a very large cycle in production code. But I'd like to
deal with it later.)
Additional wrinkles
-------------------
During implementation more wrinkles were found.
- When a submodule of a package (e.g. x.y) is encountered, the parent
package (e.g. x) must also be loaded, but it is not strictly a
dependency. See State.add_ancestors() below.
"""
| BuildManager |
python | Textualize__textual | docs/examples/widgets/tree.py | {
"start": 78,
"end": 460
} | class ____(App):
def compose(self) -> ComposeResult:
tree: Tree[str] = Tree("Dune")
tree.root.expand()
characters = tree.root.add("Characters", expand=True)
characters.add_leaf("Paul")
characters.add_leaf("Jessica")
characters.add_leaf("Chani")
yield tree
if __name__ == "__main__":
app = TreeApp()
app.run()
| TreeApp |
python | huggingface__transformers | src/transformers/models/glm4v/image_processing_glm4v.py | {
"start": 3249,
"end": 24322
} | class ____(BaseImageProcessor):
r"""
Constructs a GLM-4V image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}`):
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
in the `preprocess` method. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
"""
model_input_names = ["pixel_values", "image_grid_thw"]
valid_kwargs = Glm4vImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
patch_size: int = 14,
temporal_patch_size: int = 2,
merge_size: int = 2,
**kwargs,
) -> None:
super().__init__(**kwargs)
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
elif size is None:
size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}
self.size = size
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.merge_size = merge_size
self.do_convert_rgb = do_convert_rgb
def _preprocess(
self,
images: Union[ImageInput, VideoInput],
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
do_convert_rgb: Optional[bool] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`List[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
num_frames=temporal_patch_size,
height=height,
width=width,
temporal_factor=temporal_patch_size,
factor=patch_size * merge_size,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
patches = np.array(processed_images)
if data_format == ChannelDimension.LAST:
patches = patches.transpose(0, 3, 1, 2)
if patches.shape[0] % temporal_patch_size != 0:
repeats = np.repeat(
patches[-1][np.newaxis], temporal_patch_size - (patches.shape[0] % temporal_patch_size), axis=0
)
patches = np.concatenate([patches, repeats], axis=0)
channel = patches.shape[1]
grid_t = patches.shape[0] // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.reshape(
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
flatten_patches = patches.reshape(
grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size
)
return flatten_patches, (grid_t, grid_h, grid_w)
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
# Try to use config values if set, otherwise fallback to global defaults
size = size if size is not None else self.size
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
elif size is None:
size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
patch_size = patch_size if patch_size is not None else self.patch_size
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
merge_size = merge_size if merge_size is not None else self.merge_size
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
if images is not None:
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if images is not None and not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
data = {}
if images is not None:
pixel_values, vision_grid_thws = [], []
for image in images:
patches, image_grid_thw = self._preprocess(
image,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
patch_size=patch_size,
temporal_patch_size=temporal_patch_size,
merge_size=merge_size,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(patches)
vision_grid_thws.append(image_grid_thw)
pixel_values = np.array(pixel_values)
vision_grid_thws = np.array(vision_grid_thws)
data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})
return BatchFeature(data=data, tensor_type=return_tensors)
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of image patches per image.
"""
patch_size = images_kwargs.get("patch_size", self.patch_size)
merge_size = images_kwargs.get("merge_size", self.merge_size)
size = images_kwargs.get("size", {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000})
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(
num_frames=self.temporal_patch_size,
height=height,
width=width,
factor=factor,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
temporal_factor=self.temporal_patch_size,
)
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
return grid_h * grid_w
__all__ = ["Glm4vImageProcessor"]
| Glm4vImageProcessor |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 34081,
"end": 34248
} | class ____(BoringModel):
def training_step(self, batch, batch_idx):
if batch_idx == 1:
raise RuntimeError("Trouble!")
| TroubledModelInTrainingStep |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.