language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/chameleon/modeling_chameleon.py | {
"start": 32206,
"end": 33776
} | class ____:
"""
A class for mapping discrete image tokens from VQGAN to BPE tokens.
"""
def __init__(self, vocab_map):
self.vocab_map = vocab_map
self.image_token_id = vocab_map.get("<image>")
@cached_property
def val2name(self):
return {v: k for k, v in self.vocab_map.items()}
@cached_property
def image_tokens(self):
return sorted([val for name, val in self.vocab_map.items() if name.startswith("IMGIMG")])
@cached_property
def bpe2img(self):
img_tkn_chr_mapping = {chr(ord("A") + i): str(i) for i in range(10)}
def remap(old_name: str) -> str:
return "".join(img_tkn_chr_mapping.get(c, c) for c in old_name[len("IMGIMG") : -1])
return {tok: int(remap(self.val2name[tok])) for tok in self.image_tokens}
@cached_property
def img2bpe(self):
return {v: k for k, v in self.bpe2img.items()}
@cached_property
def bpe2img_search_tensors(self):
return torch.tensor(sorted(self.bpe2img.keys())), torch.tensor(sorted(self.bpe2img.values()))
@cached_property
def img2bpe_mapping_tensor(self):
mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
for k, v in self.img2bpe.items():
mapping[k] = v
return mapping
def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
img_tokens = self.img2bpe_mapping_tensor[img_batch.to("cpu")]
return img_tokens.to(device)
@auto_docstring
| ChameleonImageVocabularyMapping |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_tools.py | {
"start": 1060,
"end": 1361
} | class ____:
def test_tolist(self):
index = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
rs = index.tolist()
for x in rs:
assert isinstance(x, Period)
recon = PeriodIndex(rs)
tm.assert_index_equal(index, recon)
| TestPeriodIndexConversion |
python | PrefectHQ__prefect | src/prefect/server/schemas/sorting.py | {
"start": 6803,
"end": 7504
} | class ____(AutoEnum):
"""Defines variables sorting options."""
CREATED_DESC = "CREATED_DESC"
UPDATED_DESC = "UPDATED_DESC"
NAME_DESC = "NAME_DESC"
NAME_ASC = "NAME_ASC"
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort task runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"CREATED_DESC": [db.Variable.created.desc()],
"UPDATED_DESC": [db.Variable.updated.desc()],
"NAME_DESC": [db.Variable.name.desc()],
"NAME_ASC": [db.Variable.name.asc()],
}
return sort_mapping[self.value]
| VariableSort |
python | getsentry__sentry | src/sentry/api/endpoints/organization_unsubscribe.py | {
"start": 1013,
"end": 2864
} | class ____(Endpoint, Generic[T]):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.HYBRID_CLOUD
authentication_classes = (SignedRequestAuthentication,)
permission_classes = ()
object_type = "unknown"
def fetch_instance(self, request: Request, organization_id_or_slug: int | str, id: int) -> T:
raise NotImplementedError()
def unsubscribe(self, request: Request, instance: T):
raise NotImplementedError()
def add_instance_data(self, data: dict[str, Any], instance: T) -> dict[str, Any]:
return data
def get(
self, request: Request, organization_id_or_slug: int | str, id: int, **kwargs
) -> Response:
if not auth.is_user_signed_request(request):
raise NotFound()
instance = self.fetch_instance(request, organization_id_or_slug, id)
view_url = ""
if hasattr(instance, "get_absolute_url"):
view_url = str(instance.get_absolute_url())
display_name = ""
user = request.user
if hasattr(user, "get_display_name"):
display_name = str(user.get_display_name())
data = {
"viewUrl": view_url,
"type": self.object_type,
"displayName": display_name,
}
return Response(self.add_instance_data(data, instance), 200)
def post(
self, request: Request, organization_id_or_slug: int | str, id: int, **kwargs
) -> Response:
if not request.user_from_signed_request:
raise NotFound()
instance = self.fetch_instance(request, organization_id_or_slug, id)
if request.data.get("cancel"):
self.unsubscribe(request, instance)
return Response(status=201)
@region_silo_endpoint
| OrganizationUnsubscribeBase |
python | zostera__django-bootstrap4 | tests/forms.py | {
"start": 556,
"end": 2903
} | class ____(forms.Form):
"""Form with a variety of widgets to test bootstrap4 rendering."""
date = forms.DateField(required=False)
datetime = forms.SplitDateTimeField(widget=AdminSplitDateTime(), required=False)
subject = forms.CharField(
max_length=100,
help_text="my_help_text",
required=True,
widget=forms.TextInput(attrs={"placeholder": "placeholdertest"}),
)
xss_field = forms.CharField(label='XSS" onmouseover="alert(\'Hello, XSS\')" foo="', max_length=100)
password = forms.CharField(widget=forms.PasswordInput)
message = forms.CharField(required=False, help_text="<i>my_help_text</i>")
sender = forms.EmailField(label="Sender © unicode", help_text='E.g., "me@example.com"')
secret = forms.CharField(initial=42, widget=forms.HiddenInput)
cc_myself = forms.BooleanField(
required=False, help_text='cc stands for "carbon copy." You will get a copy in your mailbox.'
)
select1 = forms.ChoiceField(choices=RADIO_CHOICES)
select2 = forms.MultipleChoiceField(choices=RADIO_CHOICES, help_text="Check as many as you like.")
select3 = forms.ChoiceField(choices=MEDIA_CHOICES)
select4 = forms.MultipleChoiceField(choices=MEDIA_CHOICES, help_text="Check as many as you like.")
category1 = forms.ChoiceField(choices=RADIO_CHOICES, widget=forms.RadioSelect)
category2 = forms.MultipleChoiceField(
choices=RADIO_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
category3 = forms.ChoiceField(widget=forms.RadioSelect, choices=MEDIA_CHOICES)
category4 = forms.MultipleChoiceField(
choices=MEDIA_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
category5 = forms.ChoiceField(widget=RadioSelectButtonGroup, choices=MEDIA_CHOICES)
addon = forms.CharField(widget=forms.TextInput(attrs={"addon_before": "before", "addon_after": "after"}))
polygon = gisforms.PointField()
required_css_class = "bootstrap4-req"
non_field_error_message = "This is a non field error."
# Set this to allow tests to work properly in Django 1.10+
# More information, see issue #337
use_required_attribute = False
def clean(self):
super().clean()
raise forms.ValidationError(self.non_field_error_message)
| TestForm |
python | pandas-dev__pandas | pandas/io/formats/csvs.py | {
"start": 991,
"end": 10809
} | class ____:
cols: npt.NDArray[np.object_]
def __init__(
self,
formatter: DataFrameFormatter,
path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",
sep: str = ",",
cols: Sequence[Hashable] | None = None,
index_label: IndexLabel | None = None,
mode: str = "w",
encoding: str | None = None,
errors: str = "strict",
compression: CompressionOptions = "infer",
quoting: int | None = None,
lineterminator: str | None = "\n",
chunksize: int | None = None,
quotechar: str | None = '"',
date_format: str | None = None,
doublequote: bool = True,
escapechar: str | None = None,
storage_options: StorageOptions | None = None,
) -> None:
self.fmt = formatter
self.obj = self.fmt.frame
self.filepath_or_buffer = path_or_buf
self.encoding = encoding
self.compression: CompressionOptions = compression
self.mode = mode
self.storage_options = storage_options
self.sep = sep
self.index_label = self._initialize_index_label(index_label)
self.errors = errors
self.quoting = quoting or csvlib.QUOTE_MINIMAL
self.doublequote = doublequote
self.escapechar = escapechar
self.quotechar = self._initialize_quotechar(quotechar)
self.lineterminator = lineterminator or os.linesep
self.date_format = date_format
self.cols = self._initialize_columns(cols)
self.chunksize = self._initialize_chunksize(chunksize)
@property
def na_rep(self) -> str:
return self.fmt.na_rep
@property
def float_format(self) -> FloatFormatType | None:
return self.fmt.float_format
@property
def decimal(self) -> str:
return self.fmt.decimal
@property
def header(self) -> bool | SequenceNotStr[str]:
return self.fmt.header
@property
def index(self) -> bool:
return self.fmt.index
def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:
if index_label is not False:
if index_label is None:
return self._get_index_label_from_obj()
elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)):
# given a string for a DF with Index
return [index_label]
return index_label
def _get_index_label_from_obj(self) -> Sequence[Hashable]:
if isinstance(self.obj.index, ABCMultiIndex):
return self._get_index_label_multiindex()
else:
return self._get_index_label_flat()
def _get_index_label_multiindex(self) -> Sequence[Hashable]:
return [name or "" for name in self.obj.index.names]
def _get_index_label_flat(self) -> Sequence[Hashable]:
index_label = self.obj.index.name
return [""] if index_label is None else [index_label]
def _initialize_quotechar(self, quotechar: str | None) -> str | None:
if self.quoting != csvlib.QUOTE_NONE or self.escapechar is not None:
# prevents crash in _csv
return quotechar
return None
@property
def has_mi_columns(self) -> bool:
return bool(isinstance(self.obj.columns, ABCMultiIndex))
def _initialize_columns(
self, cols: Iterable[Hashable] | None
) -> npt.NDArray[np.object_]:
# validate mi options
if self.has_mi_columns:
if cols is not None:
msg = "cannot specify cols with a MultiIndex on the columns"
raise TypeError(msg)
if cols is not None:
if isinstance(cols, ABCIndex):
cols = cols._get_values_for_csv(**self._number_format)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure cols is just a list of labels
new_cols = self.obj.columns
return new_cols._get_values_for_csv(**self._number_format)
def _initialize_chunksize(self, chunksize: int | None) -> int:
if chunksize is None:
return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1
return int(chunksize)
@property
def _number_format(self) -> dict[str, Any]:
"""Dictionary used for storing number formatting settings."""
return {
"na_rep": self.na_rep,
"float_format": self.float_format,
"date_format": self.date_format,
"quoting": self.quoting,
"decimal": self.decimal,
}
@cache_readonly
def data_index(self) -> Index:
data_index = self.obj.index
if (
isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex))
and self.date_format is not None
):
data_index = Index(
[x.strftime(self.date_format) if notna(x) else "" for x in data_index]
)
elif isinstance(data_index, ABCMultiIndex):
data_index = data_index.remove_unused_levels()
return data_index
@property
def nlevels(self) -> int:
if self.index:
return getattr(self.data_index, "nlevels", 1)
else:
return 0
@property
def _has_aliases(self) -> bool:
return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
@property
def _need_to_save_header(self) -> bool:
return bool(self._has_aliases or self.header)
@property
def write_cols(self) -> SequenceNotStr[Hashable]:
if self._has_aliases:
assert not isinstance(self.header, bool)
if len(self.header) != len(self.cols):
raise ValueError(
f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"
)
return self.header
else:
# self.cols is an ndarray derived from Index._get_values_for_csv,
# so its entries are strings, i.e. hashable
return cast(SequenceNotStr[Hashable], self.cols)
@property
def encoded_labels(self) -> list[Hashable]:
encoded_labels: list[Hashable] = []
if self.index and self.index_label:
assert isinstance(self.index_label, Sequence)
encoded_labels = list(self.index_label)
if not self.has_mi_columns or self._has_aliases:
encoded_labels += list(self.write_cols)
return encoded_labels
def save(self) -> None:
"""
Create the writer & save.
"""
# apply compression and byte/text conversion
with get_handle(
self.filepath_or_buffer,
self.mode,
encoding=self.encoding,
errors=self.errors,
compression=self.compression,
storage_options=self.storage_options,
) as handles:
# Note: self.encoding is irrelevant here
# error: Argument "quoting" to "writer" has incompatible type "int";
# expected "Literal[0, 1, 2, 3]"
self.writer = csvlib.writer(
handles.handle,
lineterminator=self.lineterminator,
delimiter=self.sep,
quoting=self.quoting, # type: ignore[arg-type]
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
)
self._save()
def _save(self) -> None:
if self._need_to_save_header:
self._save_header()
self._save_body()
def _save_header(self) -> None:
if not self.has_mi_columns or self._has_aliases:
self.writer.writerow(self.encoded_labels)
else:
for row in self._generate_multiindex_header_rows():
self.writer.writerow(row)
def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]:
columns = self.obj.columns
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(self.index_label, list) and len(self.index_label) > 1:
col_line.extend([""] * (len(self.index_label) - 1))
col_line.extend(columns._get_level_values(i))
yield col_line
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if self.encoded_labels and set(self.encoded_labels) != {""}:
yield self.encoded_labels + [""] * len(columns)
def _save_body(self) -> None:
nrows = len(self.data_index)
chunks = (nrows // self.chunksize) + 1
for i in range(chunks):
start_i = i * self.chunksize
end_i = min(start_i + self.chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i: int, end_i: int) -> None:
# create the data for a chunk
slicer = slice(start_i, end_i)
df = self.obj.iloc[slicer]
res = df._get_values_for_csv(**self._number_format)
data = list(res._iter_column_arrays())
ix = (
self.data_index[slicer]._get_values_for_csv(**self._number_format)
if self.nlevels != 0
else np.empty(end_i - start_i)
)
libwriters.write_csv_rows(
data,
ix,
self.nlevels,
self.cols,
self.writer,
)
| CSVFormatter |
python | openai__openai-python | src/openai/types/beta/threads/message.py | {
"start": 764,
"end": 975
} | class ____(BaseModel):
file_id: Optional[str] = None
"""The ID of the file to attach to the message."""
tools: Optional[List[AttachmentTool]] = None
"""The tools to add this file to."""
| Attachment |
python | plotly__plotly.py | plotly/graph_objs/splom/_selected.py | {
"start": 233,
"end": 2396
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom"
_path_str = "splom.selected"
_valid_props = {"marker"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.splom.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.splom.selected.Marker`
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.Selected`
marker
:class:`plotly.graph_objects.splom.selected.Marker`
instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Selected |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_test_data.py | {
"start": 2557,
"end": 10637
} | class ____(SearchStrategy):
def do_draw(self, data):
data.draw_bytes(10**6, 10**6)
def test_does_not_double_freeze_in_interval_close():
d = ConjectureData.for_choices((b"hi",))
with pytest.raises(StopTest):
d.draw(BigStrategy())
assert d.frozen
assert not any(eg.end is None for eg in d.spans)
def test_triviality():
d = ConjectureData.for_choices((True, False, b"1"))
d.start_span(label=1)
d.draw(st.booleans())
d.draw(st.booleans())
d.stop_span()
d.start_span(label=2)
d.draw_bytes(1, 1, forced=bytes([2]))
d.stop_span()
d.freeze()
def trivial(u, v):
ex = next(ex for ex in d.spans if ex.start == u and ex.end == v)
return all(node.trivial for node in d.nodes[ex.start : ex.end])
assert not trivial(0, 2)
assert not trivial(0, 1)
assert trivial(1, 2)
assert trivial(2, 3)
def test_example_depth_marking():
d = ConjectureData.for_choices((0,) * 6)
d.draw(st.integers()) # v1
d.start_span(0)
d.draw(st.integers()) # v2
d.draw(st.integers()) # v3
d.stop_span()
d.draw(st.integers()) # v4
d.freeze()
assert len(d.spans) == 6
depths = [(ex.choice_count, ex.depth) for ex in d.spans]
assert depths == [
(4, 0), # top
(1, 1), # v1
(2, 1), # inner
(1, 2), # v2
(1, 2), # v3
(1, 1), # v4
]
def test_has_examples_even_when_empty():
d = ConjectureData.for_choices([])
d.draw(st.just(False))
d.freeze()
assert d.spans
def test_has_cached_examples_even_when_overrun():
d = ConjectureData.for_choices((False,))
d.start_span(3)
d.draw_boolean()
d.stop_span()
try:
d.draw_boolean()
except StopTest:
pass
assert d.status == Status.OVERRUN
assert any(ex.label == 3 and ex.choice_count == 1 for ex in d.spans)
assert d.spans is d.spans
def test_can_observe_draws():
class LoggingObserver(DataObserver):
def __init__(self):
self.log = []
def draw_boolean(self, value: bool, *, was_forced: bool, constraints: dict):
self.log.append(("draw_boolean", value, was_forced))
def draw_integer(self, value: int, *, was_forced: bool, constraints: dict):
self.log.append(("draw_integer", value, was_forced))
def conclude_test(self, *args):
assert d.frozen
self.log.append(("concluded", *args))
observer = LoggingObserver()
d = ConjectureData.for_choices((True, 1, 3), observer=observer)
origin = interesting_origin()
d.draw_boolean()
d.draw_integer(0, 2**7 - 1, forced=10)
d.draw_integer(0, 2**8 - 1)
with pytest.raises(StopTest):
d.conclude_test(Status.INTERESTING, interesting_origin=origin)
assert observer.log == [
("draw_boolean", True, False),
("draw_integer", 10, True),
("draw_integer", 3, False),
("concluded", Status.INTERESTING, origin),
]
def test_calls_concluded_implicitly():
class NoteConcluded(DataObserver):
def conclude_test(self, status, reason):
assert d.frozen
self.conclusion = (status, reason)
observer = NoteConcluded()
d = ConjectureData.for_choices((True,), observer=observer)
d.draw_boolean()
d.freeze()
assert observer.conclusion == (Status.VALID, None)
def test_examples_show_up_as_discarded():
d = ConjectureData.for_choices((True, False, True))
d.start_span(1)
d.draw_boolean()
d.stop_span(discard=True)
d.start_span(1)
d.draw_boolean()
d.stop_span()
d.freeze()
assert len([ex for ex in d.spans if ex.discarded]) == 1
def test_examples_support_negative_indexing():
d = ConjectureData.for_choices((True, True))
d.draw(st.booleans())
d.draw(st.booleans())
d.freeze()
assert d.spans[-1].choice_count == 1
def test_examples_out_of_bounds_index():
d = ConjectureData.for_choices((False,))
d.draw(st.booleans())
d.freeze()
with pytest.raises(IndexError):
d.spans[10]
def test_can_override_label():
d = ConjectureData.for_choices((False,))
d.draw(st.booleans(), label=7)
d.freeze()
assert any(ex.label == 7 for ex in d.spans)
def test_will_mark_too_deep_examples_as_invalid():
d = ConjectureData.for_choices((0,))
s = st.integers()
for _ in range(MAX_DEPTH + 1):
s = s.map(lambda x: None)
with pytest.raises(StopTest):
d.draw(s)
assert d.status == Status.INVALID
def test_empty_strategy_is_invalid():
d = ConjectureData.for_choices([])
with pytest.raises(StopTest):
d.draw(st.nothing())
assert d.status == Status.INVALID
def test_can_note_non_str():
d = ConjectureData.for_choices([])
x = object()
d.note(x)
assert repr(x) in d.output
def test_can_note_str_as_non_repr():
d = ConjectureData.for_choices([])
d.note("foo")
assert d.output == "foo"
def test_result_is_overrun():
d = ConjectureData.for_choices([])
with pytest.raises(StopTest):
d.draw_boolean()
assert d.as_result() is Overrun
def test_trivial_before_force_agrees_with_trivial_after():
d = ConjectureData.for_choices((False, True, True))
d.draw_boolean()
d.draw_boolean(forced=True)
d.draw_boolean()
t1 = [d.nodes[i].trivial for i in range(3)]
d.freeze()
r = d.as_result()
t2 = [n.trivial for n in r.nodes]
t3 = [r.nodes[i].trivial for i in range(3)]
assert t1 == t2 == t3
def test_events_are_noted():
d = ConjectureData.for_choices([])
d.events["hello"] = ""
assert "hello" in d.events
def test_child_indices():
d = ConjectureData.for_choices((True,) * 4)
d.start_span(0) # examples[1]
d.start_span(1) # examples[2]
d.draw(st.booleans()) # examples[3] (st.booleans)
d.draw(st.booleans()) # examples[4] (st.booleans)
d.stop_span()
d.stop_span()
d.draw(st.booleans()) # examples[5] (st.booleans)
d.draw(st.booleans()) # examples[6] (st.booleans)
d.freeze()
assert list(d.spans.children[0]) == [1, 5, 6]
assert list(d.spans.children[1]) == [2]
assert list(d.spans.children[2]) == [3, 4]
assert d.spans[0].parent is None
for ex in list(d.spans)[1:]:
assert ex in d.spans[ex.parent].children
def test_example_equality():
d = ConjectureData.for_choices((False, False))
d.start_span(0)
d.draw_boolean()
d.stop_span()
d.start_span(0)
d.draw_boolean()
d.stop_span()
d.freeze()
examples = list(d.spans)
for ex1, ex2 in itertools.combinations(examples, 2):
assert ex1 != ex2
assert not (ex1 == ex2) # noqa
for ex in examples:
assert ex == ex
assert not (ex != ex) # noqa
assert not (ex == "hello") # noqa
assert ex != "hello"
def test_structural_coverage_is_cached():
assert structural_coverage(50) is structural_coverage(50)
def test_examples_create_structural_coverage():
data = ConjectureData.for_choices([])
data.start_span(42)
data.stop_span()
data.freeze()
assert structural_coverage(42) in data.tags
def test_discarded_examples_do_not_create_structural_coverage():
data = ConjectureData.for_choices([])
data.start_span(42)
data.stop_span(discard=True)
data.freeze()
assert structural_coverage(42) not in data.tags
def test_children_of_discarded_examples_do_not_create_structural_coverage():
data = ConjectureData.for_choices([])
data.start_span(10)
data.start_span(42)
data.stop_span()
data.stop_span(discard=True)
data.freeze()
assert structural_coverage(42) not in data.tags
assert structural_coverage(10) not in data.tags
def test_overruns_at_exactly_max_length():
with buffer_size_limit(1):
data = ConjectureData(prefix=[True], random=None, max_choices=1)
data.draw_boolean()
try:
data.draw_boolean()
except StopTest:
pass
assert data.status is Status.OVERRUN
| BigStrategy |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/vertex_ai/test_experiment_service.py | {
"start": 2730,
"end": 3726
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("ExperimentHook"))
def test_execute(self, mock_hook):
op = DeleteExperimentOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
delete_backing_tensorboard_runs=TEST_DELETE_BACKING_TENSORBOARD_RUNS,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_experiment.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
delete_backing_tensorboard_runs=TEST_DELETE_BACKING_TENSORBOARD_RUNS,
)
| TestVertexAIDeleteExperimentOperator |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 96493,
"end": 97082
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
content: Optional[str] = Field(None, description="Content of the view.")
name: Optional[str] = Field(
None,
description=(
"Name of the view item. In the case of code view, it would be the"
" notebook’s name. In the case of dashboard view, it would be the"
" dashboard’s name."
),
)
type: Optional[ViewType] = Field(None, description="Type of the view item.")
| ViewItem |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_dash_address.py | {
"start": 1896,
"end": 4640
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid Dashcoin addresses."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"XsVkhTxLjzdXP1xZWtEFRj1mDhWcU6d8tE",
"yPv7h2i8v3dJjfSH4L3x91JSJszjdbsJJA",
"Xty4Q4B1CCm1qA4sMFkmczZqCtftFJuEse",
"XwLKjRCfGXuq598vCod8MKsyi83QbLJfiy",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_dash_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidDashAddress().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidDashAddress |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/override1.py | {
"start": 1859,
"end": 1941
} | class ____(F):
@override
@evil_wrapper
def method1(self):
pass
| G |
python | kubernetes-client__python | kubernetes/base/dynamic/resource.py | {
"start": 3841,
"end": 8538
} | class ____(Resource):
""" Represents a list of API objects """
def __init__(self, client, group='', api_version='v1', base_kind='', kind=None, base_resource_lookup=None):
self.client = client
self.group = group
self.api_version = api_version
self.kind = kind or '{}List'.format(base_kind)
self.base_kind = base_kind
self.base_resource_lookup = base_resource_lookup
self.__base_resource = None
def base_resource(self):
if self.__base_resource:
return self.__base_resource
elif self.base_resource_lookup:
self.__base_resource = self.client.resources.get(**self.base_resource_lookup)
return self.__base_resource
elif self.base_kind:
self.__base_resource = self.client.resources.get(group=self.group, api_version=self.api_version, kind=self.base_kind)
return self.__base_resource
return None
def _items_to_resources(self, body):
""" Takes a List body and return a dictionary with the following structure:
{
'api_version': str,
'kind': str,
'items': [{
'resource': Resource,
'name': str,
'namespace': str,
}]
}
"""
if body is None:
raise ValueError("You must provide a body when calling methods on a ResourceList")
api_version = body['apiVersion']
kind = body['kind']
items = body.get('items')
if not items:
raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList')
if self.kind != kind:
raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind))
return {
'api_version': api_version,
'kind': kind,
'items': [self._item_to_resource(item) for item in items]
}
def _item_to_resource(self, item):
metadata = item.get('metadata', {})
resource = self.base_resource()
if not resource:
api_version = item.get('apiVersion', self.api_version)
kind = item.get('kind', self.base_kind)
resource = self.client.resources.get(api_version=api_version, kind=kind)
return {
'resource': resource,
'definition': item,
'name': metadata.get('name'),
'namespace': metadata.get('namespace')
}
def get(self, body, name=None, namespace=None, **kwargs):
if name:
raise ValueError('Operations on ResourceList objects do not support the `name` argument')
resource_list = self._items_to_resources(body)
response = copy.deepcopy(body)
response['items'] = [
item['resource'].get(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
for item in resource_list['items']
]
return ResourceInstance(self, response)
def delete(self, body, name=None, namespace=None, **kwargs):
if name:
raise ValueError('Operations on ResourceList objects do not support the `name` argument')
resource_list = self._items_to_resources(body)
response = copy.deepcopy(body)
response['items'] = [
item['resource'].delete(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
for item in resource_list['items']
]
return ResourceInstance(self, response)
def verb_mapper(self, verb, body, **kwargs):
resource_list = self._items_to_resources(body)
response = copy.deepcopy(body)
response['items'] = [
getattr(item['resource'], verb)(body=item['definition'], **kwargs).to_dict()
for item in resource_list['items']
]
return ResourceInstance(self, response)
def create(self, *args, **kwargs):
return self.verb_mapper('create', *args, **kwargs)
def replace(self, *args, **kwargs):
return self.verb_mapper('replace', *args, **kwargs)
def patch(self, *args, **kwargs):
return self.verb_mapper('patch', *args, **kwargs)
def to_dict(self):
return {
'_type': 'ResourceList',
'group': self.group,
'api_version': self.api_version,
'kind': self.kind,
'base_kind': self.base_kind
}
def __getattr__(self, name):
if self.base_resource():
return getattr(self.base_resource(), name)
return None
| ResourceList |
python | has2k1__plotnine | plotnine/iapi.py | {
"start": 866,
"end": 1308
} | class ____:
"""
Scale information after it has been trained
"""
scale: scale
aesthetics: Sequence[ScaledAestheticsName]
name: Optional[str]
# Trained limits of the scale
limits: tuple[float, float] | Sequence[str]
# Physical size of scale, including expansions
range: CoordRange
breaks: Sequence[float] | Sequence[str]
minor_breaks: FloatArrayLike
labels: Sequence[str]
@dataclass
| scale_view |
python | django__django | tests/staticfiles_tests/cases.py | {
"start": 265,
"end": 1578
} | class ____:
"""
Test case with a couple utility assertions.
"""
def assertFileContains(self, filepath, text):
self.assertIn(
text,
self._get_file(filepath),
"'%s' not in '%s'" % (text, filepath),
)
def assertFileNotFound(self, filepath):
with self.assertRaises(OSError):
self._get_file(filepath)
def render_template(self, template, **kwargs):
if isinstance(template, str):
template = Template(template)
return template.render(Context(**kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return (
"{%% load static from static %%}{%% static '%s' as var %%}{{ var }}"
% path
)
return "{%% load static from static %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
with self.assertRaises(exc):
self.assertStaticRenders(path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
| BaseStaticFilesMixin |
python | django__django | django/db/models/functions/text.py | {
"start": 7192,
"end": 7898
} | class ____(Func):
function = "REPEAT"
output_field = CharField()
def __init__(self, expression, number, **extra):
if (
not hasattr(number, "resolve_expression")
and number is not None
and number < 0
):
raise ValueError("'number' must be greater or equal to 0.")
super().__init__(expression, number, **extra)
def as_oracle(self, compiler, connection, **extra_context):
expression, number = self.source_expressions
length = None if number is None else Length(expression) * number
rpad = RPad(expression, length, expression)
return rpad.as_sql(compiler, connection, **extra_context)
| Repeat |
python | cherrypy__cherrypy | cherrypy/test/modwsgi.py | {
"start": 3105,
"end": 5124
} | class ____(helper.Supervisor):
"""Server Controller for ModWSGI and CherryPy."""
using_apache = True
using_wsgi = True
template = conf_modwsgi
def __str__(self):
"""Render a :class:`ModWSGISupervisor` instance as a string."""
return 'ModWSGI Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
"""Spawn an Apache ``mod_wsgi`` supervisor process."""
mpconf = CONF_PATH
if not os.path.isabs(mpconf):
mpconf = os.path.join(curdir, mpconf)
with open(mpconf, 'wb') as f:
output = self.template % {
'port': self.port,
'testmod': modulename,
'curdir': curdir,
}
f.write(output)
result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
if result:
print(result)
# Make a request so mod_wsgi starts up our app.
# If we don't, concurrent initial requests will 404.
portend.occupied('127.0.0.1', self.port, timeout=5)
webtest.openURL('/ihopetheresnodefault', port=self.port)
time.sleep(1)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
loaded = False
def application(environ, start_response):
"""Respond to a WSGI-interfaced HTTP request via test module."""
global loaded
if not loaded:
loaded = True
modname = 'cherrypy.test.' + environ['testmod']
mod = __import__(modname, globals(), locals(), [''])
mod.setup_server()
cherrypy.config.update(
{
'log.error_file': os.path.join(curdir, 'test.error.log'),
'log.access_file': os.path.join(curdir, 'test.access.log'),
'environment': 'test_suite',
'engine.SIGHUP': None,
'engine.SIGTERM': None,
},
)
return cherrypy.tree(environ, start_response)
| ModWSGISupervisor |
python | django__django | tests/generic_views/views.py | {
"start": 7427,
"end": 7514
} | class ____(BookSigningConfig, generic.MonthArchiveView):
pass
| BookSigningMonthArchive |
python | pennersr__django-allauth | allauth/socialaccount/providers/yahoo/provider.py | {
"start": 319,
"end": 1199
} | class ____(OAuth2Provider):
id = "yahoo"
name = "Yahoo"
account_class = YahooAccount
oauth2_adapter_class = YahooOAuth2Adapter
def get_default_scope(self):
"""
Doc on scopes available at
https://developer.yahoo.com/oauth2/guide/yahoo_scopes/
"""
return ["profile", "email"]
def extract_uid(self, data):
return data["sub"]
def extract_common_fields(self, data):
return dict(
email=data["email"],
last_name=data["family_name"],
first_name=data["given_name"],
)
def extract_email_addresses(self, data):
ret = []
email = data.get("email")
if email and data.get("email_verified"):
ret.append(EmailAddress(email=email, verified=True, primary=True))
return ret
provider_classes = [YahooProvider]
| YahooProvider |
python | tensorflow__tensorflow | tensorflow/python/util/module_wrapper_test.py | {
"start": 1080,
"end": 1125
} | class ____(types.ModuleType):
pass
| MockModule |
python | django__django | tests/validation/models.py | {
"start": 2052,
"end": 2377
} | class ____(models.Model):
start_date = models.DateField()
end_date = models.DateTimeField()
count = models.IntegerField(
unique_for_date="start_date", unique_for_year="end_date"
)
order = models.IntegerField(unique_for_month="end_date")
name = models.CharField(max_length=100)
| UniqueForDateModel |
python | ray-project__ray | python/ray/data/datasource/datasource.py | {
"start": 15168,
"end": 17842
} | class ____(Datasource):
"""An example datasource that generates rows with random int64 columns.
Examples:
>>> import ray
>>> from ray.data.datasource import RandomIntRowDatasource
>>> source = RandomIntRowDatasource() # doctest: +SKIP
>>> ray.data.read_datasource( # doctest: +SKIP
... source, n=10, num_columns=2).take()
{'c_0': 1717767200176864416, 'c_1': 999657309586757214}
{'c_0': 4983608804013926748, 'c_1': 1160140066899844087}
"""
def __init__(self, n: int, num_columns: int):
"""Initialize the datasource that generates random-integer rows.
Args:
n: The number of rows to generate.
num_columns: The number of columns to generate.
"""
self._n = n
self._num_columns = num_columns
def estimate_inmemory_data_size(self) -> Optional[int]:
return self._n * self._num_columns * 8
def get_read_tasks(
self,
parallelism: int,
per_task_row_limit: Optional[int] = None,
) -> List[ReadTask]:
_check_pyarrow_version()
import pyarrow
read_tasks: List[ReadTask] = []
n = self._n
num_columns = self._num_columns
block_size = max(1, n // parallelism)
def make_block(count: int, num_columns: int) -> Block:
return pyarrow.Table.from_arrays(
np.random.randint(
np.iinfo(np.int64).max, size=(num_columns, count), dtype=np.int64
),
names=[f"c_{i}" for i in range(num_columns)],
)
schema = pyarrow.Table.from_pydict(
{f"c_{i}": [0] for i in range(num_columns)}
).schema
i = 0
while i < n:
count = min(block_size, n - i)
meta = BlockMetadata(
num_rows=count,
size_bytes=8 * count * num_columns,
input_files=None,
exec_stats=None,
)
read_tasks.append(
ReadTask(
lambda count=count, num_columns=num_columns: [
make_block(count, num_columns)
],
meta,
schema=schema,
per_task_row_limit=per_task_row_limit,
)
)
i += block_size
return read_tasks
def get_name(self) -> str:
"""Return a human-readable name for this datasource.
This will be used as the names of the read tasks.
Note: overrides the base `Datasource` method.
"""
return "RandomInt"
| RandomIntRowDatasource |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 20718,
"end": 22959
} | class ____(test.TestCase, parameterized.TestCase):
def testReduce(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
def all_reduce(device):
with ops.device(device):
token = create_ordering_token()
@def_function.function(jit_compile=True)
def f():
return _collective_ops.all_reduce_v2([1.],
group_size,
group_key,
instance_key,
ordering_token=token)
with ops.device(device):
results.append(f())
t0 = threading.Thread(target=all_reduce, args=(device0,))
t1 = threading.Thread(target=all_reduce, args=(device1,))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[2.], [2.]])
def testReduceSameGraph(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
@def_function.function(jit_compile=True)
def func():
def all_reduce(device):
with ops.device(device):
token = create_ordering_token()
return _collective_ops.all_reduce_v2([1.],
group_size,
group_key,
instance_key,
ordering_token=token)
results.append(all_reduce(device0))
results.append(all_reduce(device1))
return results
# FIXME(b/204228837): the error shall no longer be about resources
# after multi-device support in jit_compile lands. This will likely
# becomes a deadlock near ResolveDeviceAssignment, or an error in the MLIR
# bridge on resetting CollectiveInfo.
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Trying to access resource'):
func()
@combinations.generate(
combinations.combine(
required_physical_gpus=2, mode='eager', jit_compile=[True, False]))
| XlaTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-tplcentral/source_tplcentral/streams.py | {
"start": 5653,
"end": 6735
} | class ____(IncrementalTplcentralStream):
# https://api.3plcentral.com/rels/inventory/stockdetails
upstream_primary_key = "ReceiveItemId"
upstream_cursor_field = "ReceivedDate"
collection_field = "ResourceList"
page_size = 500
def path(self, **kwargs) -> str:
return "inventory/stockdetails"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = super().request_params(
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
params.update(
{
"customerid": self.customer_id,
"facilityid": self.facility_id,
"sort": self.upstream_cursor_field,
}
)
cursor = stream_slice.get(self.cursor_field)
if cursor:
params.update({"rql": f"{self.upstream_cursor_field}=ge={cursor}"})
return params
| StockDetails |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 2964,
"end": 3109
} | class ____(DeploymentAction):
"""Resumes the given Deployment"""
type: Literal["resume-deployment"] = "resume-deployment"
| ResumeDeployment |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/metadata.py | {
"start": 504,
"end": 1572
} | class ____(MetadataCheck):
name = f"Connectors must have valid {consts.METADATA_FILE_NAME} file"
description = f"Connectors must have a `{consts.METADATA_FILE_NAME}` file at the root of their directory. This file is used to build our connector registry. Its structure must follow our metadata schema. Field values are also validated. This is to ensure that all connectors have the required metadata fields and that the metadata is valid. More details in this [documentation]({consts.METADATA_DOCUMENTATION_URL})."
def _run(self, connector: Connector) -> CheckResult:
deserialized_metadata, error = validate_and_load(
connector.metadata_file_path,
PRE_UPLOAD_VALIDATORS,
ValidatorOptions(docs_path=str(connector.documentation_file_path)),
)
if not deserialized_metadata:
return self.fail(connector=connector, message=f"Metadata file is invalid: {error}")
return self.pass_(
connector=connector,
message="Metadata file valid.",
)
| ValidateMetadata |
python | donnemartin__interactive-coding-challenges | graphs_trees/invert_tree/test_invert_tree.py | {
"start": 18,
"end": 848
} | class ____(unittest.TestCase):
def test_invert_tree(self):
root = Node(5)
bst = InverseBst(root)
node2 = bst.insert(2)
node3 = bst.insert(3)
node1 = bst.insert(1)
node7 = bst.insert(7)
node6 = bst.insert(6)
node9 = bst.insert(9)
result = bst.invert_tree()
self.assertEqual(result, root)
self.assertEqual(result.left, node7)
self.assertEqual(result.right, node2)
self.assertEqual(result.left.left, node9)
self.assertEqual(result.left.right, node6)
self.assertEqual(result.right.left, node3)
self.assertEqual(result.right.right, node1)
print('Success: test_invert_tree')
def main():
test = TestInvertTree()
test.test_invert_tree()
if __name__ == '__main__':
main()
| TestInvertTree |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 1899,
"end": 1946
} | class ____:
__slots__ = func()
| PotentiallyGood |
python | nedbat__coveragepy | tests/mixins.py | {
"start": 4051,
"end": 5056
} | class ____:
"""
Adapter from the pytest capsys fixture to more convenient methods.
This doesn't also output to the real stdout, so we probably want to move
to "real" capsys when we can use fixtures in test methods.
Once you've used one of these methods, the capturing is reset, so another
invocation will only return the delta.
"""
@pytest.fixture(autouse=True)
def _capcapsys(self, capsys: pytest.CaptureFixture[str]) -> None:
"""Grab the fixture so our methods can use it."""
self.capsys = capsys
def stdouterr(self) -> tuple[str, str]:
"""Returns (out, err), two strings for stdout and stderr."""
return cast(tuple[str, str], self.capsys.readouterr())
def stdout(self) -> str:
"""Returns a string, the captured stdout."""
return self.capsys.readouterr().out
def stderr(self) -> str:
"""Returns a string, the captured stderr."""
return self.capsys.readouterr().err
| StdStreamCapturingMixin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py | {
"start": 1257,
"end": 1430
} | class ____:
def __init__(self, foo:int, bar:list):
self.foo = foo
self.bar = bar
self.spam = " - ".join([foo, bar])
| NoWarningsComplicatedAssignment |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/vt100.py | {
"start": 463,
"end": 6586
} | class ____(Input):
"""
Vt100 input for Posix systems.
(This uses a posix file descriptor that can be registered in the event loop.)
"""
# For the error messages. Only display "Input is not a terminal" once per
# file descriptor.
_fds_not_a_terminal: set[int] = set()
def __init__(self, stdin: TextIO) -> None:
# Test whether the given input object has a file descriptor.
# (Idle reports stdin to be a TTY, but fileno() is not implemented.)
try:
# This should not raise, but can return 0.
stdin.fileno()
except io.UnsupportedOperation as e:
if "idlelib.run" in sys.modules:
raise io.UnsupportedOperation(
"Stdin is not a terminal. Running from Idle is not supported."
) from e
else:
raise io.UnsupportedOperation("Stdin is not a terminal.") from e
# Even when we have a file descriptor, it doesn't mean it's a TTY.
# Normally, this requires a real TTY device, but people instantiate
# this class often during unit tests as well. They use for instance
# pexpect to pipe data into an application. For convenience, we print
# an error message and go on.
isatty = stdin.isatty()
fd = stdin.fileno()
if not isatty and fd not in Vt100Input._fds_not_a_terminal:
msg = "Warning: Input is not a terminal (fd=%r).\n"
sys.stderr.write(msg % fd)
sys.stderr.flush()
Vt100Input._fds_not_a_terminal.add(fd)
#
self.stdin = stdin
# Create a backup of the fileno(). We want this to work even if the
# underlying file is closed, so that `typeahead_hash()` keeps working.
self._fileno = stdin.fileno()
self._buffer: list[KeyPress] = [] # Buffer to collect the Key objects.
self.stdin_reader = PosixStdinReader(self._fileno, encoding=stdin.encoding)
self.vt100_parser = Vt100Parser(
lambda key_press: self._buffer.append(key_press)
)
def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:
"""
Return a context manager that makes this input active in the current
event loop.
"""
return _attached_input(self, input_ready_callback)
def detach(self) -> ContextManager[None]:
"""
Return a context manager that makes sure that this input is not active
in the current event loop.
"""
return _detached_input(self)
def read_keys(self) -> list[KeyPress]:
"Read list of KeyPress."
# Read text from stdin.
data = self.stdin_reader.read()
# Pass it through our vt100 parser.
self.vt100_parser.feed(data)
# Return result.
result = self._buffer
self._buffer = []
return result
def flush_keys(self) -> list[KeyPress]:
"""
Flush pending keys and return them.
(Used for flushing the 'escape' key.)
"""
# Flush all pending keys. (This is most important to flush the vt100
# 'Escape' key early when nothing else follows.)
self.vt100_parser.flush()
# Return result.
result = self._buffer
self._buffer = []
return result
@property
def closed(self) -> bool:
return self.stdin_reader.closed
def raw_mode(self) -> ContextManager[None]:
return raw_mode(self.stdin.fileno())
def cooked_mode(self) -> ContextManager[None]:
return cooked_mode(self.stdin.fileno())
def fileno(self) -> int:
return self.stdin.fileno()
def typeahead_hash(self) -> str:
return f"fd-{self._fileno}"
_current_callbacks: dict[
tuple[AbstractEventLoop, int], Callable[[], None] | None
] = {} # (loop, fd) -> current callback
@contextlib.contextmanager
def _attached_input(
input: Vt100Input, callback: Callable[[], None]
) -> Generator[None, None, None]:
"""
Context manager that makes this input active in the current event loop.
:param input: :class:`~prompt_toolkit.input.Input` object.
:param callback: Called when the input is ready to read.
"""
loop = get_running_loop()
fd = input.fileno()
previous = _current_callbacks.get((loop, fd))
def callback_wrapper() -> None:
"""Wrapper around the callback that already removes the reader when
the input is closed. Otherwise, we keep continuously calling this
callback, until we leave the context manager (which can happen a bit
later). This fixes issues when piping /dev/null into a prompt_toolkit
application."""
if input.closed:
loop.remove_reader(fd)
callback()
try:
loop.add_reader(fd, callback_wrapper)
except PermissionError:
# For `EPollSelector`, adding /dev/null to the event loop will raise
# `PermissionError` (that doesn't happen for `SelectSelector`
# apparently). Whenever we get a `PermissionError`, we can raise
# `EOFError`, because there's not more to be read anyway. `EOFError` is
# an exception that people expect in
# `prompt_toolkit.application.Application.run()`.
# To reproduce, do: `ptpython 0< /dev/null 1< /dev/null`
raise EOFError
_current_callbacks[loop, fd] = callback
try:
yield
finally:
loop.remove_reader(fd)
if previous:
loop.add_reader(fd, previous)
_current_callbacks[loop, fd] = previous
else:
del _current_callbacks[loop, fd]
@contextlib.contextmanager
def _detached_input(input: Vt100Input) -> Generator[None, None, None]:
loop = get_running_loop()
fd = input.fileno()
previous = _current_callbacks.get((loop, fd))
if previous:
loop.remove_reader(fd)
_current_callbacks[loop, fd] = None
try:
yield
finally:
if previous:
loop.add_reader(fd, previous)
_current_callbacks[loop, fd] = previous
| Vt100Input |
python | tornadoweb__tornado | demos/websocket/chatdemo.py | {
"start": 909,
"end": 1400
} | class ____(tornado.web.Application):
def __init__(self):
handlers = [(r"/", MainHandler), (r"/chatsocket", ChatSocketHandler)]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
super().__init__(handlers, **settings)
| Application |
python | huggingface__transformers | src/transformers/feature_extraction_sequence_utils.py | {
"start": 996,
"end": 18982
} | class ____(FeatureExtractionMixin):
"""
This is a general feature extraction class for speech recognition.
Args:
feature_size (`int`):
The feature dimension of the extracted features.
sampling_rate (`int`):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`):
The value that is used to fill the padding values / vectors.
"""
def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.padding_side = kwargs.pop("padding_side", "right")
self.return_attention_mask = kwargs.pop("return_attention_mask", True)
super().__init__(**kwargs)
def pad(
self,
processed_features: Union[
BatchFeature,
list[BatchFeature],
dict[str, BatchFeature],
dict[str, list[BatchFeature]],
list[dict[str, BatchFeature]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
truncation: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
"""
Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
max sequence length in the batch.
Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`,
`self.padding_value`)
<Tip>
If the `processed_features` passed are dictionary of numpy arrays or PyTorch tensors the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, you will lose the specific device of your tensors however.
</Tip>
Args:
processed_features ([`BatchFeature`], list of [`BatchFeature`], `dict[str, list[float]]`, `dict[str, list[list[float]]` or `list[dict[str, list[float]]]`):
Processed inputs. Can represent one input ([`BatchFeature`] or `dict[str, list[float]]`) or a batch of
input values / vectors (list of [`BatchFeature`], *dict[str, list[list[float]]]* or *list[dict[str,
list[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `list[float]` you can have tensors (numpy arrays or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
# Call .keys() explicitly for compatibility with TensorDict and other Mapping subclasses
processed_features = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys())}"
)
required_input = processed_features[self.model_input_names[0]]
return_attention_mask = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(required_input) == 0:
if return_attention_mask:
processed_features["attention_mask"] = []
return processed_features
# If we have PyTorch tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if return_tensors is None:
if is_torch_tensor(first_element):
return_tensors = "pt"
elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
return_tensors = "np"
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
"Should be one of a python, numpy, or pytorch object."
)
for key, value in processed_features.items():
if isinstance(value[0], (int, float)):
processed_features[key] = to_numpy(value)
else:
processed_features[key] = [to_numpy(v) for v in value]
# Convert padding_strategy in PaddingStrategy
padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
required_input = processed_features[self.model_input_names[0]]
batch_size = len(required_input)
if not all(len(v) == batch_size for v in processed_features.values()):
raise ValueError("Some items in the output dictionary have a different batch size than others.")
truncated_inputs = []
for i in range(batch_size):
inputs = {k: v[i] for k, v in processed_features.items()}
# truncation
inputs_slice = self._truncate(
inputs,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
truncation=truncation,
)
truncated_inputs.append(inputs_slice)
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
# padding
outputs = self._pad(
truncated_inputs[i],
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
if value.dtype is np.dtype(np.float64):
value = value.astype(np.float32)
batch_outputs[key].append(value)
return BatchFeature(batch_outputs, tensor_type=return_tensors)
def _pad(
self,
processed_features: Union[dict[str, np.ndarray], BatchFeature],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad inputs (on left/right and up to predefined length or max length in the batch)
Args:
processed_features (`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see below)
padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The feature_extractor padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of (`int`, *optional*):
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Set to False to avoid returning attention mask (default: set to model specifics)
"""
required_input = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
processed_features["attention_mask"] = np.pad(
processed_features["attention_mask"], (0, difference)
)
padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
processed_features[self.model_input_names[0]] = np.pad(
required_input, padding_shape, "constant", constant_values=self.padding_value
)
elif self.padding_side == "left":
if return_attention_mask:
processed_features["attention_mask"] = np.pad(
processed_features["attention_mask"], (difference, 0)
)
padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
processed_features[self.model_input_names[0]] = np.pad(
required_input, padding_shape, "constant", constant_values=self.padding_value
)
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return processed_features
def _truncate(
self,
processed_features: Union[dict[str, np.ndarray], BatchFeature],
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
truncation: Optional[bool] = None,
):
"""
Truncate inputs to predefined length or max length in the batch
Args:
processed_features(`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
maximum length of the returned list and optionally padding length (see below)
pad_to_multiple_of (`int`, *optional*) :
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
required_input = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_truncated = len(required_input) > max_length
if needs_to_be_truncated:
processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
"""
Find the correct padding strategy
"""
# Get padding strategy
if padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
elif isinstance(padding, PaddingStrategy):
padding_strategy = padding
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
)
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
)
return padding_strategy
def fetch_audio(self, audio_url_or_urls: Union[str, list[str], list[list[str]]]):
"""
Convert a single or a list of urls into the corresponding `np.ndarray` objects.
If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
returned.
"""
if isinstance(audio_url_or_urls, list):
return [self.fetch_audio(x) for x in audio_url_or_urls]
elif isinstance(audio_url_or_urls, str):
return load_audio(audio_url_or_urls)
elif is_valid_audio(audio_url_or_urls):
return audio_url_or_urls
else:
raise TypeError(f"only a single or a list of entries is supported but got type={type(audio_url_or_urls)}")
| SequenceFeatureExtractor |
python | dask__distributed | distributed/tests/test_utils_test.py | {
"start": 9023,
"end": 33708
} | class ____(Server):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.handlers["ping"] = self.pong
self.counter = 0
def pong(self, comm):
self.counter += 1
return "pong"
@gen_test()
async def test_locked_comm_drop_in_replacement(loop):
async with MyServer({}) as a, await MyServer({}) as b:
await a.listen(0)
read_event = asyncio.Event()
read_event.set()
read_queue = asyncio.Queue()
original_pool = a.rpc
a.rpc = _LockedCommPool(
original_pool, read_event=read_event, read_queue=read_queue
)
await b.listen(0)
# Event is set, the pool works like an ordinary pool
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
assert res == "pong"
assert b.counter == 1
read_event.clear()
# Can also be used without a lock to intercept network traffic
a.rpc = _LockedCommPool(original_pool, read_queue=read_queue)
a.rpc.remove(b.address)
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
@gen_test()
async def test_locked_comm_intercept_read(loop):
async with MyServer({}) as a, MyServer({}) as b:
await a.listen(0)
await b.listen(0)
read_event = asyncio.Event()
read_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, read_event=read_event, read_queue=read_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
# We didn't block the write but merely the read. The remove should have
# received the message and responded already
while not b.counter:
await asyncio.sleep(0.001)
with pytest.raises(asyncio.TimeoutError):
await wait_for(asyncio.shield(fut), 0.01)
assert await read_queue.get() == (b.address, "pong")
read_event.set()
assert await fut == "pong"
@gen_test()
async def test_locked_comm_intercept_write(loop):
async with MyServer({}) as a, MyServer({}) as b:
await a.listen(0)
await b.listen(0)
write_event = asyncio.Event()
write_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, write_event=write_event, write_queue=write_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
with pytest.raises(asyncio.TimeoutError):
await wait_for(asyncio.shield(fut), 0.01)
# Write was blocked. The remote hasn't received the message, yet
assert b.counter == 0
assert await write_queue.get() == (b.address, {"op": "ping", "reply": True})
write_event.set()
assert await fut == "pong"
def test_assert_story():
now = time()
story = [
("foo", "id1", now - 600),
("bar", "id2", now),
("baz", {1: 2}, "id2", now),
]
# strict=False
assert_story(story, [("foo",), ("bar",), ("baz", {1: 2})])
assert_story(story, [])
assert_story(story, [("foo",)])
assert_story(story, [("foo",), ("bar",)])
assert_story(story, [("baz", lambda d: d[1] == 2)])
with pytest.raises(AssertionError):
assert_story(story, [("foo", "nomatch")])
with pytest.raises(AssertionError):
assert_story(story, [("baz",)])
with pytest.raises(AssertionError):
assert_story(story, [("baz", {1: 3})])
with pytest.raises(AssertionError):
assert_story(story, [("foo",), ("bar",), ("baz", "extra"), ("+1",)])
with pytest.raises(AssertionError):
assert_story(story, [("baz", lambda d: d[1] == 3)])
with pytest.raises(KeyError): # Faulty lambda
assert_story(story, [("baz", lambda d: d[2] == 1)])
assert_story([], [])
assert_story([("foo", "id1", now)], [("foo",)])
with pytest.raises(AssertionError):
assert_story([], [("foo",)])
# strict=True
assert_story([], [], strict=True)
assert_story([("foo", "id1", now)], [("foo",)])
assert_story(story, [("foo",), ("bar",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_story(story, [("foo",), ("bar",)], strict=True)
with pytest.raises(AssertionError):
assert_story(story, [("foo",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_story(story, [], strict=True)
@pytest.mark.parametrize(
"story_factory",
[
pytest.param(lambda: [()], id="Missing payload, stimulus_id, ts"),
pytest.param(lambda: [("foo",)], id="Missing (stimulus_id, ts)"),
pytest.param(lambda: [("foo", "bar")], id="Missing ts"),
pytest.param(lambda: [("foo", "bar", "baz")], id="ts is not a float"),
pytest.param(lambda: [("foo", "bar", time() + 3600)], id="ts is in the future"),
pytest.param(lambda: [("foo", "bar", time() - 7200)], id="ts is too old"),
pytest.param(lambda: [("foo", 123, time())], id="stimulus_id is not a str"),
pytest.param(lambda: [("foo", "", time())], id="stimulus_id is an empty str"),
pytest.param(lambda: [("", time())], id="no payload"),
pytest.param(
lambda: [("foo", "id", time()), ("foo", "id", time() - 10)],
id="timestamps out of order",
),
],
)
def test_assert_story_malformed_story(story_factory):
# defer the calls to time() to when the test runs rather than collection
story = story_factory()
with pytest.raises(AssertionError, match="Malformed story event"):
assert_story(story, [])
@pytest.mark.parametrize("strict", [True, False])
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_assert_story_identity(c, s, a, strict):
f1 = c.submit(inc, 1, key="f1")
f2 = c.submit(inc, f1, key="f2")
assert await f2 == 3
scheduler_story = s.story(f2.key)
assert scheduler_story
worker_story = a.state.story(f2.key)
assert worker_story
assert_story(worker_story, worker_story, strict=strict)
assert_story(scheduler_story, scheduler_story, strict=strict)
with pytest.raises(AssertionError):
assert_story(scheduler_story, worker_story, strict=strict)
with pytest.raises(AssertionError):
assert_story(worker_story, scheduler_story, strict=strict)
# Note: WINDOWS constant doesn't work with `mypy --platform win32`
if sys.platform == "win32":
TERM_SIGNALS = (signal.SIGTERM, signal.SIGINT)
else:
TERM_SIGNALS = (signal.SIGTERM, signal.SIGHUP, signal.SIGINT)
def garbage_process(
barrier: Barrier, ignore_sigterm: bool = False, t: float = 3600
) -> None:
if ignore_sigterm:
for signum in TERM_SIGNALS:
signal.signal(signum, signal.SIG_IGN)
barrier.wait()
sleep(t)
def test_check_process_leak():
barrier = get_mp_context().Barrier(parties=2)
with pytest.raises(AssertionError):
with check_process_leak(check=True, check_timeout=0.01):
p = get_mp_context().Process(target=garbage_process, args=(barrier,))
p.start()
barrier.wait()
assert not p.is_alive()
def test_check_process_leak_slow_cleanup():
"""check_process_leak waits a bit for processes to terminate themselves"""
barrier = get_mp_context().Barrier(parties=2)
with check_process_leak(check=True):
p = get_mp_context().Process(target=garbage_process, args=(barrier, False, 0.2))
p.start()
barrier.wait()
assert not p.is_alive()
@pytest.mark.parametrize(
"ignore_sigterm",
[False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_pre_cleanup(ignore_sigterm):
barrier = get_mp_context().Barrier(parties=2)
p = get_mp_context().Process(target=garbage_process, args=(barrier, ignore_sigterm))
p.start()
barrier.wait()
with check_process_leak(term_timeout=0.2):
assert not p.is_alive()
@pytest.mark.parametrize(
"ignore_sigterm",
[False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_post_cleanup(ignore_sigterm):
barrier = get_mp_context().Barrier(parties=2)
with check_process_leak(check=False, term_timeout=0.2):
p = get_mp_context().Process(
target=garbage_process, args=(barrier, ignore_sigterm)
)
p.start()
barrier.wait()
assert not p.is_alive()
@pytest.mark.parametrize("nanny", [True, False])
def test_start_failure_worker(nanny):
if nanny:
ctx = raises_with_cause(RuntimeError, None, TypeError, None)
else:
ctx = pytest.raises(TypeError)
with ctx:
with cluster(nanny=nanny, worker_kwargs={"foo": "bar"}):
return
def test_start_failure_scheduler():
with pytest.raises(TypeError):
with cluster(scheduler_kwargs={"foo": "bar"}):
return
def test_invalid_transitions(capsys):
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_log_invalid_transitions(c, s, a):
x = c.submit(inc, 1, key="task-name")
await x
ts = a.state.tasks["task-name"]
ev = PauseEvent(stimulus_id="test")
with mock.patch.object(
WorkerState, "_handle_event", return_value=({ts: "foo"}, [])
):
with pytest.raises(InvalidTransition):
a.handle_stimulus(ev)
while not s.get_events("invalid-worker-transition"):
await asyncio.sleep(0.01)
with pytest.raises(Exception) as info:
test_log_invalid_transitions()
assert "invalid" in str(info).lower()
assert "worker" in str(info).lower()
assert "transition" in str(info).lower()
out, err = capsys.readouterr()
assert "foo" in out + err
assert "task-name" in out + err
def test_invalid_worker_state(capsys):
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_log_invalid_worker_task_state(c, s, a):
x = c.submit(inc, 1, key="task-name")
await x
a.state.tasks[x.key].state = "released"
with pytest.raises(InvalidTaskState):
a.validate_state()
while not s.get_events("invalid-worker-task-state"):
await asyncio.sleep(0.01)
with pytest.raises(Exception) as info:
test_log_invalid_worker_task_state()
out, err = capsys.readouterr()
assert "released" in out + err
assert "task-name" in out + err
def test_raises_with_cause():
with raises_with_cause(RuntimeError, "exception", ValueError, "cause"):
raise RuntimeError("exception") from ValueError("cause")
with raises_with_cause(RuntimeError, "exception", ValueError, "tial mat"):
raise RuntimeError("exception") from ValueError("partial match")
with raises_with_cause(RuntimeError, None, ValueError, "cause"):
raise RuntimeError("exception") from ValueError("cause")
with raises_with_cause(RuntimeError, "exception", ValueError, None):
raise RuntimeError("exception") from ValueError("bar")
with raises_with_cause(RuntimeError, None, ValueError, None):
raise RuntimeError("foo") from ValueError("bar")
# we're trying to stick to pytest semantics
# If the exception types don't match, raise the first exception that doesn't match
# If the text doesn't match, raise an assert
with pytest.raises(OSError):
with raises_with_cause(RuntimeError, "exception", ValueError, "cause"):
raise RuntimeError("exception") from OSError("cause")
with pytest.raises(OSError):
with raises_with_cause(RuntimeError, "exception", ValueError, "cause"):
raise OSError("exception") from ValueError("cause")
with pytest.raises(AssertionError):
with raises_with_cause(RuntimeError, "exception", ValueError, "foo"):
raise RuntimeError("exception") from ValueError("cause")
with pytest.raises(AssertionError):
with raises_with_cause(RuntimeError, "foo", ValueError, "cause"):
raise RuntimeError("exception") from ValueError("cause")
# There can be more than one nested cause
with raises_with_cause(
RuntimeError, "exception", ValueError, "cause1", OSError, "cause2"
):
try:
raise ValueError("cause1") from OSError("cause2")
except ValueError as e:
raise RuntimeError("exception") from e
with pytest.raises(OSError):
with raises_with_cause(
RuntimeError, "exception", ValueError, "cause1", TypeError, "cause2"
):
try:
raise ValueError("cause1") from OSError("cause2")
except ValueError as e:
raise RuntimeError("exception") from e
with pytest.raises(AssertionError):
with raises_with_cause(
RuntimeError, "exception", ValueError, "cause1", OSError, "cause2"
):
try:
raise ValueError("cause1") from OSError("no match")
except ValueError as e:
raise RuntimeError("exception") from e
@pytest.mark.slow
def test_check_thread_leak():
event = threading.Event()
t1 = threading.Thread(target=lambda: (event.wait(), "one"))
t1.start()
t2 = t3 = None
try:
with pytest.raises(
pytest.fail.Exception, match=r"2 thread\(s\) were leaked"
) as exc:
with check_thread_leak():
t2 = threading.Thread(target=lambda: (event.wait(), "two"))
t2.start()
t3 = threading.Thread(target=lambda: (event.wait(), "three"))
t3.start()
msg = exc.value.msg
assert msg
print(msg) # For reference, if test fails
# First, outer thread is ignored
assert msg.count("Call stack of leaked thread") == 2
assert "one" not in msg
# Make sure we can see the full traceback, not just the last line
assert msg.count(__file__) == 2
assert 'target=lambda: (event.wait(), "two")' in msg
assert 'target=lambda: (event.wait(), "three")' in msg
# Ensure there aren't too many or too few newlines
exc.match(r'event.wait\(\), "three"\)\)\n +File')
finally:
# Clean up
event.set()
t1.join(5)
if t2:
t2.join(5)
if t3:
t3.join(5)
@pytest.mark.parametrize("sync", [True, False])
def test_fail_hard(sync):
"""@fail_hard is a last resort when error handling for everything that we foresaw
could possibly go wrong failed.
Instead of trying to force a crash here, we'll write custom methods which do crash.
"""
class CustomError(Exception):
pass
class FailWorker(Worker):
@fail_hard
def fail_sync(self):
raise CustomError()
@fail_hard
async def fail_async(self):
raise CustomError()
test_done = False
@gen_cluster(nthreads=[])
async def test(s):
nonlocal test_done
with captured_logger("distributed.worker") as logger:
async with FailWorker(s.address) as a:
with pytest.raises(CustomError):
if sync:
a.fail_sync()
else:
await a.fail_async()
while a.status != Status.closed:
await asyncio.sleep(0.01)
method_name = "fail_sync" if sync else "fail_async"
assert f"worker-{method_name}-fail-hard" in logger.getvalue()
test_done = True
with pytest.raises(CustomError):
test()
assert test_done
def test_popen_write_during_terminate_deadlock():
# Fabricate a command which, when terminated, tries to write more than the pipe
# buffer can hold (OS specific: on Linux it's typically 65536 bytes; on Windows it's
# less). This would deadlock if `proc.wait()` was called, since the process will be
# trying to write to stdout, but stdout isn't being cleared because our process is
# blocked in `proc.wait()`. `proc.communicate()` is necessary:
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen.wait
with popen(
[
sys.executable,
"-c",
textwrap.dedent(
"""
import signal
import threading
e = threading.Event()
def cb(signum, frame):
# 131072 is 2x the size of the default Linux pipe buffer
print('x' * 131072)
e.set()
signal.signal(signal.SIGINT, cb)
print('ready', flush=True)
e.wait()
"""
),
],
capture_output=True,
) as proc:
assert proc.stdout.readline().strip() == b"ready"
# Exiting the context manager (terminating the subprocess) will raise
# `subprocess.TimeoutExpired` if this test breaks.
def test_popen_timeout(capsys):
with pytest.raises(subprocess.TimeoutExpired):
with popen(
[
sys.executable,
"-c",
textwrap.dedent(
"""
import signal
import sys
import time
signum = signal.SIGBREAK if sys.platform == "win32" else signal.SIGINT
signal.signal(signum, signal.SIG_IGN)
print("ready", flush=True)
while True:
time.sleep(0.1)
print("slept", flush=True)
"""
),
],
capture_output=True,
terminate_timeout=1,
) as proc:
assert proc.stdout
assert proc.stdout.readline().strip() == b"ready"
# Exiting contextmanager sends SIGINT/SIGBREAK, waits 1s for shutdown.
# Our script ignores SIGINT/SIGBREAK, so after 1s it sends SIGKILL.
# The contextmanager raises `TimeoutExpired` once the process is killed,
# because it failed the 1s timeout
captured = capsys.readouterr()
assert "stdout: returncode" in captured.out
assert "slept" in captured.out
def test_popen_always_prints_output(capsys):
# We always print stdout even if there was no error, in case some other assertion
# later in the test fails and the output would be useful.
with popen([sys.executable, "-c", "print('foo')"], capture_output=True) as proc:
proc.communicate(timeout=5)
captured = capsys.readouterr()
assert "stdout: returncode 0" in captured.out
assert "foo" in captured.out
@gen_test()
async def test_freeze_batched_send():
async with EchoServer() as e:
comm = await connect(e.address)
b = BatchedSend(interval=0)
b.start(comm)
b.send("hello")
assert await comm.read() == ("hello",)
with freeze_batched_send(b) as locked_comm:
b.send("foo")
b.send("bar")
# Sent messages are available on the write queue
msg = await locked_comm.write_queue.get()
assert msg == (comm.peer_address, ["foo", "bar"])
# Sent messages will not reach the echo server
await asyncio.sleep(0.01)
assert e.count == 1
# Now we let messages send to the echo server
locked_comm.write_event.set()
assert await comm.read() == ("foo", "bar")
assert e.count == 2
locked_comm.write_event.clear()
b.send("baz")
await asyncio.sleep(0.01)
assert e.count == 2
assert b.comm is comm
assert await comm.read() == ("baz",)
assert e.count == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_wait_for_state(c, s, a, capsys):
ev = Event()
x = c.submit(lambda ev: ev.wait(), ev, key="x")
await asyncio.gather(
wait_for_state("x", "processing", s),
wait_for_state("x", "executing", a),
c.run(wait_for_state, "x", "executing"),
)
await ev.set()
await asyncio.gather(
wait_for_state("x", "memory", s),
wait_for_state("x", {"memory", "other"}, a),
c.run(wait_for_state, "x", "memory"),
)
with pytest.raises(asyncio.TimeoutError):
await wait_for(wait_for_state("x", "bad_state", s), timeout=0.1)
with pytest.raises(asyncio.TimeoutError):
await wait_for(wait_for_state("x", ("this", "that"), s), timeout=0.1)
with pytest.raises(asyncio.TimeoutError):
await wait_for(wait_for_state("y", "memory", s), timeout=0.1)
assert capsys.readouterr().out == (
f"tasks['x'].state='memory' on {s.address}; expected state='bad_state'\n"
f"tasks['x'].state='memory' on {s.address}; expected state=('this', 'that')\n"
f"tasks['y'] not found on {s.address}\n"
)
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_wait_for_stimulus(c, s, a):
t1 = asyncio.create_task(wait_for_stimulus(ComputeTaskEvent, a))
t2 = asyncio.create_task(wait_for_stimulus(ComputeTaskEvent, a, key="y"))
await asyncio.sleep(0.05)
assert not t1.done()
assert not t2.done()
x = c.submit(inc, 1, key="x")
ev = await t1
assert isinstance(ev, ComputeTaskEvent)
await wait_for_stimulus(ComputeTaskEvent, a, key="x")
await c.run(wait_for_stimulus, ComputeTaskEvent, key="x")
assert not t2.done()
y = c.submit(inc, 1, key="y")
await t2
def test_ws_with_running_task(ws_with_running_task):
ws = ws_with_running_task
ts = ws.tasks["x"]
assert ts.resource_restrictions == {"R": 1}
assert ws.available_resources == {"R": 0}
assert ws.total_resources == {"R": 1}
assert ts.state in ("executing", "long-running")
def test_sizeof():
assert sizeof(SizeOf(100)) == 100
assert isinstance(gen_nbytes(100), SizeOf)
assert sizeof(gen_nbytes(100)) == 100
@pytest.mark.parametrize(
"input, exc, msg",
[
(12345.0, TypeError, "Expected integer"),
(-1, ValueError, "larger than"),
(0, ValueError, "larger than"),
(10, ValueError, "larger than"),
],
)
def test_sizeof_error(input, exc, msg):
with pytest.raises(exc, match=msg):
SizeOf(input)
@gen_test()
async def test_ensure_no_new_clients():
with ensure_no_new_clients():
async with Scheduler(dashboard_address=":0") as s:
pass
async with Scheduler(dashboard_address=":0") as s:
with ensure_no_new_clients():
pass
with pytest.raises(AssertionError):
with ensure_no_new_clients():
async with Client(s.address, asynchronous=True):
pass
async with Client(s.address, asynchronous=True):
with ensure_no_new_clients():
pass
@pytest.mark.parametrize("nanny", [False, True])
def test_cluster_uses_config_for_test(nanny):
key = "distributed.admin.tick.interval"
local = dask.config.get(key)
with cluster(nanny=nanny, nworkers=1) as (scheduler, workers):
with Client(scheduler["address"]) as client:
s_remote = client.run_on_scheduler(dask.config.get, key)
assert s_remote != local
w_remote = client.run(dask.config.get, key)
w_remote = next(iter(w_remote.values()))
assert w_remote != local
assert w_remote == s_remote
def test_captured_logger():
log1 = logging.getLogger("test_captured_logger")
log2 = logging.getLogger("test_captured_logger.child")
log3 = logging.getLogger("test_unrelated_logger")
with captured_logger("test_captured_logger", level=logging.WARNING) as cap:
log1.info("A")
log1.warning("B")
log1.error("C")
log2.error("D")
log3.error("E")
assert cap.getvalue() == "B\nC\nD\n"
def test_captured_context_meter():
with captured_context_meter() as metrics:
assert metrics == {}
context_meter.digest_metric("foo", 1, "s")
context_meter.digest_metric("foo", 2, "s") # Addition
context_meter.digest_metric("foo", 4, "t") # Addition
context_meter.digest_metric(123, 5.1, "t") # Non-string label
context_meter.digest_metric(("a", "o"), 6, "u") # tuple label
assert metrics == {
("foo", "s"): 3,
("foo", "t"): 4,
(123, "t"): 5.1,
("a", "o", "u"): 6,
}
assert isinstance(metrics["foo", "s"], int)
| MyServer |
python | django__django | tests/admin_changelist/models.py | {
"start": 2944,
"end": 3263
} | class ____(models.Model):
"""
Model with Manager that defines a default order.
Refs #17198.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
number = models.IntegerField(default=0, db_column="number_val")
objects = OrderedObjectManager()
| OrderedObject |
python | getsentry__sentry | tests/sentry/core/endpoints/test_team_stats.py | {
"start": 285,
"end": 1351
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
team = self.create_team(members=[self.user])
project_1 = self.create_project(teams=[team], name="a")
project_2 = self.create_project(teams=[team], name="b")
team_2 = self.create_team(members=[self.user])
project_3 = self.create_project(teams=[team_2], name="c")
for project, count in ((project_1, 2), (project_2, 1), (project_3, 4)):
for _ in range(count):
self.store_event(
data={
"timestamp": before_now(minutes=5).isoformat(),
},
project_id=project.id,
)
url = reverse(
"sentry-api-0-team-stats",
kwargs={
"organization_id_or_slug": team.organization.slug,
"team_id_or_slug": team.slug,
},
)
response = self.client.get(url)
assert response.status_code == 200, response.content
| TeamStatsTest |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 60400,
"end": 61781
} | class ____(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional input Tensor.
The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super().__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.log_softmax(input, self.dim, _stacklevel=5)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"dim={self.dim}"
| LogSoftmax |
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 5683,
"end": 6106
} | class ____(PipenvFileError):
def __init__(self, filename="Pipfile", extra=None, **kwargs):
extra = kwargs.pop("extra", [])
message = "{} {}".format(
"[bold red]Aborting![/bold red]",
"[bold]Please ensure that the file exists and is located in your project root directory.[/bold]",
)
super().__init__(filename, message=message, extra=extra, **kwargs)
| PipfileNotFound |
python | kennethreitz__tablib | src/tablib/formats/_jira.py | {
"start": 90,
"end": 1087
} | class ____:
title = 'jira'
@classmethod
def export_set(cls, dataset):
"""Formats the dataset according to the Jira table syntax:
||heading 1||heading 2||heading 3||
|col A1|col A2|col A3|
|col B1|col B2|col B3|
:param dataset: dataset to serialize
:type dataset: tablib.core.Dataset
"""
header = cls._get_header(dataset.headers) if dataset.headers else ''
body = cls._get_body(dataset)
return f'{header}\n{body}' if header else body
@classmethod
def _get_body(cls, dataset):
return '\n'.join([cls._serialize_row(row) for row in dataset])
@classmethod
def _get_header(cls, headers):
return cls._serialize_row(headers, delimiter='||')
@classmethod
def _serialize_row(cls, row, delimiter='|'):
return '{}{}{}'.format(
delimiter,
delimiter.join([str(item) if item else ' ' for item in row]),
delimiter
)
| JIRAFormat |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-occurrences-of-a-substring.py | {
"start": 1085,
"end": 1633
} | class ____(object):
def maxFreq(self, s, maxLetters, minSize, maxSize):
"""
:type s: str
:type maxLetters: int
:type minSize: int
:type maxSize: int
:rtype: int
"""
lookup = {}
for right in xrange(minSize-1, len(s)):
word = s[right-minSize+1:right+1]
if word in lookup:
lookup[word] += 1
elif len(collections.Counter(word)) <= maxLetters:
lookup[word] = 1
return max(lookup.values() or [0])
| Solution2 |
python | apache__airflow | airflow-core/src/airflow/models/asset.py | {
"start": 4639,
"end": 6116
} | class ____(Base):
"""A table to store asset watchers."""
name: Mapped[str] = mapped_column(
String(length=1500).with_variant(
String(
length=1500,
# latin1 allows for more indexed length in mysql
# and this field should only be ascii chars
collation="latin1_general_cs",
),
"mysql",
),
nullable=False,
)
asset_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False)
trigger_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False)
asset = relationship("AssetModel", back_populates="watchers")
trigger = relationship("Trigger", back_populates="asset_watchers")
__tablename__ = "asset_watcher"
__table_args__ = (
PrimaryKeyConstraint(asset_id, trigger_id, name="asset_watcher_pkey"),
ForeignKeyConstraint(
columns=(asset_id,),
refcolumns=["asset.id"],
name="awm_asset_id_fkey",
ondelete="CASCADE",
),
ForeignKeyConstraint(
columns=(trigger_id,),
refcolumns=["trigger.id"],
name="awm_trigger_id_fkey",
ondelete="CASCADE",
),
Index("idx_awm_trigger_id", trigger_id),
)
def __repr__(self):
return f"{self.__class__.__name__}(name={self.name!r}, asset_id={self.asset_id!r}, trigger_id={self.trigger_id!r})"
| AssetWatcherModel |
python | walkccc__LeetCode | solutions/966. Vowel Spellchecker/966.py | {
"start": 0,
"end": 771
} | class ____:
def spellchecker(self, wordlist: list[str], queries: list[str]) -> list[str]:
def lowerKey(word: str) -> str:
return '$' + ''.join([c.lower() for c in word])
def vowelKey(word: str) -> str:
return ''.join(['*' if c.lower() in 'aeiou' else c.lower() for c in word])
ans = []
dict = {}
for word in wordlist:
dict.setdefault(word, word)
dict.setdefault(lowerKey(word), word)
dict.setdefault(vowelKey(word), word)
for query in queries:
if query in dict:
ans.append(dict[query])
elif lowerKey(query) in dict:
ans.append(dict[lowerKey(query)])
elif vowelKey(query) in dict:
ans.append(dict[vowelKey(query)])
else:
ans.append('')
return ans
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 64491,
"end": 64872
} | class ____(_ConfigBase):
vectorizer: Union[Vectorizers, str]
model: Dict[str, Any]
source_properties: Optional[List[str]]
def to_dict(self) -> Dict[str, Any]:
ret_dict = super().to_dict()
if "sourceProperties" in ret_dict:
ret_dict["properties"] = ret_dict.pop("sourceProperties")
return ret_dict
@dataclass
| _NamedVectorizerConfig |
python | google__pytype | pytype_extensions/instrumentation_for_testing_test.py | {
"start": 2334,
"end": 4238
} | class ____(unittest.TestCase):
def testFakeNoCtor(self):
orig_fake_obj = FakeNoCtor(3)
obj = orig_fake_obj.Seal()
assert_type(obj, NoCtor)
for expected_call_count in (1, 2):
self.assertEqual(ProductionCodePassNoCtor(obj), 600)
fake_obj = i4t.Unseal(obj, FakeNoCtor)
assert fake_obj is orig_fake_obj
assert_type(fake_obj, FakeNoCtor)
self.assertEqual(fake_obj.call_count, expected_call_count)
def testFakeNoCtorInitArg(self):
obj = FakeNoCtorInitArgUnsealed(5).Seal()
assert_type(obj, NoCtor)
self.assertEqual(ProductionCodePassNoCtor(obj), 1030)
fake_obj = i4t.Unseal(obj, FakeNoCtorInitArgUnsealed)
assert_type(fake_obj, FakeNoCtorInitArgUnsealed)
self.assertEqual(fake_obj.state, 5)
def testFakeNoCtorDefaultInit(self):
obj = FakeNoCtorDefaultInitSealed()
assert_type(obj, NoCtor)
self.assertEqual(ProductionCodePassNoCtor(obj), 208)
fake_obj = i4t.Unseal(obj, FakeNoCtorDefaultInitUnsealed)
assert_type(fake_obj, FakeNoCtorDefaultInitUnsealed)
def testFakeNoCtorInitNoArgs(self):
obj = FakeNoCtorInitNoArgsSealed()
assert_type(obj, NoCtor)
self.assertEqual(ProductionCodePassNoCtor(obj), 1680)
fake_obj = i4t.Unseal(obj, FakeNoCtorInitNoArgsUnsealed)
assert_type(fake_obj, FakeNoCtorInitNoArgsUnsealed)
self.assertEqual(fake_obj.state, 8)
def testFakeNoCtorSealedAs(self):
obj = FakeNoCtorSealedAs()
assert_type(obj, NoCtor)
self.assertEqual(ProductionCodePassNoCtor(obj), 612)
def testFakeWithCtor(self):
orig_fake_obj = FakeWithCtor()
obj = orig_fake_obj.Seal()
assert_type(obj, WithCtor)
self.assertEqual(ProductionCodePassWithCtor(obj), 3500)
fake_obj = i4t.Unseal(obj, FakeWithCtor)
assert fake_obj is orig_fake_obj
assert_type(fake_obj, FakeWithCtor)
if __name__ == "__main__":
unittest.main()
| InstrumentationForTestingTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 71132,
"end": 71274
} | class ____(_TestMethodsMutating, __TestCase):
constructor1 = SetSubclass
constructor2 = SetSubclass
| TestMethodsMutating_Subclass_Subclass |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 49064,
"end": 51505
} | class ____:
def test_not_all_oids(self):
with pytest.raises(TypeError):
x509.ExtendedKeyUsage(["notoid"]) # type:ignore[list-item]
def test_iter_len(self):
eku = x509.ExtendedKeyUsage(
[
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
]
)
assert len(eku) == 2
assert list(eku) == [
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CLIENT_AUTH,
]
def test_iter_input(self):
usages = [
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
]
aia = x509.ExtendedKeyUsage(iter(usages))
assert list(aia) == usages
def test_repr(self):
eku = x509.ExtendedKeyUsage(
[
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
]
)
assert repr(eku) == (
"<ExtendedKeyUsage([<ObjectIdentifier(oid=1.3.6.1.5.5.7.3.1, name="
"serverAuth)>, <ObjectIdentifier(oid=1.3.6.1.5.5.7.3.2, name=clien"
"tAuth)>])>"
)
def test_eq(self):
eku = x509.ExtendedKeyUsage(
[x509.ObjectIdentifier("1.3.6"), x509.ObjectIdentifier("1.3.7")]
)
eku2 = x509.ExtendedKeyUsage(
[x509.ObjectIdentifier("1.3.6"), x509.ObjectIdentifier("1.3.7")]
)
assert eku == eku2
def test_ne(self):
eku = x509.ExtendedKeyUsage([x509.ObjectIdentifier("1.3.6")])
eku2 = x509.ExtendedKeyUsage([x509.ObjectIdentifier("1.3.6.1")])
assert eku != eku2
assert eku != object()
def test_hash(self):
eku = x509.ExtendedKeyUsage(
[x509.ObjectIdentifier("1.3.6"), x509.ObjectIdentifier("1.3.7")]
)
eku2 = x509.ExtendedKeyUsage(
[x509.ObjectIdentifier("1.3.6"), x509.ObjectIdentifier("1.3.7")]
)
eku3 = x509.ExtendedKeyUsage([x509.ObjectIdentifier("1.3.6")])
assert hash(eku) == hash(eku2)
assert hash(eku) != hash(eku3)
def test_public_bytes(self):
ext = x509.ExtendedKeyUsage(
[x509.ObjectIdentifier("1.3.6"), x509.ObjectIdentifier("1.3.7")]
)
assert ext.public_bytes() == b"0\x08\x06\x02+\x06\x06\x02+\x07"
| TestExtendedKeyUsage |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 175573,
"end": 176117
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("owner_id", "setting_value", "client_mutation_id")
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
setting_value = sgqlc.types.Field(
sgqlc.types.non_null(NotificationRestrictionSettingValue),
graphql_name="settingValue",
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateNotificationRestrictionSettingInput |
python | doocs__leetcode | solution/2200-2299/2241.Design an ATM Machine/Solution.py | {
"start": 0,
"end": 761
} | class ____:
def __init__(self):
self.d = [20, 50, 100, 200, 500]
self.m = len(self.d)
self.cnt = [0] * self.m
def deposit(self, banknotesCount: List[int]) -> None:
for i, x in enumerate(banknotesCount):
self.cnt[i] += x
def withdraw(self, amount: int) -> List[int]:
ans = [0] * self.m
for i in reversed(range(self.m)):
ans[i] = min(amount // self.d[i], self.cnt[i])
amount -= ans[i] * self.d[i]
if amount > 0:
return [-1]
for i, x in enumerate(ans):
self.cnt[i] -= x
return ans
# Your ATM object will be instantiated and called as such:
# obj = ATM()
# obj.deposit(banknotesCount)
# param_2 = obj.withdraw(amount)
| ATM |
python | pytorch__pytorch | test/fx/test_shape_inference.py | {
"start": 419,
"end": 4171
} | class ____(unittest.TestCase):
def test_infer_symbol_values(self):
def mksym(shape_env, value, source, dynamic_dim) -> None:
return shape_env.create_symintnode(
shape_env.create_symbol(
value,
source=source,
dynamic_dim=dynamic_dim,
),
hint=value,
source=source,
)
shape_env = ShapeEnv()
N = 8
sample = {f"s{i}": 2 for i in range(N)}
init_symints = [
mksym(shape_env, v, LocalSource(k), DimDynamic.DYNAMIC)
for k, v in sample.items()
]
symints = copy.deepcopy(init_symints)
symbol_to_idx_dict = {f"s{i}": i for i in range(N)}
padding_constraints = defaultdict(list)
# prepare constraints strings
constraints = []
constraints.append(
"The size of tensor a (s1) must match the size of tensor b (1773) at non-singleton dimension 1)"
)
constraints.append(
"Expected size for first two dimensions of batch2 tensor to be: [s0, (s2//2) + 12] but got: [s0, 120]."
)
constraints.append("shape '[s0, -1, 32]' is invalid for input of size s0*s3")
constraints.append(
"a and b must have same reduction dim, but got [32*s0, s3] X [20, 15]."
)
constraints.append(
"a and b must have same reduction dim, but got [s0, s4 + 1568] X [5728, 1024]."
)
constraints.append(
"Expected size for first two dimensions of batch2 tensor to be: [s0, 40] but got: [s0, s5]."
)
constraints.append(
"shape '[s0, -1, 32]' is invalid for input of size s0*s6 + 1344*s0"
)
constraints.append(
"shape '[-1, 47]' is invalid for input of size 32*s0*s6 + 1344*s0"
)
constraints.append(
"Expected size for first two dimensions of batch2 tensor to be: [s0, 47*s6] but got: [s0*s6, 47]."
)
constraints.append("Split sizes add up to 4258 but got the tensor's size of s7")
for constraint in constraints:
infer_symbol_values(
symints,
init_symints,
symbol_to_idx_dict,
padding_constraints,
constraint,
)
self.assertEqual(symints[1], 1773)
self.assertEqual(symints[2], 216)
self.assertEqual(symints[3], 640)
self.assertEqual(symints[4], 4160)
self.assertEqual(symints[5], 40)
self.assertEqual(symints[6], 160)
self.assertEqual(symints[7], 4258)
def test_infer_shape(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w_1 = torch.empty([256, 328])
self.b_1 = torch.empty([256])
self.w_2 = torch.empty([328, 256])
self.b_2 = torch.empty([328])
def forward(self, x):
l_1 = torch.nn.functional.linear(x, self.w_1, bias=self.b_1)
s_1 = torch.sigmoid(l_1)
l_2 = torch.nn.functional.linear(s_1, self.w_2, bias=self.b_2)
t_1 = torch.tanh(l_2)
return t_1
def generate_graph_module(model):
gm = fx.symbolic_trace(model)
return gm
m = TestModule()
gm = generate_graph_module(m)
input_tensors = [torch.randn(1, 1)]
infer_shape(gm, input_tensors)
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
| TestShapeInference |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 28469,
"end": 28702
} | class ____(RayError):
"""Raised when the corresponding placement group was removed."""
def __str__(self):
return "The placement group corresponding to this Actor has been removed."
@PublicAPI
| ActorPlacementGroupRemoved |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass8.py | {
"start": 374,
"end": 516
} | class ____:
name: str = "sample"
dir_a: Path = Path.home().joinpath(f"source/{name}")
dir_b: Path = dir_a.joinpath("path/to/b")
| ClassC |
python | numpy__numpy | numpy/_core/tests/test_defchararray.py | {
"start": 6412,
"end": 6632
} | class ____(TestComparisons):
"""Ticket #1276"""
def A(self):
return np.array(
[['abc', 'abcc', '123'],
['789', 'abc', 'xyz']], np.str_).view(np.char.chararray)
| TestComparisonsMixed2 |
python | wandb__wandb | wandb/vendor/pygments/lexers/console.py | {
"start": 1105,
"end": 4120
} | class ____(RegexLexer):
"""
Lexer for PyPy log files.
.. versionadded:: 1.5
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Text),
(r"#.*?$", Comment),
],
}
| PyPyLogLexer |
python | pytorch__pytorch | torch/_dynamo/variables/tensor.py | {
"start": 66623,
"end": 68906
} | class ____(UserDefinedClassVariable):
def call_function(
self,
tx: "InstructionTranslator",
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
# Handle `Subclass(existing_tensor, ...)` calls.
from .torch_function import TensorWithTFOverrideVariable
new_func = self.value.__new__
if new_func is torch.Tensor.__new__:
if (
len(args) == 1
and isinstance(args[0], TensorVariable)
and len(kwargs) == 0
):
data = args[0]
# Simulate `torch.Tensor.__new__` as shallow-copying the input
# tensor data with a new type. TODO polyfill?
var = TensorWithTFOverrideVariable.from_tensor_var(
tx, data, self.value, self.source
)
else:
unimplemented(
gb_type="Calling subclass default constructor with more than tensor argument",
context=f"{self.value}(args={args}, kwargs={kwargs})",
explanation="Currently not supported",
hints=[
"Avoid this constructor call or move it outside "
"`torch.compile` regione",
*graph_break_hints.SUPPORTABLE,
],
)
else:
# Let Dynamo trace through custom `__new__`
var = VariableTracker.build(tx, new_func).call_function(
tx, [self] + args, kwargs
)
# Let Dynamo trace through custom `__init__`
init_func = self.value.__init__
# TODO builder should be able to handle `torch.Tensor.__init__`,
# which is `object.__init__`, so that we can remove this check.
if init_func is not torch.Tensor.__init__:
VariableTracker.build(tx, init_func).call_function(tx, [var], kwargs)
# See NOTE [Side effect tracking for newly constructed tensor]
tx.output.side_effects._track_obj(
object(), var, mutation_type_cls=AttributeMutationNew
)
return var
def as_python_constant(self):
return self.value
| TensorSubclassVariable |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/spark_kubernetes.py | {
"start": 1941,
"end": 15991
} | class ____(KubernetesPodOperator):
"""
Creates sparkApplication object in kubernetes cluster.
.. seealso::
For more detail about Spark Application Object have a look at the reference:
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.3.3-3.1.1/docs/api-docs.md#sparkapplication
:param image: Docker image you wish to launch. Defaults to hub.docker.com,
:param code_path: path to the spark code in image,
:param namespace: kubernetes namespace to put sparkApplication
:param name: name of the pod in which the task will run, will be used (plus a random
suffix if random_name_suffix is True) to generate a pod id (DNS-1123 subdomain,
containing only [a-z0-9.-]).
:param application_file: filepath to kubernetes custom_resource_definition of sparkApplication
:param template_spec: kubernetes sparkApplication specification
:param get_logs: get the stdout of the container as logs of the tasks.
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:param success_run_history_limit: Number of past successful runs of the application to keep.
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:param log_events_on_failure: Log the pod's events if a failure occurs
:param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor.
When enabled, the operator automatically adds Airflow task context labels (dag_id, task_id, run_id)
to the driver and executor pods to enable finding them for reattachment.
:param delete_on_termination: What to do when the pod reaches its final
state, or the execution is interrupted. If True (default), delete the
pod; if False, leave the pod.
:param kubernetes_conn_id: the connection to Kubernetes cluster
:param random_name_suffix: If True, adds a random suffix to the pod name
"""
template_fields = ["application_file", "namespace", "template_spec", "kubernetes_conn_id"]
template_fields_renderers = {"template_spec": "py"}
template_ext = ("yaml", "yml", "json")
ui_color = "#f4a460"
BASE_CONTAINER_NAME = "spark-kubernetes-driver"
def __init__(
self,
*,
image: str | None = None,
code_path: str | None = None,
namespace: str = "default",
name: str | None = None,
application_file: str | None = None,
template_spec=None,
get_logs: bool = True,
do_xcom_push: bool = False,
success_run_history_limit: int = 1,
startup_timeout_seconds=600,
log_events_on_failure: bool = False,
reattach_on_restart: bool = True,
delete_on_termination: bool = True,
kubernetes_conn_id: str = "kubernetes_default",
random_name_suffix: bool = True,
**kwargs,
) -> None:
super().__init__(name=name, **kwargs)
self.image = image
self.code_path = code_path
self.application_file = application_file
self.template_spec = template_spec
self.kubernetes_conn_id = kubernetes_conn_id
self.startup_timeout_seconds = startup_timeout_seconds
self.reattach_on_restart = reattach_on_restart
self.delete_on_termination = delete_on_termination
self.do_xcom_push = do_xcom_push
self.namespace = namespace
self.get_logs = get_logs
self.log_events_on_failure = log_events_on_failure
self.success_run_history_limit = success_run_history_limit
self.random_name_suffix = random_name_suffix
# fix mypy typing
self.base_container_name: str
self.container_logs: list[str]
if self.base_container_name != self.BASE_CONTAINER_NAME:
self.log.warning(
"base_container_name is not supported and will be overridden to %s", self.BASE_CONTAINER_NAME
)
self.base_container_name = self.BASE_CONTAINER_NAME
if self.get_logs and self.container_logs != self.BASE_CONTAINER_NAME:
self.log.warning(
"container_logs is not supported and will be overridden to %s", self.BASE_CONTAINER_NAME
)
self.container_logs = [self.BASE_CONTAINER_NAME]
def _render_nested_template_fields(
self,
content: Any,
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set,
) -> None:
if id(content) not in seen_oids and isinstance(content, k8s.V1EnvVar):
seen_oids.add(id(content))
self._do_render_template_fields(content, ("value", "name"), context, jinja_env, seen_oids)
return
super()._render_nested_template_fields(content, context, jinja_env, seen_oids)
def manage_template_specs(self):
if self.application_file:
try:
filepath = Path(self.application_file.rstrip()).resolve(strict=True)
except (FileNotFoundError, OSError, RuntimeError, ValueError):
application_file_body = self.application_file
else:
application_file_body = filepath.read_text()
template_body = _load_body_to_dict(application_file_body)
if not isinstance(template_body, dict):
msg = f"application_file body can't transformed into the dictionary:\n{application_file_body}"
raise TypeError(msg)
elif self.template_spec:
template_body = self.template_spec
else:
raise AirflowException("either application_file or template_spec should be passed")
if "spark" not in template_body:
template_body = {"spark": template_body}
return template_body
def create_job_name(self):
name = (
self.name or self.template_body.get("spark", {}).get("metadata", {}).get("name") or self.task_id
)
if self.random_name_suffix:
updated_name = add_unique_suffix(name=name, max_len=MAX_LABEL_LEN)
else:
# truncation is required to maintain the same behavior as before
updated_name = name[:MAX_LABEL_LEN]
return self._set_name(updated_name)
@staticmethod
def _get_ti_pod_labels(context: Context | None = None, include_try_number: bool = True) -> dict[str, str]:
"""
Generate labels for the pod to track the pod in case of Operator crash.
:param include_try_number: add try number to labels
:param context: task context provided by airflow DAG
:return: dict.
"""
if not context:
return {}
context_dict = cast("dict", context)
ti = context_dict["ti"]
run_id = context_dict["run_id"]
labels = {
"dag_id": ti.dag_id,
"task_id": ti.task_id,
"run_id": run_id,
"spark_kubernetes_operator": "True",
}
map_index = ti.map_index
if map_index is not None and map_index >= 0:
labels["map_index"] = str(map_index)
if include_try_number:
labels.update(try_number=str(ti.try_number))
# In the case of sub dags this is just useful
# TODO: Remove this when the minimum version of Airflow is bumped to 3.0
if getattr(context_dict["dag"], "parent_dag", False):
labels["parent_dag_id"] = context_dict["dag"].parent_dag.dag_id
# Ensure that label is valid for Kube,
# and if not truncate/remove invalid chars and replace with short hash.
for label_id, label in labels.items():
safe_label = pod_generator.make_safe_label_value(str(label))
labels[label_id] = safe_label
return labels
@cached_property
def pod_manager(self) -> PodManager:
return PodManager(kube_client=self.client)
def _try_numbers_match(self, context, pod) -> bool:
task_instance = context["task_instance"]
task_context_labels = self._get_ti_pod_labels(context)
pod_try_number = pod.metadata.labels.get(task_context_labels.get("try_number", ""), "")
return str(task_instance.try_number) == str(pod_try_number)
@property
def template_body(self):
"""Templated body for CustomObjectLauncher."""
return self.manage_template_specs()
def find_spark_job(self, context, exclude_checked: bool = True):
label_selector = (
self._build_find_pod_label_selector(context, exclude_checked=exclude_checked)
+ ",spark-role=driver"
)
pod_list = self.client.list_namespaced_pod(self.namespace, label_selector=label_selector).items
pod = None
if len(pod_list) > 1: # and self.reattach_on_restart:
raise AirflowException(f"More than one pod running with labels: {label_selector}")
if len(pod_list) == 1:
pod = pod_list[0]
self.log.info(
"Found matching driver pod %s with labels %s", pod.metadata.name, pod.metadata.labels
)
self.log.info("`try_number` of task_instance: %s", context["ti"].try_number)
self.log.info("`try_number` of pod: %s", pod.metadata.labels.get("try_number", "unknown"))
return pod
def process_pod_deletion(self, pod, *, reraise=True):
if pod is not None:
if self.delete_on_termination:
pod_name = pod.metadata.name.replace("-driver", "")
self.log.info("Deleting spark job: %s", pod_name)
self.launcher.delete_spark_job(pod_name)
else:
self.log.info("skipping deleting spark job: %s", pod.metadata.name)
@cached_property
def hook(self) -> KubernetesHook:
hook = KubernetesHook(
conn_id=self.kubernetes_conn_id,
in_cluster=self.in_cluster or self.template_body.get("kubernetes", {}).get("in_cluster", False),
config_file=self.config_file
or self.template_body.get("kubernetes", {}).get("kube_config_file", None),
cluster_context=self.cluster_context
or self.template_body.get("kubernetes", {}).get("cluster_context", None),
)
return hook
@cached_property
def client(self) -> CoreV1Api:
return self.hook.core_v1_client
@cached_property
def custom_obj_api(self) -> CustomObjectsApi:
return CustomObjectsApi()
def get_or_create_spark_crd(self, launcher: CustomObjectLauncher, context) -> k8s.V1Pod:
if self.reattach_on_restart:
driver_pod = self.find_spark_job(context)
if driver_pod:
return driver_pod
driver_pod, spark_obj_spec = launcher.start_spark_job(
image=self.image, code_path=self.code_path, startup_timeout=self.startup_timeout_seconds
)
return driver_pod
def execute(self, context: Context):
self.name = self.create_job_name()
self._setup_spark_configuration(context)
if self.deferrable:
self.execute_async(context)
return super().execute(context)
def _setup_spark_configuration(self, context: Context):
"""Set up Spark-specific configuration including reattach logic."""
import copy
template_body = copy.deepcopy(self.template_body)
if self.reattach_on_restart:
task_context_labels = self._get_ti_pod_labels(context)
existing_pod = self.find_spark_job(context)
if existing_pod:
self.log.info(
"Found existing Spark driver pod %s. Reattaching to it.", existing_pod.metadata.name
)
self.pod = existing_pod
self.pod_request_obj = None
return
if "spark" not in template_body:
template_body["spark"] = {}
if "spec" not in template_body["spark"]:
template_body["spark"]["spec"] = {}
spec_dict = template_body["spark"]["spec"]
if "labels" not in spec_dict:
spec_dict["labels"] = {}
spec_dict["labels"].update(task_context_labels)
for component in ["driver", "executor"]:
if component not in spec_dict:
spec_dict[component] = {}
if "labels" not in spec_dict[component]:
spec_dict[component]["labels"] = {}
spec_dict[component]["labels"].update(task_context_labels)
self.log.info("Creating sparkApplication.")
self.launcher = CustomObjectLauncher(
name=self.name,
namespace=self.namespace,
kube_client=self.client,
custom_obj_api=self.custom_obj_api,
template_body=template_body,
)
self.pod = self.get_or_create_spark_crd(self.launcher, context)
self.pod_request_obj = self.launcher.pod_spec
def find_pod(self, namespace: str, context: Context, *, exclude_checked: bool = True):
"""Override parent's find_pod to use our Spark-specific find_spark_job method."""
return self.find_spark_job(context, exclude_checked=exclude_checked)
def on_kill(self) -> None:
if self.launcher:
self.log.debug("Deleting spark job for task %s", self.task_id)
self.launcher.delete_spark_job()
def patch_already_checked(self, pod: k8s.V1Pod, *, reraise=True):
"""Add an "already checked" annotation to ensure we don't reattach on retries."""
pod.metadata.labels["already_checked"] = "True"
body = PodGenerator.serialize_pod(pod)
self.client.patch_namespaced_pod(pod.metadata.name, pod.metadata.namespace, body)
def dry_run(self) -> None:
"""Print out the spark job that would be created by this operator."""
print(prune_dict(self.launcher.body, mode="strict"))
| SparkKubernetesOperator |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/slots.py | {
"start": 0,
"end": 59
} | class ____:
"""docstring"""
__slots__ = ['attr']
| Foo |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 13633,
"end": 14054
} | class ____(SendMessageToScheduler):
op = "task-finished"
key: Key
run_id: int
nbytes: int | None
type: bytes # serialized class
typename: str
metadata: dict
thread: int | None
startstops: list[StartStop]
__slots__ = tuple(__annotations__)
def to_dict(self) -> dict[str, Any]:
d = super().to_dict()
d["status"] = "OK"
return d
@dataclass
| TaskFinishedMsg |
python | facebookresearch__faiss | tests/torch_test_neural_net.py | {
"start": 7278,
"end": 8931
} | class ____(unittest.TestCase):
@torch.no_grad()
def test_decode(self):
torch.manual_seed(123)
step = QINCoStep(d=16, K=20, L=2, h=8)
codes = torch.randint(0, 20, (10, ))
xhat = torch.randn(10, 16)
ref_decode = step.decode(xhat, codes)
# step2 = copy_QINCoStep(step)
step2 = faiss.QINCoStep(step)
codes2 = faiss.Int32Tensor2D(codes[:, None].to(dtype=torch.int32))
np.testing.assert_array_equal(
step.codebook(codes).numpy(),
step2.codebook(codes2).numpy()
)
xhat2 = faiss.Tensor2D(xhat)
# xhat2 = faiss.Tensor2D(len(codes), step2.d)
new_decode = step2.decode(xhat2, codes2)
np.testing.assert_allclose(
ref_decode.numpy(),
new_decode.numpy(),
atol=2e-6
)
@torch.no_grad()
def test_encode(self):
torch.manual_seed(123)
step = QINCoStep(d=16, K=20, L=2, h=8)
# create plausible x for testing starting from actual codes
codes = torch.randint(0, 20, (10, ))
xhat = torch.zeros(10, 16)
x = step.decode(xhat, codes)
del codes
ref_codes, toadd = step.encode(xhat, x)
step2 = copy_QINCoStep(step)
xhat2 = faiss.Tensor2D(xhat)
x2 = faiss.Tensor2D(x)
toadd2 = faiss.Tensor2D(10, 16)
new_codes = step2.encode(xhat2, x2, toadd2)
np.testing.assert_allclose(
ref_codes.numpy(),
new_codes.numpy().ravel(),
atol=2e-6
)
np.testing.assert_allclose(toadd.numpy(), toadd2.numpy(), atol=2e-6)
| TestQINCoStep |
python | bokeh__bokeh | src/bokeh/models/labeling.py | {
"start": 2336,
"end": 4141
} | class ____(LabelingPolicy):
''' Select labels based on a user-defined policy function.
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing it to Bokeh.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
args = Dict(String, AnyRef, help="""
A mapping of names to Python objects. In particular, those can be Bokeh's models.
These objects are made available to the labeling policy's code snippet as the
values of named parameters to the callback.
""")
code = String(default="", help="""
A snippet of JavaScript code that selects a subset of labels for display.
The following arguments a are available:
* ``indices``, a set-like object containing label indices to filter
* ``bboxes``, an array of bounding box objects per label
* ``distance(i, j)``, a function computing distance (in axis dimensions)
between labels. If labels i and j overlap, then ``distance(i, j) <= 0``.
* the keys of ``args`` mapping, if any
Example:
Only display labels at even indices:
.. code-block:: javascript
code = '''
for (const i of indices)
if (i % 2 == 1)
indices.unset(i)
'''
Alternatively, as a generator:
.. code-block:: javascript
code = '''
for (const i of indices)
if (i % 2 == 0)
yield i
'''
""")
| CustomLabelingPolicy |
python | walkccc__LeetCode | solutions/3096. Minimum Levels to Gain More Points/3096.py | {
"start": 0,
"end": 310
} | class ____:
def minimumLevels(self, possible: list[int]) -> int:
n = len(possible)
nums = [num if num == 1 else -1 for num in possible]
prefix = list(itertools.accumulate(nums, initial=0))
for i in range(1, n):
if prefix[i] > prefix[n] - prefix[i]:
return i
return -1
| Solution |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/markup.py | {
"start": 453,
"end": 8481
} | class ____(NamedTuple):
"""A tag in console markup."""
name: str
"""The tag name. e.g. 'bold'."""
parameters: Optional[str]
"""Any additional parameters after the name."""
def __str__(self) -> str:
return (
self.name if self.parameters is None else f"{self.name} {self.parameters}"
)
@property
def markup(self) -> str:
"""Get the string representation of this tag."""
return (
f"[{self.name}]"
if self.parameters is None
else f"[{self.name}={self.parameters}]"
)
_ReStringMatch = Match[str] # regex match object
_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
def escape(
markup: str,
_escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
) -> str:
"""Escapes text so that it won't be interpreted as markup.
Args:
markup (str): Content to be inserted in to markup.
Returns:
str: Markup with square brackets escaped.
"""
def escape_backslashes(match: Match[str]) -> str:
"""Called by re.sub replace matches."""
backslashes, text = match.groups()
return f"{backslashes}{backslashes}\\{text}"
markup = _escape(escape_backslashes, markup)
if markup.endswith("\\") and not markup.endswith("\\\\"):
return markup + "\\"
return markup
def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
"""Parse markup in to an iterable of tuples of (position, text, tag).
Args:
markup (str): A string containing console markup
"""
position = 0
_divmod = divmod
_Tag = Tag
for match in RE_TAGS.finditer(markup):
full_text, escapes, tag_text = match.groups()
start, end = match.span()
if start > position:
yield start, markup[position:start], None
if escapes:
backslashes, escaped = _divmod(len(escapes), 2)
if backslashes:
# Literal backslashes
yield start, "\\" * backslashes, None
start += backslashes * 2
if escaped:
# Escape of tag
yield start, full_text[len(escapes) :], None
position = end
continue
text, equals, parameters = tag_text.partition("=")
yield start, None, _Tag(text, parameters if equals else None)
position = end
if position < len(markup):
yield position, markup[position:], None
def render(
markup: str,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
"""Render console markup in to a Text instance.
Args:
markup (str): A string containing console markup.
style: (Union[str, Style]): The style to use.
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
Raises:
MarkupError: If there is a syntax error in the markup.
Returns:
Text: A test instance.
"""
emoji_replace = _emoji_replace
if "[" not in markup:
return Text(
emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
style=style,
)
text = Text(style=style)
append = text.append
normalize = Style.normalize
style_stack: List[Tuple[int, Tag]] = []
pop = style_stack.pop
spans: List[Span] = []
append_span = spans.append
_Span = Span
_Tag = Tag
def pop_style(style_name: str) -> Tuple[int, Tag]:
"""Pop tag matching given style name."""
for index, (_, tag) in enumerate(reversed(style_stack), 1):
if tag.name == style_name:
return pop(-index)
raise KeyError(style_name)
for position, plain_text, tag in _parse(markup):
if plain_text is not None:
# Handle open brace escapes, where the brace is not part of a tag.
plain_text = plain_text.replace("\\[", "[")
append(emoji_replace(plain_text) if emoji else plain_text)
elif tag is not None:
if tag.name.startswith("/"): # Closing tag
style_name = tag.name[1:].strip()
if style_name: # explicit close
style_name = normalize(style_name)
try:
start, open_tag = pop_style(style_name)
except KeyError:
raise MarkupError(
f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
) from None
else: # implicit close
try:
start, open_tag = pop()
except IndexError:
raise MarkupError(
f"closing tag '[/]' at position {position} has nothing to close"
) from None
if open_tag.name.startswith("@"):
if open_tag.parameters:
handler_name = ""
parameters = open_tag.parameters.strip()
handler_match = RE_HANDLER.match(parameters)
if handler_match is not None:
handler_name, match_parameters = handler_match.groups()
parameters = (
"()" if match_parameters is None else match_parameters
)
try:
meta_params = literal_eval(parameters)
except SyntaxError as error:
raise MarkupError(
f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
)
except Exception as error:
raise MarkupError(
f"error parsing {open_tag.parameters!r}; {error}"
) from None
if handler_name:
meta_params = (
handler_name,
meta_params
if isinstance(meta_params, tuple)
else (meta_params,),
)
else:
meta_params = ()
append_span(
_Span(
start, len(text), Style(meta={open_tag.name: meta_params})
)
)
else:
append_span(_Span(start, len(text), str(open_tag)))
else: # Opening tag
normalized_tag = _Tag(normalize(tag.name), tag.parameters)
style_stack.append((len(text), normalized_tag))
text_length = len(text)
while style_stack:
start, tag = style_stack.pop()
style = str(tag)
if style:
append_span(_Span(start, text_length, style))
text.spans = sorted(spans[::-1], key=attrgetter("start"))
return text
if __name__ == "__main__": # pragma: no cover
MARKUP = [
"[red]Hello World[/red]",
"[magenta]Hello [b]World[/b]",
"[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
":warning-emoji: [bold red blink] DANGER![/]",
]
from pipenv.patched.pip._vendor.rich import print
from pipenv.patched.pip._vendor.rich.table import Table
grid = Table("Markup", "Result", padding=(0, 1))
for markup in MARKUP:
grid.add_row(Text(markup), markup)
print(grid)
| Tag |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 1854,
"end": 10180
} | class ____(SitemapMixin, HasObservers, HasObservations, HasEvents, db.Model):
__tablename__ = "users"
__table_args__ = (
CheckConstraint("length(username) <= 50", name="users_valid_username_length"),
CheckConstraint(
"username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
name="users_valid_username",
),
)
__repr__ = make_repr("username")
username: Mapped[CITEXT] = mapped_column(CITEXT, unique=True)
name: Mapped[str] = mapped_column(String(length=100))
password: Mapped[str] = mapped_column(String(length=128))
password_date: Mapped[datetime.datetime | None] = mapped_column(
TZDateTime, server_default=sql.func.now()
)
is_active: Mapped[bool_false]
is_frozen: Mapped[bool_false]
is_superuser: Mapped[bool_false]
is_support: Mapped[bool_false]
is_moderator: Mapped[bool_false]
is_psf_staff: Mapped[bool_false]
is_observer: Mapped[bool_false] = mapped_column(
comment="Is this user allowed to add Observations?"
)
prohibit_password_reset: Mapped[bool_false]
hide_avatar: Mapped[bool_false]
date_joined: Mapped[datetime_now | None]
last_login: Mapped[datetime.datetime | None] = mapped_column(
TZDateTime, server_default=sql.func.now()
)
disabled_for: Mapped[DisableReason | None]
totp_secret: Mapped[int | None] = mapped_column(LargeBinary(length=20))
last_totp_value: Mapped[str | None]
webauthn: Mapped[list[WebAuthn]] = orm.relationship(
back_populates="user", cascade="all, delete-orphan", lazy=True
)
recovery_codes: Mapped[list[RecoveryCode]] = orm.relationship(
back_populates="user", cascade="all, delete-orphan", lazy="dynamic"
)
emails: Mapped[list[Email]] = orm.relationship(
back_populates="user", cascade="all, delete-orphan", lazy=False
)
macaroons: Mapped[list[Macaroon]] = orm.relationship(
cascade="all, delete-orphan",
lazy=True,
order_by="Macaroon.created.desc()",
)
unique_logins: Mapped[list[UserUniqueLogin]] = orm.relationship(
back_populates="user", cascade="all, delete-orphan", lazy=True
)
role_invitations: Mapped[list[RoleInvitation]] = orm.relationship(
"RoleInvitation",
back_populates="user",
)
organization_applications: Mapped[list[OrganizationApplication]] = orm.relationship(
back_populates="submitted_by",
cascade="all, delete-orphan",
)
organizations: Mapped[list[Organization]] = orm.relationship(
secondary="organization_roles",
back_populates="users",
lazy=True,
order_by="Organization.name",
viewonly=True,
)
pending_oidc_publishers: Mapped[list[PendingOIDCPublisher]] = orm.relationship(
back_populates="added_by",
cascade="all, delete-orphan",
lazy=True,
)
projects: Mapped[list[Project]] = orm.relationship(
secondary="roles",
back_populates="users",
lazy=True,
viewonly=True,
order_by="Project.normalized_name",
)
organization_roles: Mapped[list[OrganizationRole]] = orm.relationship(
back_populates="user",
cascade="all, delete-orphan",
lazy=True,
viewonly=True,
)
organization_invitations: Mapped[list[OrganizationInvitation]] = orm.relationship(
back_populates="user",
)
teams: Mapped[list[Team]] = orm.relationship(
secondary="team_roles",
back_populates="members",
lazy=True,
viewonly=True,
order_by="Team.name",
)
terms_of_service_engagements: Mapped[list[UserTermsOfServiceEngagement]] = (
orm.relationship(
back_populates="user",
cascade="all, delete-orphan",
lazy=True,
viewonly=True,
)
)
@property
def primary_email(self):
primaries = [x for x in self.emails if x.primary]
if primaries:
return primaries[0]
@property
def public_email(self):
publics = [x for x in self.emails if x.public]
if publics:
return publics[0]
@hybrid_property
def email(self):
primary_email = self.primary_email
return primary_email.email if primary_email else None
@email.expression # type: ignore
def email(self):
return (
select(Email.email)
.where((Email.user_id == self.id) & (Email.primary.is_(True)))
.scalar_subquery()
)
@property
def has_two_factor(self):
return self.has_totp or self.has_webauthn
@property
def has_totp(self):
return self.totp_secret is not None
@property
def has_webauthn(self):
return len(self.webauthn) > 0
@property
def has_single_2fa(self):
if self.has_totp:
return not self.webauthn
return len(self.webauthn) == 1
@property
def has_recovery_codes(self):
return any(not code.burned for code in self.recovery_codes)
@property
def has_burned_recovery_codes(self):
return any(code.burned for code in self.recovery_codes)
@property
def has_primary_verified_email(self):
return self.primary_email is not None and self.primary_email.verified
@property
def recent_events(self):
session = orm_session_from_obj(self)
last_ninety = datetime.datetime.now() - datetime.timedelta(days=90)
return (
session.query(User.Event)
.filter(
(User.Event.source_id == self.id) & (User.Event.time >= last_ninety)
)
.order_by(User.Event.time.desc())
)
@property
def can_reset_password(self):
return not any(
[
self.is_superuser,
self.is_support,
self.is_moderator,
self.is_psf_staff,
self.prohibit_password_reset,
]
)
@property
def active_account_recoveries(self):
return [
observation
for observation in self.observations
if observation.kind == ObservationKind.AccountRecovery.value[0]
and observation.additional["status"] == "initiated"
]
def __principals__(self) -> list[str]:
principals = [Authenticated, f"user:{self.id}"]
if self.is_superuser:
principals.append("group:admins")
if self.is_support:
principals.append("group:support")
if self.is_moderator or self.is_superuser or self.is_support:
principals.append("group:moderators")
if self.is_psf_staff or self.is_superuser:
principals.append("group:psf_staff")
if self.is_observer or self.is_superuser:
principals.append("group:observers")
return principals
def __acl__(self):
# TODO: This ACL is duplicating permissions set in RootFactory.__acl__
# If nothing else, setting the ACL on the model is more restrictive
# than RootFactory.__acl__, which is why we duplicate
# AdminDashboardSidebarRead here, otherwise the sidebar is not displayed.
return [
(
Allow,
"group:admins",
(
Permissions.AdminProjectsWrite,
Permissions.AdminUsersRead,
Permissions.AdminUsersWrite,
Permissions.AdminUsersEmailWrite,
Permissions.AdminUsersAccountRecoveryWrite,
Permissions.AdminDashboardSidebarRead,
),
),
(
Allow,
"group:support",
(
Permissions.AdminUsersRead,
Permissions.AdminUsersEmailWrite,
Permissions.AdminUsersAccountRecoveryWrite,
Permissions.AdminDashboardSidebarRead,
),
),
(
Allow,
"group:moderators",
(Permissions.AdminUsersRead, Permissions.AdminDashboardSidebarRead),
),
]
def __lt__(self, other):
return self.username < other.username
| User |
python | getsentry__sentry | src/sentry/grouping/variants.py | {
"start": 693,
"end": 2063
} | class ____(ABC):
variant_name: str | None = None
@property
def contributes(self) -> bool:
return True
@property
@abstractmethod
def type(self) -> str: ...
def get_hash(self) -> str | None:
return None
@property
def key(self) -> str:
return self.type
@property
def description(self) -> str:
return self.type
@property
def hint(self) -> str | None:
return None
# This has to return `Mapping` rather than `dict` so that subtypes can override the return value
# with a TypedDict if they choose. See https://github.com/python/mypy/issues/4976.
def _get_metadata_as_dict(self) -> Mapping[str, Any]:
return {}
def as_dict(self) -> dict[str, Any]:
rv = {
"type": self.type,
"key": self.key,
"description": self.description,
"hash": self.get_hash(),
"hint": self.hint,
"contributes": self.contributes,
}
rv.update(self._get_metadata_as_dict())
return rv
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.get_hash()!r} ({self.type})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, BaseVariant):
return NotImplemented
return self.as_dict() == other.as_dict()
| BaseVariant |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 258356,
"end": 258694
} | class ____: # test for ticket:992
def setup_method(self):
self.rng = np.random.default_rng(7556981556)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5), random_state=self.rng)
assert_equal(rvs.shape, (10, 5))
| TestArrayArgument |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 220808,
"end": 221061
} | class ____(VegaLiteSchema):
"""ConditionalAxisNumberArray schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisNumberArray"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalAxisNumberArray |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/extractors/manager.py | {
"start": 2050,
"end": 12873
} | class ____(LoggingMixin):
"""Class abstracting management of custom extractors."""
def __init__(self):
super().__init__()
self.extractors: dict[str, type[BaseExtractor]] = {}
self.default_extractor = DefaultExtractor
# Built-in Extractors like Bash and Python
for extractor in _iter_extractor_types():
for operator_class in extractor.get_operator_classnames():
self.extractors[operator_class] = extractor
for extractor_path in conf.custom_extractors():
extractor: type[BaseExtractor] | None = try_import_from_string(extractor_path)
if not extractor:
self.log.warning(
"OpenLineage is unable to import custom extractor `%s`; will ignore it.",
extractor_path,
)
continue
for operator_class in extractor.get_operator_classnames():
if operator_class in self.extractors:
self.log.warning(
"Duplicate OpenLineage custom extractor found for `%s`. "
"`%s` will be used instead of `%s`",
operator_class,
extractor_path,
self.extractors[operator_class],
)
self.extractors[operator_class] = extractor
self.log.debug(
"Registered custom OpenLineage extractor `%s` for class `%s`",
extractor_path,
operator_class,
)
def add_extractor(self, operator_class: str, extractor: type[BaseExtractor]):
self.extractors[operator_class] = extractor
def extract_metadata(
self, dagrun, task, task_instance_state: TaskInstanceState, task_instance=None
) -> OperatorLineage:
extractor = self._get_extractor(task)
task_info = (
f"task_type={task.task_type} "
f"airflow_dag_id={task.dag_id} "
f"task_id={task.task_id} "
f"airflow_run_id={dagrun.run_id} "
)
if extractor:
# Extracting advanced metadata is only possible when extractor for particular operator
# is defined. Without it, we can't extract any input or output data.
try:
self.log.debug(
"Using extractor %s %s",
extractor.__class__.__name__,
str(task_info),
)
if task_instance_state == TaskInstanceState.RUNNING:
task_metadata = extractor.extract()
elif task_instance_state == TaskInstanceState.FAILED:
if callable(getattr(extractor, "extract_on_failure", None)):
task_metadata = extractor.extract_on_failure(task_instance)
else:
task_metadata = extractor.extract_on_complete(task_instance)
else:
task_metadata = extractor.extract_on_complete(task_instance)
self.log.debug(
"Found task metadata for operation %s: %s",
task.task_id,
str(task_metadata),
)
task_metadata = self.validate_task_metadata(task_metadata)
if task_metadata:
if (not task_metadata.inputs) and (not task_metadata.outputs):
if (hook_lineage := self.get_hook_lineage()) is not None:
inputs, outputs = hook_lineage
task_metadata.inputs = inputs
task_metadata.outputs = outputs
else:
self.extract_inlets_and_outlets(task_metadata, task)
return task_metadata
except Exception as e:
self.log.warning(
"Failed to extract metadata using found extractor %s - %s %s",
extractor,
e,
task_info,
)
self.log.debug("OpenLineage extraction failure details:", exc_info=True)
elif (hook_lineage := self.get_hook_lineage()) is not None:
inputs, outputs = hook_lineage
task_metadata = OperatorLineage(inputs=inputs, outputs=outputs)
return task_metadata
else:
self.log.debug("Unable to find an extractor %s", task_info)
# Only include the unkonwnSourceAttribute facet if there is no extractor
task_metadata = OperatorLineage(
run_facets=get_unknown_source_attribute_run_facet(task=task),
)
self.extract_inlets_and_outlets(task_metadata, task)
return task_metadata
return OperatorLineage()
def get_extractor_class(self, task: BaseOperator) -> type[BaseExtractor] | None:
if task.task_type in self.extractors:
return self.extractors[task.task_type]
def method_exists(method_name):
return callable(getattr(task, method_name, None))
if method_exists(OL_METHOD_NAME_START) or method_exists(OL_METHOD_NAME_COMPLETE):
return self.default_extractor
return None
def _get_extractor(self, task: BaseOperator) -> BaseExtractor | None:
# TODO: Re-enable in Extractor PR
# self.instantiate_abstract_extractors(task)
extractor = self.get_extractor_class(task)
self.log.debug("extractor for %s is %s", task.task_type, extractor)
if extractor:
return extractor(task)
return None
def extract_inlets_and_outlets(self, task_metadata: OperatorLineage, task) -> None:
if task.inlets or task.outlets:
self.log.debug("Manually extracting lineage metadata from inlets and outlets")
for i in task.inlets:
d = self.convert_to_ol_dataset(i)
if d:
task_metadata.inputs.append(d)
for o in task.outlets:
d = self.convert_to_ol_dataset(o)
if d:
task_metadata.outputs.append(d)
def get_hook_lineage(self) -> tuple[list[Dataset], list[Dataset]] | None:
try:
from airflow.providers.common.compat.lineage.hook import get_hook_lineage_collector
except ImportError:
return None
if not hasattr(get_hook_lineage_collector(), "has_collected"):
return None
if not get_hook_lineage_collector().has_collected:
return None
self.log.debug("OpenLineage will extract lineage from Hook Lineage Collector.")
return (
[
asset
for asset_info in get_hook_lineage_collector().collected_assets.inputs
if (asset := translate_airflow_asset(asset_info.asset, asset_info.context)) is not None
],
[
asset
for asset_info in get_hook_lineage_collector().collected_assets.outputs
if (asset := translate_airflow_asset(asset_info.asset, asset_info.context)) is not None
],
)
@staticmethod
def convert_to_ol_dataset_from_object_storage_uri(uri: str) -> Dataset | None:
from urllib.parse import urlparse
from openlineage.client.event_v2 import Dataset
if "/" not in uri:
return None
try:
scheme, netloc, path, params, _, _ = urlparse(uri)
except Exception:
return None
common_schemas = {
"s3": "s3",
"gs": "gs",
"gcs": "gs",
"hdfs": "hdfs",
"file": "file",
}
for found, final in common_schemas.items():
if scheme.startswith(found):
return Dataset(namespace=f"{final}://{netloc}", name=path.lstrip("/"))
return Dataset(namespace=scheme, name=f"{netloc}{path}")
@staticmethod
def convert_to_ol_dataset_from_table(table: Table) -> Dataset:
from openlineage.client.event_v2 import Dataset
from openlineage.client.facet_v2 import (
DatasetFacet,
documentation_dataset,
ownership_dataset,
schema_dataset,
)
facets: dict[str, DatasetFacet] = {}
if table.columns:
facets["schema"] = schema_dataset.SchemaDatasetFacet(
fields=[
schema_dataset.SchemaDatasetFacetFields(
name=column.name,
type=column.data_type,
description=column.description,
)
for column in table.columns
]
)
if table.owners:
facets["ownership"] = ownership_dataset.OwnershipDatasetFacet(
owners=[
ownership_dataset.Owner(
# f.e. "user:John Doe <jdoe@company.com>" or just "user:<jdoe@company.com>"
name=f"user:"
f"{user.first_name + ' ' if user.first_name else ''}"
f"{user.last_name + ' ' if user.last_name else ''}"
f"<{user.email}>",
type="",
)
for user in table.owners
]
)
if table.description:
facets["documentation"] = documentation_dataset.DocumentationDatasetFacet(
description=table.description
)
return Dataset(
namespace=f"{table.cluster}",
name=f"{table.database}.{table.name}",
facets=facets,
)
@staticmethod
def convert_to_ol_dataset(obj) -> Dataset | None:
from openlineage.client.event_v2 import Dataset
from airflow.providers.common.compat.lineage.entities import File, Table
if isinstance(obj, Dataset):
return obj
if isinstance(obj, Table):
return ExtractorManager.convert_to_ol_dataset_from_table(obj)
if isinstance(obj, File):
return ExtractorManager.convert_to_ol_dataset_from_object_storage_uri(obj.url)
return None
def validate_task_metadata(self, task_metadata) -> OperatorLineage | None:
try:
return OperatorLineage(
inputs=task_metadata.inputs,
outputs=task_metadata.outputs,
run_facets=task_metadata.run_facets,
job_facets=task_metadata.job_facets,
)
except AttributeError:
self.log.warning("OpenLineage extractor returns non-valid metadata: `%s`", task_metadata)
return None
| ExtractorManager |
python | getsentry__sentry | src/sentry/search/events/datasets/spans_metrics.py | {
"start": 898,
"end": 59350
} | class ____(DatasetConfig):
missing_function_error = IncompatibleMetricsQuery
nullable_metrics = {
constants.SPAN_MESSAGING_LATENCY,
constants.SPAN_METRICS_MAP["cache.item_size"],
constants.SPAN_METRICS_MAP["ai.total_cost"],
constants.SPAN_METRICS_MAP["ai.total_tokens.used"],
}
def __init__(self, builder: spans_metrics.SpansMetricsQueryBuilder):
self.builder = builder
self.total_span_duration: float | None = None
@property
def search_filter_converter(
self,
) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]:
return {
"message": self._message_filter_converter,
constants.SPAN_DOMAIN_ALIAS: self._span_domain_filter_converter,
constants.DEVICE_CLASS_ALIAS: self._device_class_filter_converter,
}
@property
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
return {
constants.SPAN_MODULE_ALIAS: self._resolve_span_module,
constants.SPAN_DOMAIN_ALIAS: self._resolve_span_domain,
constants.UNIQUE_SPAN_DOMAIN_ALIAS: self._resolve_unique_span_domains,
constants.DEVICE_CLASS_ALIAS: lambda alias: field_aliases.resolve_device_class(
self.builder, alias
),
constants.PROJECT_ALIAS: lambda alias: field_aliases.resolve_project_slug_alias(
self.builder, alias
),
constants.PROJECT_NAME_ALIAS: lambda alias: field_aliases.resolve_project_slug_alias(
self.builder, alias
),
constants.MESSAGING_OPERATION_TYPE_ALIAS: lambda alias: field_aliases.resolve_column_if_exists(
self.builder, alias
),
constants.MESSAGING_OPERATION_NAME_ALIAS: lambda alias: field_aliases.resolve_column_if_exists(
self.builder, alias
),
}
def resolve_metric(self, value: str) -> int:
metric_id = self.builder.resolve_metric_index(constants.SPAN_METRICS_MAP.get(value, value))
# If its still None its not a custom measurement
if metric_id is None:
if constants.SPAN_METRICS_MAP.get(value, value) in self.nullable_metrics:
metric_id = 0
else:
raise IncompatibleMetricsQuery(f"Metric: {value} could not be resolved")
if metric_id != 0:
self.builder.metric_ids.add(metric_id)
return metric_id
@property
def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
"""While the final functions in clickhouse must have their -Merge combinators in order to function, we don't
need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions
like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)
Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the
Metric Layer will actually handle which dataset each function goes to
"""
resolve_metric_id = {
"name": "metric_id",
"fn": lambda args: self.resolve_metric(args["column"]),
}
function_converter = {
function.name: function
for function in [
fields.MetricsFunction(
"count_unique",
required_args=[
fields.MetricArg(
"column",
allowed_columns=["user", "transaction"],
allow_custom_measurements=False,
)
],
calculated_args=[resolve_metric_id],
snql_set=lambda args, alias: Function(
"uniqIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"epm",
snql_distribution=self._resolve_epm,
optional_args=[fields.IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
fields.MetricsFunction(
"eps",
snql_distribution=self._resolve_eps,
optional_args=[fields.IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
fields.MetricsFunction(
"count",
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"count_if",
required_args=[
fields.MetricArg(
"if_col",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"if_val", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
Function(
"equals",
[self.builder.column(args["if_col"]), args["if_val"]],
),
],
),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"division",
required_args=[
fields.MetricArg(
# the dividend, needs to be named column, otherwise the query builder won't be able to determine the correct target table
"column",
allow_custom_measurements=False,
),
fields.MetricArg(
"divisorColumn",
allow_custom_measurements=False,
),
],
snql_gauge=self._resolve_division,
snql_distribution=self._resolve_division,
default_result_type="percentage",
),
fields.MetricsFunction(
"division_if",
required_args=[
fields.MetricArg(
# the dividend, needs to be named column, otherwise the query builder won't be able to determine the correct target table
"column",
allow_custom_measurements=False,
),
fields.MetricArg(
"divisorColumn",
allow_custom_measurements=False,
),
fields.MetricArg(
"if_col",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"if_val", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
snql_gauge=self._resolve_division_if,
snql_distribution=self._resolve_division_if,
default_result_type="percentage",
),
fields.MetricsFunction(
"sum",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_SUMMABLE_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: Function(
"sumIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
),
snql_counter=lambda args, alias: Function(
"sumIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"avg",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.SPAN_METRIC_BYTES_COLUMNS
| constants.SPAN_METRIC_COUNT_COLUMNS,
),
),
],
calculated_args=[resolve_metric_id],
snql_gauge=self._resolve_avg,
snql_distribution=self._resolve_avg,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"avg_if",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.SPAN_METRIC_COUNT_COLUMNS,
),
fields.MetricArg(
"if_col",
allowed_columns=["release", "span.op"],
),
fields.SnQLStringArg(
"if_val", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
calculated_args=[resolve_metric_id],
snql_gauge=self._resolve_avg_if,
snql_distribution=self._resolve_avg_if,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"percentile",
required_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column", allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
),
),
fields.NumberRange("percentile", 0, 1),
],
calculated_args=[resolve_metric_id],
snql_distribution=function_aliases.resolve_metrics_percentile,
is_percentile=True,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p50",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args, alias=alias, fixed_percentile=0.50
),
is_percentile=True,
default_result_type="duration",
),
fields.MetricsFunction(
"p75",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args, alias=alias, fixed_percentile=0.75
),
is_percentile=True,
default_result_type="duration",
),
fields.MetricsFunction(
"p95",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args, alias=alias, fixed_percentile=0.95
),
is_percentile=True,
default_result_type="duration",
),
fields.MetricsFunction(
"p99",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args, alias=alias, fixed_percentile=0.99
),
is_percentile=True,
default_result_type="duration",
),
fields.MetricsFunction(
"p100",
optional_args=[
fields.with_default(
"span.self_time",
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args, alias=alias, fixed_percentile=1
),
is_percentile=True,
default_result_type="duration",
),
fields.MetricsFunction(
"time_spent_percentage",
optional_args=[
fields.with_default(
"app", fields.SnQLStringArg("scope", allowed_strings=["app", "local"])
),
fields.with_default(
"span.self_time",
fields.MetricArg(
"column", allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
),
),
],
snql_distribution=self._resolve_time_spent_percentage,
default_result_type="percentage",
),
fields.MetricsFunction(
"http_response_rate",
required_args=[
SnQLStringArg("code"),
],
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_http_response_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"cache_hit_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_cache_hit_count(args),
self._resolve_cache_hit_and_miss_count(args),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"cache_miss_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_cache_miss_count(args),
self._resolve_cache_hit_and_miss_count(args),
alias,
),
default_result_type="percentage",
),
# TODO: Deprecated, use `http_response_rate(5)` instead
fields.MetricsFunction(
"http_error_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_http_error_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"http_response_count",
required_args=[
SnQLStringArg("code"),
],
snql_distribution=self._resolve_http_response_count,
default_result_type="integer",
),
# TODO: Deprecated, use `http_response_count(5)` instead
fields.MetricsFunction(
"http_error_count",
snql_distribution=self._resolve_http_error_count,
default_result_type="integer",
),
fields.MetricsFunction(
"ttid_contribution_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_ttid_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"ttid_count",
snql_distribution=self._resolve_ttid_count,
default_result_type="integer",
),
fields.MetricsFunction(
"ttfd_contribution_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_ttfd_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"ttfd_count",
snql_distribution=self._resolve_ttfd_count,
default_result_type="integer",
),
fields.MetricsFunction(
"main_thread_count",
snql_distribution=self._resolve_main_thread_count,
default_result_type="integer",
),
fields.MetricsFunction(
"avg_compare",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.SPAN_METRIC_COUNT_COLUMNS,
allow_custom_measurements=False,
),
fields.MetricArg(
"comparison_column",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"first_value", unquote=True, unescape_quotes=True, optional_unquote=True
),
fields.SnQLStringArg(
"second_value",
unquote=True,
unescape_quotes=True,
optional_unquote=True,
),
],
calculated_args=[resolve_metric_id],
snql_gauge=self._resolve_avg_compare,
snql_distribution=self._resolve_avg_compare,
default_result_type="percent_change",
),
fields.MetricsFunction(
"regression_score",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
fields.TimestampArg("timestamp"),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_regression_score,
default_result_type="number",
),
fields.MetricsFunction(
"avg_by_timestamp",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
),
fields.SnQLStringArg("condition", allowed_strings=["greater", "less"]),
fields.TimestampArg("timestamp"),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_avg_condition(
args, args["condition"], alias
),
default_result_type="duration",
),
fields.MetricsFunction(
"epm_by_timestamp",
required_args=[
fields.SnQLStringArg("condition", allowed_strings=["greater", "less"]),
fields.TimestampArg("timestamp"),
],
snql_distribution=lambda args, alias: self._resolve_epm_condition(
args, args["condition"], alias
),
default_result_type="rate",
),
fields.MetricsFunction(
"any",
required_args=[fields.MetricArg("column")],
# Not actually using `any` so that this function returns consistent results
snql_distribution=lambda args, alias: Function(
"min",
[self.builder.column(args["column"])],
alias,
),
result_type_fn=self.reflective_result_type(),
default_result_type="string",
redundant_grouping=True,
),
fields.MetricsFunction(
"count_op",
required_args=[
SnQLStringArg("op"),
],
snql_distribution=self._resolve_count_op,
default_result_type="integer",
),
fields.MetricsFunction(
"count_publish",
snql_distribution=self._resolve_count_publish,
default_result_type="integer",
),
fields.MetricsFunction(
"count_process",
snql_distribution=self._resolve_count_process,
default_result_type="integer",
),
fields.MetricsFunction(
"avg_if_publish",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.SPAN_METRIC_COUNT_COLUMNS,
),
],
calculated_args=[resolve_metric_id],
snql_gauge=self._resolve_avg_if_publish,
snql_distribution=self._resolve_avg_if_publish,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"avg_if_process",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.SPAN_METRIC_COUNT_COLUMNS,
),
],
calculated_args=[resolve_metric_id],
snql_gauge=self._resolve_avg_if_process,
snql_distribution=self._resolve_avg_if_process,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"trace_status_rate",
required_args=[
SnQLStringArg("status"),
],
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_trace_status_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"trace_error_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_trace_error_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
]
}
for alias, name in constants.SPAN_FUNCTION_ALIASES.items():
if name in function_converter:
function_converter[alias] = function_converter[name].alias_as(alias)
return function_converter
def _message_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """Delegate ``message`` filters to the shared filter-alias helper."""
    query_builder = self.builder
    return filter_aliases.message_filter_converter(query_builder, search_filter)
def _span_domain_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """Convert a ``span.domain`` search filter into a snuba condition.

    Three cases:
    * wildcard value -> case-insensitive regex match against each split segment
    * empty value    -> length check on the split-domain array
    * plain value    -> ``has(...)`` membership test on the array
    """
    value = search_filter.value.value
    if search_filter.value.is_wildcard():
        # Strip the leading/trailing wildcard markers added by the parser,
        # then match the remainder against every array element.
        value = search_filter.value.value[1:-1]
        return Condition(
            Function(
                "arrayExists",
                [
                    Function(
                        "lambda",
                        [
                            Function("tuple", [Identifier("x")]),
                            Function("match", [Identifier("x"), f"(?i){value}"]),
                        ],
                    ),
                    self._resolve_span_domain(),
                ],
            ),
            Op(search_filter.operator),
            1,
        )
    elif value == "":
        # Comparing against the empty string becomes a length check:
        # "=" means the array is empty, anything else means non-empty.
        operator = Op.LTE if search_filter.operator == "=" else Op.GT
        return Condition(Function("length", [self._resolve_span_domain()]), operator, 0)
    else:
        # has() yields 1 when present, so equality operators compare != 0.
        return Condition(
            Function("has", [self._resolve_span_domain(), value]),
            Op.NEQ if search_filter.operator in constants.EQUALITY_OPERATORS else Op.EQ,
            0,
        )
def _device_class_filter_converter(self, search_filter: SearchFilter) -> SelectType:
    """Map device-class filters, treating the empty tag value as "Unknown"."""
    class_mapping = {**DEVICE_CLASS, "Unknown": {""}}
    return filter_aliases.device_class_converter(self.builder, search_filter, class_mapping)
def _resolve_span_module(self, alias: str) -> SelectType:
    """Resolve ``span.module`` through the shared field-alias helper."""
    resolved = field_aliases.resolve_span_module(self.builder, alias)
    return resolved
def _resolve_span_domain(self, alias: str | None = None) -> SelectType:
    """Split the raw ``span.domain`` tag on its separator into an array.

    Empty segments are filtered out; the resulting array expression is used
    both for filtering and for arrayJoin (see _resolve_unique_span_domains).
    """
    return Function(
        "arrayFilter",
        [
            # Keep only non-empty segments after the split.
            Function(
                "lambda",
                [Function("tuple", [Identifier("x")]), Function("notEmpty", [Identifier("x")])],
            ),
            Function(
                "splitByChar",
                [
                    constants.SPAN_DOMAIN_SEPARATOR,
                    self.builder.column("span.domain"),
                ],
            ),
        ],
        alias,
    )
def _resolve_unique_span_domains(
    self,
    alias: str | None = None,
) -> SelectType:
    """Explode the filtered span.domain array into one row per domain."""
    domain_array = self._resolve_span_domain()
    return Function("arrayJoin", [domain_array], alias)
# Query Functions
def _resolve_count_if(
    self,
    metric_condition: Function,
    condition: Function,
    alias: str | None = None,
) -> SelectType:
    """Build ``countIf(value, metric_condition AND condition)``."""
    combined_condition = Function("and", [metric_condition, condition])
    return Function("countIf", [Column("value"), combined_condition], alias)
def _resolve_total_span_duration(self, alias: str, scope: str, column: str) -> SelectType:
    """Return the total span time as a constant snuba expression.

    Runs a second metrics query to compute ``sum(column)``; with
    scope == "local" the current query's filters are applied, otherwise the
    app-wide total is used. The result is cached on self so repeated calls
    within one request do not re-run the query.
    """
    self.builder.requires_other_aggregates = True
    if self.total_span_duration is not None:
        # Cached from a previous call — reuse without querying again.
        return Function("toFloat64", [self.total_span_duration], alias)
    total_query = spans_metrics.SpansMetricsQueryBuilder(
        dataset=self.builder.dataset,
        params={},
        snuba_params=self.builder.params,
        # Only apply the user's query string when scoped locally.
        query=self.builder.query if scope == "local" else None,
        selected_columns=[f"sum({column})"],
    )
    sentry_sdk.set_tag("query.resolved_total", scope)
    total_results = total_query.run_query(
        Referrer.API_DISCOVER_TOTAL_SUM_TRANSACTION_DURATION_FIELD.value
    )
    results = total_query.process_results(total_results)
    if len(results["data"]) != 1:
        # No (or ambiguous) data: cache 0 so later percentage math degrades
        # gracefully instead of re-querying.
        self.total_span_duration = 0
        return Function("toFloat64", [0], alias)
    self.total_span_duration = results["data"][0][get_function_alias(f"sum({column})")]
    return Function("toFloat64", [self.total_span_duration], alias)
def _resolve_time_spent_percentage(self, args: Args, alias: str) -> SelectType:
    """Fraction of total span time (per args["scope"]) spent in this query's rows."""
    denominator = self._resolve_total_span_duration(
        constants.TOTAL_SPAN_DURATION_ALIAS, args["scope"], args["column"]
    )
    numerator = Function(
        "sumIf",
        [
            Column("value"),
            Function("equals", [Column("metric_id"), self.resolve_metric(args["column"])]),
        ],
    )
    return function_aliases.resolve_division(numerator, denominator, alias)
def _resolve_cache_hit_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose ``cache.hit`` tag is "true"."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    hit_condition = Function(
        "equals",
        [self.builder.column("cache.hit"), self.builder.resolve_tag_value("true")],
    )
    return self._resolve_count_if(metric_condition, hit_condition, alias)
def _resolve_cache_miss_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose ``cache.hit`` tag is "false"."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    miss_condition = Function(
        "equals",
        [self.builder.column("cache.hit"), self.builder.resolve_tag_value("false")],
    )
    return self._resolve_count_if(metric_condition, miss_condition, alias)
def _resolve_cache_hit_and_miss_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose ``cache.hit`` tag is any known hit/miss status."""
    resolved_statuses = [
        self.builder.resolve_tag_value(status) for status in constants.CACHE_HIT_STATUS
    ]
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    status_condition = Function(
        "in",
        [
            self.builder.column("cache.hit"),
            # Statuses that fail to resolve to a tag value are dropped.
            [status for status in resolved_statuses if status is not None],
        ],
    )
    return self._resolve_count_if(metric_condition, status_condition, alias)
def _resolve_http_response_count(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose status code starts with args["code"]."""
    code_condition = Function(
        "startsWith",
        [self.builder.column("span.status_code"), args["code"]],
    )
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    return self._resolve_count_if(metric_condition, code_condition, alias)
def _resolve_http_error_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
    extra_condition: Function | None = None,
) -> SelectType:
    """Count span.self_time rows whose status code is an HTTP server error."""
    resolved_statuses = [
        self.builder.resolve_tag_value(status) for status in constants.HTTP_SERVER_ERROR_STATUS
    ]
    condition = Function(
        "in",
        [
            self.builder.column("span.status_code"),
            [status for status in resolved_statuses if status is not None],
        ],
    )
    # Callers may further restrict the error count (e.g. to a time range).
    if extra_condition:
        condition = Function("and", [condition, extra_condition])
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    return self._resolve_count_if(metric_condition, condition, alias)
def _resolve_main_thread_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows tagged as running on the main thread."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    main_thread_condition = Function(
        "equals",
        [self.builder.column("span.main_thread"), self.builder.resolve_tag_value("true")],
    )
    return self._resolve_count_if(metric_condition, main_thread_condition, alias)
def _resolve_ttid_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows tagged with ttid (time-to-initial-display)."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    ttid_condition = Function(
        "equals",
        [self.builder.column("ttid"), self.builder.resolve_tag_value("ttid")],
    )
    return self._resolve_count_if(metric_condition, ttid_condition, alias)
def _resolve_ttfd_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows tagged with ttfd (time-to-full-display)."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    ttfd_condition = Function(
        "equals",
        [self.builder.column("ttfd"), self.builder.resolve_tag_value("ttfd")],
    )
    return self._resolve_count_if(metric_condition, ttfd_condition, alias)
def _resolve_epm(
    self,
    args: dict[str, str | Column | SelectType | int | float],
    alias: str | None = None,
    extra_condition: Function | None = None,
) -> SelectType:
    """Events per minute: rate with a 60-second unit."""
    builder = self.builder
    # Timeseries builders carry an interval; propagate it into the args.
    if hasattr(builder, "interval"):
        args["interval"] = builder.interval
    return self._resolve_rate(60, args, alias, extra_condition)
def _resolve_eps(
    self,
    args: dict[str, str | Column | SelectType | int | float],
    alias: str | None = None,
    extra_condition: Function | None = None,
) -> SelectType:
    """Events per second: rate with no unit rescaling."""
    builder = self.builder
    # Timeseries builders carry an interval; propagate it into the args.
    if hasattr(builder, "interval"):
        args["interval"] = builder.interval
    return self._resolve_rate(None, args, alias, extra_condition)
def _resolve_rate(
    self,
    interval: int | None,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
    extra_condition: Function | None = None,
) -> SelectType:
    """Event rate for span.self_time: count / rescaled query interval.

    ``interval`` is the unit size in seconds (60 -> per-minute); when None,
    the count is divided by args["interval"] directly (per-second).
    """
    base_condition = Function(
        "equals",
        [
            Column("metric_id"),
            self.resolve_metric("span.self_time"),
        ],
    )
    if extra_condition:
        condition = Function("and", [base_condition, extra_condition])
    else:
        condition = base_condition
    return Function(
        "divide",
        [
            Function(
                "countIf",
                [
                    Column("value"),
                    condition,
                ],
            ),
            (
                # args["interval"] is the query window in seconds; dividing by
                # ``interval`` converts the denominator to the requested unit.
                args["interval"]
                if interval is None
                else Function("divide", [args["interval"], interval])
            ),
        ],
        alias,
    )
def _resolve_regression_score(
    self,
    args: Mapping[str, str | Column | SelectType | int | float | datetime],
    alias: str | None = None,
) -> SelectType:
    """Difference between (avg * epm) after and before the breakpoint timestamp."""
    after_breakpoint = Function(
        "multiply",
        [
            self._resolve_avg_condition(args, "greater"),
            self._resolve_epm_condition(args, "greater"),
        ],
    )
    before_breakpoint = Function(
        "multiply",
        [
            self._resolve_avg_condition(args, "less"),
            self._resolve_epm_condition(args, "less"),
        ],
    )
    return Function("minus", [after_breakpoint, before_breakpoint], alias)
def _resolve_epm_condition(
    self,
    args: Mapping[str, str | Column | SelectType | int | float | datetime],
    condition: str,
    alias: str | None = None,
) -> SelectType:
    """Events-per-minute restricted to one side of args["timestamp"].

    ``condition`` is "greater" (after the breakpoint) or "less" (before it);
    the denominator is the wall-clock width of that side of the query window.
    """
    timestamp = args["timestamp"]
    if condition == "greater":
        assert isinstance(self.builder.params.end, datetime) and isinstance(
            timestamp, datetime
        ), f"params.end: {self.builder.params.end} - timestamp: {timestamp}"
        # Seconds from the breakpoint to the end of the query window.
        interval = (self.builder.params.end - timestamp).total_seconds()
    elif condition == "less":
        assert isinstance(self.builder.params.start, datetime) and isinstance(
            timestamp, datetime
        ), f"params.start: {self.builder.params.start} - timestamp: {timestamp}"
        # Seconds from the start of the query window to the breakpoint.
        interval = (timestamp - self.builder.params.start).total_seconds()
    else:
        raise InvalidSearchQuery(f"Unsupported condition for epm: {condition}")
    return Function(
        "divide",
        [
            Function(
                "countIf",
                [
                    Function(
                        "and",
                        [
                            Function(
                                "equals",
                                [
                                    Column("metric_id"),
                                    self.resolve_metric("span.duration"),
                                ],
                            ),
                            # Keep only rows on the requested side of the breakpoint.
                            Function(
                                condition,
                                [
                                    Column("timestamp"),
                                    args["timestamp"],
                                ],
                            ),
                        ],
                    )
                ],
            ),
            # Convert the side's width from seconds to minutes.
            Function("divide", [interval, 60]),
        ],
        alias,
    )
def _resolve_avg_condition(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    condition: str,
    alias: str | None = None,
) -> SelectType:
    """Average of args["column"] on one side of args["timestamp"].

    ``condition`` is a ClickHouse comparison name ("greater"/"less"); an empty
    side would make avgIf return NaN, which is coerced to 0.
    """
    column = args["column"]
    assert isinstance(column, str), f"column: {column}"
    conditional_aggregate = Function(
        "avgIf",
        [
            Column("value"),
            Function(
                "and",
                [
                    Function(
                        "equals",
                        [
                            Column("metric_id"),
                            self.resolve_metric(column),
                        ],
                    ),
                    Function(condition, [Column("timestamp"), args["timestamp"]]),
                ],
            ),
        ],
    )
    # avgIf over zero rows yields NaN; substitute 0 so downstream arithmetic
    # (e.g. the regression score's multiply/minus) stays well-defined.
    return Function(
        "if",
        [
            Function("isNaN", [conditional_aggregate]),
            0,
            conditional_aggregate,
        ],
        alias,
    )
def _resolve_count_op(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose span.op equals args["op"]."""
    op = args["op"]
    assert isinstance(op, str), f"op: {op}"
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    op_condition = Function(
        "equals",
        [self.builder.column("span.op"), self.builder.resolve_tag_value(op)],
    )
    return self._resolve_count_if(metric_condition, op_condition, alias)
def _is_messaging_op(self, op: str, operation_name: str, operation_type: str) -> Function:
    """Condition matching a messaging span by span.op OR its messaging.operation.* tags.

    NOTE(review): the ``tag_key and Function(...)`` pattern below relies on
    resolve_tag_key returning a truthy value when the tag exists; if it can
    return a falsy value, that falsy value is placed directly into the ``or``
    argument list instead of a condition — confirm this is intended.
    """
    hasOperationNameColumn = self.builder.resolve_tag_key("messaging.operation.name")
    hasOperationTypeColumn = self.builder.resolve_tag_key("messaging.operation.type")
    return Function(
        "or",
        [
            Function(
                "equals",
                [
                    self.builder.column("span.op"),
                    self.builder.resolve_tag_value(op),
                ],
            ),
            # Only emitted as a real condition when the type tag resolves.
            hasOperationTypeColumn
            and Function(
                "equals",
                [
                    self.builder.column("messaging.operation.type"),
                    self.builder.resolve_tag_value(operation_type),
                ],
            ),
            # Only emitted as a real condition when the name tag resolves.
            hasOperationNameColumn
            and Function(
                "equals",
                [
                    self.builder.column("messaging.operation.name"),
                    self.builder.resolve_tag_value(operation_name),
                ],
            ),
        ],
    )
def _resolve_count_publish(self, args, alias):
    """Count span.self_time rows that look like queue-publish operations."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    publish_condition = self._is_messaging_op("queue.publish", "publish", "create")
    return self._resolve_count_if(metric_condition, publish_condition, alias)
def _resolve_count_process(self, args, alias):
    """Count span.self_time rows that look like queue-process operations."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    process_condition = self._is_messaging_op("queue.process", "process", "process")
    return self._resolve_count_if(metric_condition, process_condition, alias)
def _resolve_avg_if_publish(self, args, alias):
    """Average metric value over queue-publish spans for args["metric_id"]."""
    combined_condition = Function(
        "and",
        [
            Function("equals", [Column("metric_id"), args["metric_id"]]),
            self._is_messaging_op("queue.publish", "publish", "create"),
        ],
    )
    return Function("avgIf", [Column("value"), combined_condition], alias)
def _resolve_avg_if_process(self, args, alias):
    """Average metric value over queue-process spans for args["metric_id"]."""
    combined_condition = Function(
        "and",
        [
            Function("equals", [Column("metric_id"), args["metric_id"]]),
            self._is_messaging_op("queue.process", "process", "process"),
        ],
    )
    return Function("avgIf", [Column("value"), combined_condition], alias)
def _resolve_sum(self, metric_name: str, alias: str | None = None):
    """``sumIf(value)`` restricted to the named metric."""
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric(metric_name)]
    )
    return Function("sumIf", [Column("value"), metric_condition], alias)
def _resolve_avg(self, args, alias):
    """``avgIf(value)`` restricted to args["metric_id"]."""
    metric_condition = Function("equals", [Column("metric_id"), args["metric_id"]])
    return Function("avgIf", [Column("value"), metric_condition], alias)
def _resolve_avg_if(self, args, alias):
    """``avgIf(value)`` for args["metric_id"] with an extra column equality filter."""
    combined_condition = Function(
        "and",
        [
            Function("equals", [Column("metric_id"), args["metric_id"]]),
            Function(
                "equals",
                [self.builder.column(args["if_col"]), args["if_val"]],
            ),
        ],
    )
    return Function("avgIf", [Column("value"), combined_condition], alias)
def _resolve_sum_if(
    self,
    metric_name: str,
    if_col_name: str,
    if_val: SelectType,
    alias: str | None = None,
) -> SelectType:
    """``sumIf(value)`` for the named metric with an extra column equality filter."""
    combined_condition = Function(
        "and",
        [
            Function(
                "equals",
                [Column("metric_id"), self.resolve_metric(metric_name)],
            ),
            Function(
                "equals",
                [self.builder.column(if_col_name), if_val],
            ),
        ],
    )
    return Function("sumIf", [Column("value"), combined_condition], alias)
def _resolve_division_if(
    self,
    args: Mapping[str, str | Column | SelectType],
    alias: str,
) -> SelectType:
    """Ratio of two conditional sums that share the same if-column filter."""
    numerator = self._resolve_sum_if(args["column"], args["if_col"], args["if_val"])
    denominator = self._resolve_sum_if(args["divisorColumn"], args["if_col"], args["if_val"])
    return function_aliases.resolve_division(numerator, denominator, alias)
def _resolve_division(
    self,
    args: Mapping[str, str | Column | SelectType],
    alias: str,
) -> SelectType:
    """Ratio of two metric sums."""
    numerator = self._resolve_sum(args["column"], None)
    denominator = self._resolve_sum(args["divisorColumn"], None)
    return function_aliases.resolve_division(numerator, denominator, alias)
def _resolve_avg_compare(self, args, alias):
    """Delegate avg_compare to the shared function-alias helper."""
    column_resolver = self.builder.column
    return function_aliases.resolve_avg_compare(column_resolver, args, alias)
def _resolve_trace_status_count(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose trace.status equals args["status"]."""
    status_condition = Function(
        "equals",
        [self.builder.column("trace.status"), args["status"]],
    )
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    return self._resolve_count_if(metric_condition, status_condition, alias)
def _resolve_trace_error_count(
    self,
    _: Mapping[str, str | Column | SelectType | int | float],
    alias: str | None = None,
) -> SelectType:
    """Count span.self_time rows whose trace.status is NOT a non-failure status."""
    resolved_statuses = [
        self.builder.resolve_tag_value(status) for status in constants.NON_FAILURE_STATUS
    ]
    not_success_condition = Function(
        "not",
        [
            Function(
                "in",
                [
                    self.builder.column("trace.status"),
                    [status for status in resolved_statuses if status is not None],
                ],
            )
        ],
    )
    metric_condition = Function(
        "equals", [Column("metric_id"), self.resolve_metric("span.self_time")]
    )
    return self._resolve_count_if(metric_condition, not_success_condition, alias)
@property
def orderby_converter(self) -> Mapping[str, OrderBy]:
    """No dataset-specific orderby handling: always an empty mapping."""
    converters: dict[str, OrderBy] = {}
    return converters
| SpansMetricsDatasetConfig |
python | pydata__xarray | xarray/tests/test_duck_array_ops.py | {
"start": 7852,
"end": 9738
class ____(TestOps):
    """Re-run the TestOps suite against a chunked dask array instead of numpy."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Imported lazily so collecting this module does not require dask.
        import dask.array

        # Same fixture values as the base class, but chunked along every
        # dimension to exercise dask's blocked code paths.
        self.x = dask.array.from_array(
            [
                [
                    [nan, nan, 2.0, nan],
                    [nan, 5.0, 6.0, nan],
                    [8.0, 9.0, 10.0, nan],
                ],
                [
                    [nan, 13.0, 14.0, 15.0],
                    [nan, 17.0, 18.0, nan],
                    [nan, 21.0, nan, nan],
                ],
            ],
            chunks=(2, 1, 2),
        )
def test_cumsum_1d():
    """cumsum over a 1-D array is identical for axis=None, 0, -1, and (0,)."""
    data = np.array([0, 1, 2, 3])
    running_total = np.array([0, 1, 3, 6])
    assert_array_equal(running_total, duck_array_ops.cumsum(data))
    for axis in (0, -1, (0,)):
        assert_array_equal(running_total, duck_array_ops.cumsum(data, axis=axis))
    # An empty axis tuple is a no-op.
    assert_array_equal(data, duck_array_ops.cumsum(data, axis=()))
def test_cumsum_2d():
    """cumsum over both axes of a 2-D array, plus the empty-axis no-op."""
    data = np.array([[1, 2], [3, 4]])
    all_axes_total = np.array([[1, 3], [4, 10]])
    assert_array_equal(all_axes_total, duck_array_ops.cumsum(data))
    assert_array_equal(all_axes_total, duck_array_ops.cumsum(data, axis=(0, 1)))
    assert_array_equal(data, duck_array_ops.cumsum(data, axis=()))
def test_cumprod_2d():
    """cumprod over both axes of a 2-D array, plus the empty-axis no-op."""
    data = np.array([[1, 2], [3, 4]])
    all_axes_product = np.array([[1, 2], [3, 2 * 3 * 4]])
    assert_array_equal(all_axes_product, duck_array_ops.cumprod(data))
    assert_array_equal(all_axes_product, duck_array_ops.cumprod(data, axis=(0, 1)))
    assert_array_equal(data, duck_array_ops.cumprod(data, axis=()))
python | PrefectHQ__prefect | src/prefect/workers/base.py | {
"start": 12774,
"end": 14701
class ____(BaseModel):
    """Base set of template variables a worker exposes for flow-run infrastructure."""

    name: Optional[str] = Field(
        default=None,
        description="Name given to infrastructure created by a worker.",
    )
    env: dict[str, Optional[str]] = Field(
        default_factory=dict,
        title="Environment Variables",
        description="Environment variables to set when starting a flow run.",
    )
    labels: dict[str, str] = Field(
        default_factory=dict,
        description="Labels applied to infrastructure created by a worker.",
    )
    command: Optional[str] = Field(
        default=None,
        description=(
            "The command to use when starting a flow run. "
            "In most cases, this should be left blank and the command "
            "will be automatically generated by the worker."
        ),
    )

    @classmethod
    def model_json_schema(
        cls,
        by_alias: bool = True,
        ref_template: str = "#/definitions/{model}",
        schema_generator: Type[GenerateJsonSchema] = GenerateJsonSchema,
        mode: Literal["validation", "serialization"] = "validation",
        *,
        # NOTE(review): union_format is accepted but never used in this
        # override — confirm whether it should be forwarded to super().
        union_format: Literal["any_of", "primitive_type_array"] = "any_of",
    ) -> dict[str, Any]:
        """TODO: stop overriding this method - use GenerateSchema in ConfigDict instead?"""
        schema = super().model_json_schema(
            by_alias, ref_template, schema_generator, mode
        )
        # ensure backwards compatibility by copying $defs into definitions
        if "$defs" in schema:
            schema["definitions"] = schema.pop("$defs")
        # we aren't expecting these additional fields in the schema
        if "additionalProperties" in schema:
            schema.pop("additionalProperties")
        for _, definition in schema.get("definitions", {}).items():
            if "additionalProperties" in definition:
                definition.pop("additionalProperties")
        return schema
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_adls_list.py | {
"start": 1134,
"end": 1602
class ____:
    """Unit test for ADLSListOperator.execute against a mocked Azure hook."""

    @mock.patch("airflow.providers.microsoft.azure.operators.adls.AzureDataLakeHook")
    def test_execute(self, mock_hook):
        hook_instance = mock_hook.return_value
        hook_instance.list.return_value = MOCK_FILES
        operator = ADLSListOperator(task_id=TASK_ID, path=TEST_PATH)
        listed = operator.execute(None)
        # The operator must query exactly the configured path and pass the
        # hook's listing straight through.
        hook_instance.list.assert_called_once_with(path=TEST_PATH)
        assert sorted(listed) == sorted(MOCK_FILES)
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 6770,
"end": 6953
class ____(BaseTestFactsPlatform):
    """Platform wiring for the shared virtual-facts test suite on NetBSD."""

    platform_id = 'NetBSD'
    fact_class = virtual.netbsd.NetBSDVirtual
    collector_class = virtual.netbsd.NetBSDVirtualCollector
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 3407,
"end": 3459
class ____(SQLRole):
    """Marker subclass grouping structural SQL roles; adds no behavior of its own."""

    __slots__ = ()
python | python__mypy | mypy/semanal_namedtuple.py | {
"start": 2006,
"end": 31406
} | class ____:
def __init__(
    self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder
) -> None:
    # Handles to the global options, the semantic-analyzer facade used for
    # symbol/type resolution, and the error reporter used by fail().
    self.options = options
    self.api = api
    self.msg = msg
def analyze_namedtuple_classdef(
    self, defn: ClassDef, is_stub_file: bool, is_func_scope: bool
) -> tuple[bool, TypeInfo | None]:
    """Analyze if given class definition can be a named tuple definition.

    Return a tuple where first item indicates whether this can possibly be a named tuple,
    and the second item is the corresponding TypeInfo (may be None if not ready and should be
    deferred).
    """
    for base_expr in defn.base_type_exprs:
        if isinstance(base_expr, RefExpr):
            # Resolve the base-class reference so its fullname is populated.
            self.api.accept(base_expr)
            if base_expr.fullname in TYPED_NAMEDTUPLE_NAMES:
                result = self.check_namedtuple_classdef(defn, is_stub_file)
                if result is None:
                    # This is a valid named tuple, but some types are incomplete.
                    return True, None
                items, types, default_items, statements = result
                # Function-level named tuples get a "@line" suffix so two
                # same-named definitions in one module don't collide.
                if is_func_scope and "@" not in defn.name:
                    defn.name += "@" + str(defn.line)
                existing_info = None
                if isinstance(defn.analyzed, NamedTupleExpr):
                    existing_info = defn.analyzed.info
                info = self.build_namedtuple_typeinfo(
                    defn.name, items, types, default_items, defn.line, existing_info
                )
                defn.analyzed = NamedTupleExpr(info, is_typed=True)
                defn.analyzed.line = defn.line
                defn.analyzed.column = defn.column
                # Replace the class body with only the statements that are
                # valid inside a NamedTuple definition.
                defn.defs.body = statements
                # All done: this is a valid named tuple with all types known.
                return True, info
    # This can't be a valid named tuple.
    return False, None
def check_namedtuple_classdef(
self, defn: ClassDef, is_stub_file: bool
) -> tuple[list[str], list[Type], dict[str, Expression], list[Statement]] | None:
"""Parse and validate fields in named tuple class definition.
Return a four tuple:
* field names
* field types
* field default values
* valid statements
or None, if any of the types are not ready.
"""
if len(defn.base_type_exprs) > 1:
self.fail("NamedTuple should be a single base", defn)
items: list[str] = []
types: list[Type] = []
default_items: dict[str, Expression] = {}
statements: list[Statement] = []
for stmt in defn.defs.body:
statements.append(stmt)
if not isinstance(stmt, AssignmentStmt):
# Still allow pass or ... (for empty namedtuples).
if isinstance(stmt, PassStmt) or (
isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, EllipsisExpr)
):
continue
# Also allow methods, including decorated ones.
if isinstance(stmt, (Decorator, FuncBase)):
continue
# And docstrings.
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
statements.pop()
defn.removed_statements.append(stmt)
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
# An assignment, but an invalid one.
statements.pop()
defn.removed_statements.append(stmt)
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
else:
# Append name and type in this case...
name = stmt.lvalues[0].name
items.append(name)
if stmt.type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
# We never allow recursive types at function scope. Although it is
# possible to support this for named tuples, it is still tricky, and
# it would be inconsistent with type aliases.
analyzed = self.api.anal_type(
stmt.type,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="NamedTuple item type",
prohibit_special_class_field_types="NamedTuple",
)
if analyzed is None:
# Something is incomplete. We need to defer this named tuple.
return None
types.append(analyzed)
# ...despite possible minor failures that allow further analysis.
if name.startswith("_"):
self.fail(
f"NamedTuple field name cannot start with an underscore: {name}", stmt
)
if stmt.type is None or hasattr(stmt, "new_syntax") and not stmt.new_syntax:
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
elif isinstance(stmt.rvalue, TempNode):
# x: int assigns rvalue to TempNode(AnyType())
if default_items:
self.fail(
"Non-default NamedTuple fields cannot follow default fields", stmt
)
else:
default_items[name] = stmt.rvalue
if defn.keywords:
for_function = ' for "__init_subclass__" of "NamedTuple"'
for key in defn.keywords:
self.msg.unexpected_keyword_argument_for_function(for_function, key, defn)
return items, types, default_items, statements
def check_namedtuple(
self, node: Expression, var_name: str | None, is_func_scope: bool
) -> tuple[str | None, TypeInfo | None, list[TypeVarLikeType]]:
"""Check if a call defines a namedtuple.
The optional var_name argument is the name of the variable to
which this is assigned, if any.
Return a tuple of two items:
* Internal name of the named tuple (e.g. the name passed as an argument to namedtuple)
or None if it is not a valid named tuple
* Corresponding TypeInfo, or None if not ready.
If the definition is invalid but looks like a namedtuple,
report errors but return (some) TypeInfo.
"""
if not isinstance(node, CallExpr):
return None, None, []
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return None, None, []
fullname = callee.fullname
if fullname == "collections.namedtuple":
is_typed = False
elif fullname in TYPED_NAMEDTUPLE_NAMES:
is_typed = True
else:
return None, None, []
result = self.parse_namedtuple_args(call, fullname)
if result:
items, types, defaults, typename, tvar_defs, ok = result
else:
# Error. Construct dummy return value.
if var_name:
name = var_name
if is_func_scope:
name += "@" + str(call.line)
else:
name = var_name = "namedtuple@" + str(call.line)
info = self.build_namedtuple_typeinfo(name, [], [], {}, node.line, None)
self.store_namedtuple_info(info, var_name, call, is_typed)
if name != var_name or is_func_scope:
# NOTE: we skip local namespaces since they are not serialized.
self.api.add_symbol_skip_local(name, info)
return var_name, info, []
if not ok:
# This is a valid named tuple but some types are not ready.
return typename, None, []
# We use the variable name as the class name if it exists. If
# it doesn't, we use the name passed as an argument. We prefer
# the variable name because it should be unique inside a
# module, and so we don't need to disambiguate it with a line
# number.
if var_name:
name = var_name
else:
name = typename
if var_name is None or is_func_scope:
# There are two special cases where need to give it a unique name derived
# from the line number:
# * This is a base class expression, since it often matches the class name:
# class NT(NamedTuple('NT', [...])):
# ...
# * This is a local (function or method level) named tuple, since
# two methods of a class can define a named tuple with the same name,
# and they will be stored in the same namespace (see below).
name += "@" + str(call.line)
if defaults:
default_items = {
arg_name: default for arg_name, default in zip(items[-len(defaults) :], defaults)
}
else:
default_items = {}
existing_info = None
if isinstance(node.analyzed, NamedTupleExpr):
existing_info = node.analyzed.info
info = self.build_namedtuple_typeinfo(
name, items, types, default_items, node.line, existing_info
)
# If var_name is not None (i.e. this is not a base class expression), we always
# store the generated TypeInfo under var_name in the current scope, so that
# other definitions can use it.
if var_name:
self.store_namedtuple_info(info, var_name, call, is_typed)
else:
call.analyzed = NamedTupleExpr(info, is_typed=is_typed)
call.analyzed.set_line(call)
# There are three cases where we need to store the generated TypeInfo
# second time (for the purpose of serialization):
# * If there is a name mismatch like One = NamedTuple('Other', [...])
# we also store the info under name 'Other@lineno', this is needed
# because classes are (de)serialized using their actual fullname, not
# the name of l.h.s.
# * If this is a method level named tuple. It can leak from the method
# via assignment to self attribute and therefore needs to be serialized
# (local namespaces are not serialized).
# * If it is a base class expression. It was not stored above, since
# there is no var_name (but it still needs to be serialized
# since it is in MRO of some class).
if name != var_name or is_func_scope:
# NOTE: we skip local namespaces since they are not serialized.
self.api.add_symbol_skip_local(name, info)
return typename, info, tvar_defs
def store_namedtuple_info(
    self, info: TypeInfo, name: str, call: CallExpr, is_typed: bool
) -> None:
    """Bind the generated TypeInfo to *name* and record it on the call node."""
    self.api.add_symbol(name, info, call)
    call.analyzed = NamedTupleExpr(info, is_typed=is_typed)
    call.analyzed.set_line(call)
def parse_namedtuple_args(
self, call: CallExpr, fullname: str
) -> None | (tuple[list[str], list[Type], list[Expression], str, list[TypeVarLikeType], bool]):
"""Parse a namedtuple() call into data needed to construct a type.
Returns a 6-tuple:
- List of argument names
- List of argument types
- List of default values
- First argument of namedtuple
- All typevars found in the field definition
- Whether all types are ready.
Return None if the definition didn't typecheck.
"""
type_name = "NamedTuple" if fullname in TYPED_NAMEDTUPLE_NAMES else "namedtuple"
# TODO: Share code with check_argument_count in checkexpr.py?
args = call.args
if len(args) < 2:
self.fail(f'Too few arguments for "{type_name}()"', call)
return None
defaults: list[Expression] = []
rename = False
if len(args) > 2:
# Typed namedtuple doesn't support additional arguments.
if fullname in TYPED_NAMEDTUPLE_NAMES:
self.fail('Too many arguments for "NamedTuple()"', call)
return None
for i, arg_name in enumerate(call.arg_names[2:], 2):
if arg_name == "defaults":
arg = args[i]
# We don't care what the values are, as long as the argument is an iterable
# and we can count how many defaults there are.
if isinstance(arg, (ListExpr, TupleExpr)):
defaults = list(arg.items)
else:
self.fail(
"List or tuple literal expected as the defaults argument to "
"{}()".format(type_name),
arg,
)
elif arg_name == "rename":
arg = args[i]
if isinstance(arg, NameExpr) and arg.name in ("True", "False"):
rename = arg.name == "True"
else:
self.fail(
f'Boolean literal expected as the "rename" argument to {type_name}()',
arg,
code=ARG_TYPE,
)
if call.arg_kinds[:2] != [ARG_POS, ARG_POS]:
self.fail(f'Unexpected arguments to "{type_name}()"', call)
return None
if not isinstance(args[0], StrExpr):
self.fail(f'"{type_name}()" expects a string literal as the first argument', call)
return None
typename = args[0].value
types: list[Type] = []
tvar_defs = []
if not isinstance(args[1], (ListExpr, TupleExpr)):
if fullname == "collections.namedtuple" and isinstance(args[1], StrExpr):
str_expr = args[1]
items = str_expr.value.replace(",", " ").split()
else:
self.fail(
'List or tuple literal expected as the second argument to "{}()"'.format(
type_name
),
call,
)
return None
else:
listexpr = args[1]
if fullname == "collections.namedtuple":
# The fields argument contains just names, with implicit Any types.
if not is_StrExpr_list(listexpr.items):
self.fail('String literal expected as "namedtuple()" item', call)
return None
items = [item.value for item in listexpr.items]
else:
type_exprs = [
t.items[1]
for t in listexpr.items
if isinstance(t, TupleExpr) and len(t.items) == 2
]
tvar_defs = self.api.get_and_bind_all_tvars(type_exprs)
# The fields argument contains (name, type) tuples.
result = self.parse_namedtuple_fields_with_types(listexpr.items, call)
if result is None:
# One of the types is not ready, defer.
return None
items, types, _, ok = result
if not ok:
return [], [], [], typename, [], False
if not types:
types = [AnyType(TypeOfAny.unannotated) for _ in items]
processed_items = []
seen_names: set[str] = set()
for i, item in enumerate(items):
problem = self.check_namedtuple_field_name(item, seen_names)
if problem is None:
processed_items.append(item)
seen_names.add(item)
else:
if not rename:
self.fail(f'"{type_name}()" {problem}', call)
# Even if rename=False, we pretend that it is True.
# At runtime namedtuple creation would throw an error;
# applying the rename logic means we create a more sensible
# namedtuple.
new_name = f"_{i}"
processed_items.append(new_name)
seen_names.add(new_name)
if len(defaults) > len(items):
self.fail(f'Too many defaults given in call to "{type_name}()"', call)
defaults = defaults[: len(items)]
return processed_items, types, defaults, typename, tvar_defs, True
def parse_namedtuple_fields_with_types(
self, nodes: list[Expression], context: Context
) -> tuple[list[str], list[Type], list[Expression], bool] | None:
"""Parse typed named tuple fields.
Return (names, types, defaults, whether types are all ready), or None if error occurred.
"""
items: list[str] = []
types: list[Type] = []
for item in nodes:
if isinstance(item, TupleExpr):
if len(item.items) != 2:
self.fail('Invalid "NamedTuple()" field definition', item)
return None
name, type_node = item.items
if isinstance(name, StrExpr):
items.append(name.value)
else:
self.fail('Invalid "NamedTuple()" field name', item)
return None
try:
type = expr_to_unanalyzed_type(type_node, self.options, self.api.is_stub_file)
except TypeTranslationError:
self.fail("Invalid field type", type_node)
return None
# We never allow recursive types at function scope.
analyzed = self.api.anal_type(
type,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="NamedTuple item type",
prohibit_special_class_field_types="NamedTuple",
)
# Workaround #4987 and avoid introducing a bogus UnboundType
if isinstance(analyzed, UnboundType):
analyzed = AnyType(TypeOfAny.from_error)
# These should be all known, otherwise we would defer in visit_assignment_stmt().
if analyzed is None:
return [], [], [], False
types.append(analyzed)
else:
self.fail('Tuple expected as "NamedTuple()" field', item)
return None
return items, types, [], True
    def build_namedtuple_typeinfo(
        self,
        name: str,
        items: list[str],
        types: list[Type],
        default_items: Mapping[str, Expression],
        line: int,
        existing_info: TypeInfo | None,
    ) -> TypeInfo:
        """Construct (or update) the TypeInfo representing a named tuple class.

        Args:
            name: name of the named tuple class.
            items: field names, in declaration order.
            types: field types, parallel to ``items``.
            default_items: mapping of field name -> default value expression;
                fields present here become optional arguments of ``__new__``.
            line: source line to attribute the generated definitions to.
            existing_info: an already-created TypeInfo to update in place (used
                on repeated analysis passes); if None a fresh one is created.

        Returns:
            The (possibly reused) TypeInfo, with tuple fields, synthetic
            attributes (``_fields``, ``_field_defaults``, ...) and generated
            methods (``__new__``, ``_replace``, ``_asdict``, ``_make``) filled in.
        """
        # Commonly-needed builtin types for the synthesized members.
        strtype = self.api.named_type("builtins.str")
        implicit_any = AnyType(TypeOfAny.special_form)
        basetuple_type = self.api.named_type("builtins.tuple", [implicit_any])
        dictype = self.api.named_type("builtins.dict", [strtype, implicit_any])
        # Actual signature should return OrderedDict[str, Union[types]]
        ordereddictype = self.api.named_type("builtins.dict", [strtype, implicit_any])
        fallback = self.api.named_type("builtins.tuple", [implicit_any])
        # Note: actual signature should accept an invariant version of Iterable[UnionType[types]].
        # but it can't be expressed. 'new' and 'len' should be callable types.
        iterable_type = self.api.named_type_or_none("typing.Iterable", [implicit_any])
        function_type = self.api.named_type("builtins.function")

        # __match_args__ is a tuple of string literals, one per field.
        literals: list[Type] = [LiteralType(item, strtype) for item in items]
        match_args_type = TupleType(literals, basetuple_type)

        info = existing_info or self.api.basic_new_typeinfo(name, fallback, line)
        info.is_named_tuple = True
        tuple_base = TupleType(types, fallback)
        if info.special_alias and has_placeholder(info.special_alias.target):
            self.api.process_placeholder(
                None, "NamedTuple item", info, force_progress=tuple_base != info.tuple_type
            )
        info.update_tuple_type(tuple_base)
        info.line = line
        # For use by mypyc.
        info.metadata["namedtuple"] = {"fields": items.copy()}

        # We can't calculate the complete fallback type until after semantic
        # analysis, since otherwise base classes might be incomplete. Postpone a
        # callback function that patches the fallback.
        if not has_placeholder(tuple_base) and not has_type_vars(tuple_base):
            self.api.schedule_patch(
                PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(tuple_base)
            )

        def add_field(
            var: Var, is_initialized_in_class: bool = False, is_property: bool = False
        ) -> None:
            # Attach an attribute Var to the named tuple's symbol table.
            var.info = info
            var.is_initialized_in_class = is_initialized_in_class
            var.is_property = is_property
            var._fullname = f"{info.fullname}.{var.name}"
            info.names[var.name] = SymbolTableNode(MDEF, var)

        fields = [Var(item, typ) for item, typ in zip(items, types)]
        for var in fields:
            add_field(var, is_property=True)
        # We can't share Vars between fields and method arguments, since they
        # have different full names (the latter are normally used as local variables
        # in functions, so their full names are set to short names when generated methods
        # are analyzed).
        vars = [Var(item, typ) for item, typ in zip(items, types)]

        # Synthetic class-level attributes mirroring runtime namedtuple metadata.
        tuple_of_strings = TupleType([strtype for _ in items], basetuple_type)
        add_field(Var("_fields", tuple_of_strings), is_initialized_in_class=True)
        add_field(Var("_field_types", dictype), is_initialized_in_class=True)
        add_field(Var("_field_defaults", dictype), is_initialized_in_class=True)
        add_field(Var("_source", strtype), is_initialized_in_class=True)
        add_field(Var("__annotations__", ordereddictype), is_initialized_in_class=True)
        add_field(Var("__doc__", strtype), is_initialized_in_class=True)
        if self.options.python_version >= (3, 10):
            add_field(Var("__match_args__", match_args_type), is_initialized_in_class=True)

        assert info.tuple_type is not None  # Set by update_tuple_type() above.
        # A single self-type TypeVar shared by all generated methods; each method
        # copy gets its own namespace (see copy_modified in add_method below).
        shared_self_type = TypeVarType(
            name=SELF_TVAR_NAME,
            fullname=f"{info.fullname}.{SELF_TVAR_NAME}",
            # Namespace is patched per-method below.
            id=self.api.tvar_scope.new_unique_func_id(),
            values=[],
            upper_bound=info.tuple_type,
            default=AnyType(TypeOfAny.from_omitted_generics),
        )

        def add_method(
            funcname: str,
            ret: Type | None,  # None means use (patched) self-type
            args: list[Argument],
            is_classmethod: bool = False,
            is_new: bool = False,
        ) -> None:
            # Build a FuncDef (wrapped in a Decorator for classmethods) and
            # register it in the symbol table as plugin-generated.
            fullname = f"{info.fullname}.{funcname}"
            self_type = shared_self_type.copy_modified(
                id=TypeVarId(shared_self_type.id.raw_id, namespace=fullname)
            )
            if ret is None:
                ret = self_type
            if is_classmethod or is_new:
                first = [Argument(Var("_cls"), TypeType.make_normalized(self_type), None, ARG_POS)]
            else:
                first = [Argument(Var("_self"), self_type, None, ARG_POS)]
            args = first + args

            types = [arg.type_annotation for arg in args]
            items = [arg.variable.name for arg in args]
            arg_kinds = [arg.kind for arg in args]
            assert None not in types
            signature = CallableType(cast(list[Type], types), arg_kinds, items, ret, function_type)
            signature.variables = (self_type,)
            func = FuncDef(funcname, args, Block([]))
            func.info = info
            func.is_class = is_classmethod
            func.type = set_callable_name(signature, func)
            func._fullname = fullname
            func.line = line
            if is_classmethod:
                v = Var(funcname, func.type)
                v.is_classmethod = True
                v.info = info
                v._fullname = func._fullname
                func.is_decorated = True
                dec = Decorator(func, [NameExpr("classmethod")], v)
                dec.line = line
                sym = SymbolTableNode(MDEF, dec)
            else:
                sym = SymbolTableNode(MDEF, func)
            sym.plugin_generated = True
            info.names[funcname] = sym

        add_method(
            "_replace",
            ret=None,
            args=[Argument(var, var.type, EllipsisExpr(), ARG_NAMED_OPT) for var in vars],
        )
        if self.options.python_version >= (3, 13):
            # copy.replace() protocol, new in Python 3.13.
            add_method(
                "__replace__",
                ret=None,
                args=[Argument(var, var.type, EllipsisExpr(), ARG_NAMED_OPT) for var in vars],
            )

        def make_init_arg(var: Var) -> Argument:
            # Fields with a declared default become optional in __new__.
            default = default_items.get(var.name, None)
            kind = ARG_POS if default is None else ARG_OPT
            return Argument(var, var.type, default, kind)

        add_method("__new__", ret=None, args=[make_init_arg(var) for var in vars], is_new=True)
        add_method("_asdict", args=[], ret=ordereddictype)
        add_method(
            "_make",
            ret=None,
            is_classmethod=True,
            args=[Argument(Var("iterable", iterable_type), iterable_type, None, ARG_POS)],
        )

        self_tvar_expr = TypeVarExpr(
            SELF_TVAR_NAME,
            info.fullname + "." + SELF_TVAR_NAME,
            [],
            info.tuple_type,
            AnyType(TypeOfAny.from_omitted_generics),
        )
        info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
        return info
    @contextmanager
    def save_namedtuple_body(self, named_tuple_info: TypeInfo) -> Iterator[None]:
        """Preserve the generated body of class-based named tuple and then restore it.

        Temporarily clear the names dict so we don't get errors about duplicate names
        that were already set in build_namedtuple_typeinfo (we already added the tuple
        field names while generating the TypeInfo, and actual duplicates are
        already reported).
        """
        # Swap in an empty symbol table; the caller analyzes the class body
        # while this context is active, populating the fresh table.
        nt_names = named_tuple_info.names
        named_tuple_info.names = SymbolTable()

        yield

        # Make sure we didn't use illegal names, then reset the names in the typeinfo.
        for prohibited in NAMEDTUPLE_PROHIBITED_NAMES:
            if prohibited in named_tuple_info.names:
                # Identical node means it is the generated one, not a user override.
                if nt_names.get(prohibited) is named_tuple_info.names[prohibited]:
                    continue
                ctx = named_tuple_info.names[prohibited].node
                assert ctx is not None
                self.fail(f'Cannot overwrite NamedTuple attribute "{prohibited}"', ctx)

        # Restore the names in the original symbol table. This ensures that the symbol
        # table contains the field objects created by build_namedtuple_typeinfo. Exclude
        # __doc__, which can legally be overwritten by the class.
        for key, value in nt_names.items():
            if key in named_tuple_info.names:
                if key == "__doc__":
                    continue
                sym = named_tuple_info.names[key]
                if isinstance(sym.node, (FuncBase, Decorator)) and not sym.plugin_generated:
                    # Keep user-defined methods as is.
                    continue
                # Do not retain placeholders - we'll get back here if they cease to
                # be placeholders later. If we keep placeholders alive, they may never
                # be reached again, making it to cacheable symtable.
                if not isinstance(sym.node, PlaceholderNode):
                    # Keep existing (user-provided) definitions under mangled names, so they
                    # get semantically analyzed.
                    r_key = get_unique_redefinition_name(key, named_tuple_info.names)
                    named_tuple_info.names[r_key] = sym
            named_tuple_info.names[key] = value
# Helpers
def check_namedtuple_field_name(self, field: str, seen_names: Container[str]) -> str | None:
"""Return None for valid fields, a string description for invalid ones."""
if field in seen_names:
return f'has duplicate field name "{field}"'
elif not field.isidentifier():
return f'field name "{field}" is not a valid identifier'
elif field.startswith("_"):
return f'field name "{field}" starts with an underscore'
elif keyword.iskeyword(field):
return f'field name "{field}" is a keyword'
return None
    def fail(self, msg: str, ctx: Context, code: ErrorCode | None = None) -> None:
        """Report an error at the given context via the semantic analyzer API."""
        self.api.fail(msg, ctx, code=code)
| NamedTupleAnalyzer |
python | doocs__leetcode | solution/2400-2499/2444.Count Subarrays With Fixed Bounds/Solution.py | {
"start": 0,
"end": 389
class ____:
    def countSubarrays(self, nums: List[int], minK: int, maxK: int) -> int:
        """Count subarrays whose minimum is exactly minK and maximum is exactly maxK.

        One left-to-right pass: for each right endpoint, a valid subarray must
        start after the last out-of-range element and at or before the most
        recent occurrences of both minK and maxK.
        """
        total = 0
        last_min = last_max = last_bad = -1
        for right, value in enumerate(nums):
            if not (minK <= value <= maxK):
                last_bad = right  # subarrays cannot cross this position
            if value == minK:
                last_min = right
            if value == maxK:
                last_max = right
            left_bound = min(last_min, last_max)
            if left_bound > last_bad:
                total += left_bound - last_bad
        return total
| Solution |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/model_tests/model_handler.py | {
"start": 25284,
"end": 25507
class ____(_ModelHandlerManagerBase):
    """Manages a series of ModelHandlers for aggregated testing/benchmarking in TF2."""

    # TF2-specific handler classes; presumably consumed by
    # _ModelHandlerManagerBase when constructing plain and TensorRT-converted
    # model handlers — confirm against the base class.
    model_handler_cls = ModelHandlerV2
    trt_model_handler_cls = TrtModelHandlerV2
| ModelHandlerManagerV2 |
python | streamlit__streamlit | e2e_playwright/conftest.py | {
"start": 1683,
"end": 2798
class ____(Page):
    # Marker subclass of Page with no added behavior; presumably used only to
    # distinguish static-app pages in fixture type annotations — TODO confirm usage.
    pass
def pytest_configure(config: pytest.Config) -> None:
    """Register custom markers."""
    custom_markers = (
        "no_perf: mark test to not use performance profiling",
        "app_hash(hash): mark test to open the app with a URL hash",
    )
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)
def reorder_early_fixtures(metafunc: pytest.Metafunc) -> None:
    """Put fixtures with `pytest.mark.early` first during execution.

    This allows patch of configurations before the application is initialized

    Copied from: https://github.com/pytest-dev/pytest/issues/1216#issuecomment-456109892
    """
    order = metafunc.fixturenames
    for fixture_defs in metafunc._arg2fixturedefs.values():
        primary = fixture_defs[0]
        marks = getattr(primary.func, "pytestmark", [])
        if any(mark.name == "early" for mark in marks):
            # Move the fixture's name to the front of the execution order.
            order.insert(0, order.pop(order.index(primary.argname)))
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    # Pytest collection hook: we only use it to move `early`-marked fixtures
    # to the front of the fixture execution order (see reorder_early_fixtures).
    reorder_early_fixtures(metafunc)
| StaticPage |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 554800,
"end": 584092
} | class ____(FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefnumber):
r"""
Size schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Size: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Size: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Size: ...
@overload
def bandPosition(self, _: float, /) -> Size: ...
@overload
def bin(self, _: bool | Bin | None, /) -> Size: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Size: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> Size: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> Size: ...
@overload
def condition(self, _: list[core.ConditionalValueDefnumberExprRef], /) -> Size: ...
@overload
def field(self, _: str | RepeatRef, /) -> Size: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Size: ...
@overload
def legend(self, _: Legend | None, /) -> Size: ...
@overload
def legend(
self,
*,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columns: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
direction: Optional[SchemaBase | Orientation_T] = Undefined,
fillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOverlap: Optional[
bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
orient: Optional[SchemaBase | LegendOrient_T] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined,
tickCount: Optional[
float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
type: Optional[Literal["symbol", "gradient"]] = Undefined,
values: Optional[
Parameter
| SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
] = Undefined,
zindex: Optional[float] = Undefined,
) -> Size: ...
@overload
def scale(self, _: Scale | None, /) -> Size: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> Size: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> Size: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Size: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Size: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Size: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Size: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Size: ...
@overload
def type(self, _: StandardType_T, /) -> Size: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| Size |
python | scipy__scipy | scipy/sparse/linalg/tests/test_onenormest.py | {
"start": 8636,
"end": 9300
} | class ____:
@pytest.mark.thread_unsafe(reason="Fails in parallel for unknown reasons")
def test_randn_inv(self):
rng = np.random.RandomState(1234)
n = 20
nsamples = 100
for i in range(nsamples):
# Choose integer t uniformly between 1 and 3 inclusive.
t = rng.randint(1, 4)
# Choose n uniformly between 10 and 40 inclusive.
n = rng.randint(10, 41)
# Sample the inverse of a matrix with random normal entries.
A = scipy.linalg.inv(rng.randn(n, n))
# Compute the 1-norm bounds.
g, ind = _algorithm_2_2(A, A.T, t)
| TestAlgorithm_2_2 |
python | mahmoud__boltons | boltons/typeutils.py | {
"start": 5514,
"end": 5814
} | class ____:
"""Much like a :class:`property`, but the wrapped get function is a
class method. For simplicity, only read-only properties are
implemented.
"""
def __init__(self, fn):
self.fn = fn
def __get__(self, instance, cls):
return self.fn(cls)
| classproperty |
python | scipy__scipy | scipy/optimize/tests/test__remove_redundancy.py | {
"start": 6446,
"end": 6563
} | class ____(RRCommonTests):
def rr(self, A, b):
return _remove_redundancy_pivot_dense(A, b)
| TestRRPivotDense |
python | doocs__leetcode | solution/2700-2799/2731.Movement of Robots/Solution.py | {
"start": 0,
"end": 336
} | class ____:
def sumDistance(self, nums: List[int], s: str, d: int) -> int:
mod = 10**9 + 7
for i, c in enumerate(s):
nums[i] += d if c == "R" else -d
nums.sort()
ans = s = 0
for i, x in enumerate(nums):
ans += i * x - s
s += x
return ans % mod
| Solution |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/annotation/_font.py | {
"start": 235,
"end": 9918
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.annotation"
_path_str = "layout.scene.annotation.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the annotation text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.annotation.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.annotation.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.annotation.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | tensorflow__tensorflow | tensorflow/python/ops/data_flow_ops.py | {
"start": 31702,
"end": 35045
} | class ____(QueueBase):
"""A queue implementation that dequeues elements in first-in first-out order.
GPUCompatibleFIFOQueue is like FIFOQueue, but the queue resource may be placed
either on a CPU or on a GPU. It is not cross-device: enqueues and dequeues
will be colocated with the queue resource. GPUCompatibleFIFOQueue only
supports enqueue and dequeue at the moment, not enqueue_many or dequeue_many.
See `tf.queue.QueueBase` for a description of the methods in this class.
"""
def __init__(self,
capacity,
dtypes,
shapes=None,
names=None,
shared_name=None,
name="fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `FIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `FIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
Args:
capacity: An integer. The upper bound on the number of elements that may
be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal the
number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects with the
same length as `dtypes`, or `None`.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under the
given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
names = _as_name_list(names, dtypes)
with ops.init_scope():
queue_ref = gen_data_flow_ops.fifo_queue_v2(
component_types=dtypes,
shapes=shapes,
capacity=capacity,
shared_name=_shared_name(shared_name),
name=name)
super(GPUCompatibleFIFOQueue, self).__init__(
dtypes, shapes, names, queue_ref)
def enqueue_many(self, vals, name=None):
"""enqueue_many is not supported on GPUCompatibleFIFOQueue."""
raise NotImplementedError(
"GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, "
"only enqueue and dequeue.")
def dequeue_many(self, n, name=None):
"""dequeue_many is not supported on GPUCompatibleFIFOQueue."""
raise NotImplementedError(
"GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, "
"only enqueue and dequeue.")
@tf_export(
"queue.PaddingFIFOQueue",
v1=["queue.PaddingFIFOQueue", "io.PaddingFIFOQueue", "PaddingFIFOQueue"])
@deprecation.deprecated_endpoints(["io.PaddingFIFOQueue", "PaddingFIFOQueue"])
| GPUCompatibleFIFOQueue |
python | celery__celery | t/unit/worker/test_bootsteps.py | {
"start": 1329,
"end": 2713
} | class ____:
class Def(bootsteps.StartStopStep):
name = 'test_Step.Def'
def setup_method(self):
self.steps = []
def test_blueprint_name(self, bp='test_blueprint_name'):
class X(bootsteps.Step):
blueprint = bp
name = 'X'
assert X.name == 'X'
class Y(bootsteps.Step):
name = '%s.Y' % bp
assert Y.name == f'{bp}.Y'
def test_init(self):
assert self.Def(self)
def test_create(self):
self.Def(self).create(self)
def test_include_if(self):
x = self.Def(self)
x.enabled = True
assert x.include_if(self)
x.enabled = False
assert not x.include_if(self)
def test_instantiate(self):
assert isinstance(
self.Def(self).instantiate(self.Def, self),
self.Def,
)
def test_include_when_enabled(self):
x = self.Def(self)
x.create = Mock()
x.create.return_value = 'George'
assert x.include(self)
assert x.obj == 'George'
x.create.assert_called_with(self)
def test_include_when_disabled(self):
x = self.Def(self)
x.enabled = False
x.create = Mock()
assert not x.include(self)
x.create.assert_not_called()
def test_repr(self):
x = self.Def(self)
assert repr(x)
| test_Step |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_table_columns_to_match_set.py | {
"start": 2907,
"end": 23963
} | class ____(BatchExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectTableColumnsToMatchSet is a \
Batch Expectation.
BatchExpectations are one of the most common types of Expectation.
They are evaluated for an entire Batch, and answer a semantic question about the Batch itself.
Args:
column_set (list of str): {COLUMN_SET_DESCRIPTION}
exact_match (boolean): \
{EXACT_MATCH_DESCRIPTION} Default True.
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1.00 2
1 2.30 5
2 4.33 0
Code Examples:
Passing Case:
Input:
ExpectTableColumnsToMatchSet(
column_set=["test"],
exact_match=False
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": [
"test",
"test2"
],
"details": {{
"mismatched": {{
"unexpected": [
"test2"
]
}}
}}
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectTableColumnsToMatchSet(
column_set=["test2", "test3"],
exact_match=True
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": [
"test",
"test2"
],
"details": {{
"mismatched": {{
"unexpected": [
"test"
],
"missing": [
"test3"
]
}}
}}
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
column_set: Union[list, set, SuiteParameterDict, None] = pydantic.Field(
description=COLUMN_SET_DESCRIPTION
)
exact_match: Union[bool, SuiteParameterDict, None] = pydantic.Field(
default=True, description=EXACT_MATCH_DESCRIPTION
)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "table expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("table.columns",)
success_keys = (
"column_set",
"exact_match",
)
args_keys = (
"column_set",
"exact_match",
)
class Config:
title = "Expect table columns to match set"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ExpectTableColumnsToMatchSet]) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column_set", RendererValueType.ARRAY),
("exact_match", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.column_set:
template_str = "Must specify a set or list of columns."
else:
array_param_name = "column_set"
param_prefix = "column_set_"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
column_set_str: str = cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
exact_match_str = (
"exactly" if params.exact_match and params.exact_match.value is True else "at least"
)
template_str = (
f"Must have {exact_match_str} these columns (in any order): {column_set_str}"
)
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(configuration.kwargs, ["column_set", "exact_match"])
if params["column_set"] is None:
template_str = "Must specify a set or list of columns."
else:
# standardize order of the set for output
params["column_list"] = list(params["column_set"])
column_list_template_str = ", ".join(
[f"$column_list_{idx}" for idx in range(len(params["column_list"]))]
)
exact_match_str = "exactly" if params["exact_match"] is True else "at least"
template_str = f"Must have {exact_match_str} these columns (in any order): {column_list_template_str}" # noqa: E501 # FIXME CoP
for idx in range(len(params["column_list"])):
params[f"column_list_{idx!s}"] = params["column_list"][idx]
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type=AtomicDiagnosticRendererType.OBSERVED_VALUE)
@override
def _atomic_diagnostic_observed_value(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
) -> RenderedAtomicContent:
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
expected_param_prefix = "exp__"
expected_param_name = "expected_value"
ov_param_prefix = "ov__"
ov_param_name = "observed_value"
renderer_configuration.add_param(
name=expected_param_name,
param_type=RendererValueType.ARRAY,
value=renderer_configuration.kwargs.get("column_set", []),
)
renderer_configuration = cls._add_array_params(
array_param_name=expected_param_name,
param_prefix=expected_param_prefix,
renderer_configuration=renderer_configuration,
)
renderer_configuration.add_param(
name=ov_param_name,
param_type=RendererValueType.ARRAY,
value=result.get("result", {}).get("observed_value", []) if result else [],
)
renderer_configuration = cls._add_array_params(
array_param_name=ov_param_name,
param_prefix=ov_param_prefix,
renderer_configuration=renderer_configuration,
)
observed_columns = (
(name, sch)
for name, sch in renderer_configuration.params
if name.startswith(ov_param_prefix)
)
expected_columns = (
(name, sch)
for name, sch in renderer_configuration.params
if name.startswith(expected_param_prefix)
)
mismatched_columns = {"unexpected": [], "missing": []}
if (
"details" in result["result"]
and "mismatched" in result["result"]["details"]
and result["result"]["details"]["mismatched"]
):
mismatched_columns.update(result["result"]["details"]["mismatched"])
template_str_list = []
for name, schema in observed_columns:
render_state = (
ObservedValueRenderState.UNEXPECTED.value
if schema.value in mismatched_columns["unexpected"]
else ObservedValueRenderState.EXPECTED.value
)
renderer_configuration.params.__dict__[name].render_state = render_state
template_str_list.append(f"${name}")
for name, schema in expected_columns:
if schema.value in mismatched_columns["missing"]:
renderer_configuration.params.__dict__[
name
].render_state = ObservedValueRenderState.MISSING.value
template_str_list.append(f"${name}")
renderer_configuration.template_str = " ".join(template_str_list)
value_obj = renderedAtomicValueSchema.load(
{
"template": renderer_configuration.template_str,
"params": renderer_configuration.params.dict(),
"meta_notes": renderer_configuration.meta_notes,
"schema": {"type": "com.superconductive.rendered.string"},
}
)
return RenderedAtomicContent(
name=AtomicDiagnosticRendererType.OBSERVED_VALUE,
value=value_obj,
value_type="StringValueType",
)
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
if isinstance(execution_engine, SqlAlchemyExecutionEngine):
return self._validate_sqlalchemy(metrics)
# Retrieve expected and observed column names
expected_column_list = self._get_success_kwargs().get("column_set")
expected_column_set = (
set(expected_column_list) if expected_column_list is not None else set()
)
actual_column_list = metrics.get("table.columns")
actual_column_set = set(actual_column_list)
unmatched_actual_column_set = actual_column_set - expected_column_set
unmatched_expected_column_set = expected_column_set - actual_column_set
return _validate_result(
actual_column_set,
expected_column_set,
unmatched_actual_column_set,
unmatched_expected_column_set,
self._get_success_kwargs().get("exact_match"),
)
def _validate_sqlalchemy(self, metrics: Dict):
# We want to match the expected columns with the actual columns. We first break up the
# expected columns into 2 sets, the quoted columns which must match exactly and the unquoted
# columns, which we case insensitive match.
expected_column_set = set(self._get_success_kwargs().get("column_set"))
quoted_expected_column_set = set()
unquoted_expected_column_set = set()
for col in expected_column_set:
if col.startswith('"') and col.endswith('"'):
quoted_expected_column_set.add(col[1:-1])
else:
unquoted_expected_column_set.add(col)
# The actual columns from the db will be unquoted and may be strs or CaseInsensitiveStrings.
# We normalize the actual_column_list to CaseInsensitiveStrings so we can use set operations
# going forward.
actual_column_list = metrics.get("table.columns")
actual_column_set = _make_case_insensitive_set(actual_column_list)
# We make copies of the expected and actual column sets and remove items from them as we
# find matches between the 2 sets.
unmatched_expected_column_set = expected_column_set.copy()
unmatched_actual_column_set = actual_column_set.copy()
# We first match quoted strings. The expected set is a set of strs while the actual set
# is a set of CaseInsensitiveStrings so we can't use set operations.
for col in actual_column_set:
if str(col) in quoted_expected_column_set:
unmatched_expected_column_set.remove(f'"{col!s}"')
unmatched_actual_column_set.remove(col)
# We normalize the unmatched_expected_column_set to CaseInsensitiveStrings
unmatched_expected_column_set = _make_case_insensitive_set(unmatched_expected_column_set)
# We now do the unquoted match
unquoted_expected_column_set = _make_case_insensitive_set(unquoted_expected_column_set)
unquoted_matches = unquoted_expected_column_set.intersection(unmatched_actual_column_set)
# We subtract the unquoted matches from the current unmatched sets to finalize them
unmatched_actual_column_set = unmatched_actual_column_set - unquoted_matches
unmatched_expected_column_set = unmatched_expected_column_set - unquoted_matches
return _validate_result(
actual_column_set,
expected_column_set,
unmatched_actual_column_set,
unmatched_expected_column_set,
self._get_success_kwargs().get("exact_match"),
)
def _make_case_insensitive_set(
    str_set: Optional[set[str | CaseInsensitiveString]],
) -> set[CaseInsensitiveString]:
    """
    Normalize a set of strings to CaseInsensitiveStrings.

    Args:
        str_set: A set of plain strs and/or CaseInsensitiveStrings, or None.

    Returns:
        A set of CaseInsensitiveString; empty when str_set is None.

    Raises:
        InvalidSetTypeError: If an element is neither str nor CaseInsensitiveString.
    """
    from great_expectations.expectations.metrics.util import (
        CaseInsensitiveString,
    )

    if str_set is None:
        return set()

    def _coerce(item):
        # Already case-insensitive: keep; plain str: wrap; anything else: reject.
        if isinstance(item, CaseInsensitiveString):
            return item
        if isinstance(item, str):
            return CaseInsensitiveString(item)
        raise InvalidSetTypeError(
            expected_type="str or CaseInsensitiveString", actual_type=str(type(item))
        )

    return {_coerce(item) for item in str_set}
def _validate_result(
    actual_column_set: Set[Union[str, CaseInsensitiveString]],
    expected_column_set: Set[str],
    unmatched_actual_column_set: Set[Union[str, CaseInsensitiveString]],
    unmatched_expected_column_set: Set[Union[str, CaseInsensitiveString]],
    exact_match: bool,
) -> Dict[str, Any]:
    """
    Build the expectation validation payload from matched/unmatched column sets.

    Success means either no expectation was supplied (and exact_match is not
    strictly True), or both unmatched sets are empty. Otherwise success depends
    on exact_match and on whether any expected columns are missing.
    """
    observed_value = sorted(str(col) for col in actual_column_set)

    no_expectation = (expected_column_set is None) and (exact_match is not True)
    fully_matched = not unmatched_expected_column_set and not unmatched_actual_column_set
    if no_expectation or fully_matched:
        return {"success": True, "result": {"observed_value": observed_value}}

    unexpected_list = sorted(str(col) for col in unmatched_actual_column_set)
    missing_list = sorted(str(col) for col in unmatched_expected_column_set)
    mismatched: Dict[str, Any] = {}
    if unexpected_list:
        mismatched["unexpected"] = unexpected_list
    if missing_list:
        mismatched["missing"] = missing_list

    result = {
        "observed_value": observed_value,
        "details": {"mismatched": mismatched},
    }
    # With exact_match, any mismatch fails; otherwise only missing expected
    # columns fail (extra/unexpected actual columns are tolerated).
    success = (not exact_match) and not missing_list
    return {"success": success, "result": result}
| ExpectTableColumnsToMatchSet |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 2881,
"end": 5669
} | class ____:
r"""
Result of `ship`\ping a box: lists of positioned glyphs and rectangles.
This class is not exposed to end users, but converted to a `VectorParse` or
a `RasterParse` by `.MathTextParser.parse`.
"""
def __init__(self, box: Box):
    """Start an empty output for *box*; `ship` fills glyphs/rects."""
    self.box = box
    # Positioned glyphs appended during shipping.
    self.glyphs: list[tuple[float, float, FontInfo]] = []  # (ox, oy, info)
    # Filled rectangles as corner coordinates.
    self.rects: list[tuple[float, float, float, float]] = []  # (x1, y1, x2, y2)
def to_vector(self) -> VectorParse:
    """Convert this shipped output to a `VectorParse` for vector backends."""
    # Round the box metrics up to whole units.
    w, h, d = map(
        np.ceil, [self.box.width, self.box.height, self.box.depth])
    # Flip glyph y: math-layout y and output y run in opposite directions
    # (see the ymin/ymax note in to_raster).
    gs = [(info.font, info.fontsize, info.num, ox, h - oy + info.offset)
          for ox, oy, info in self.glyphs]
    # Rectangles become (x, y, width, height) in the flipped frame.
    rs = [(x1, h - y2, x2 - x1, y2 - y1)
          for x1, y1, x2, y2 in self.rects]
    return VectorParse(w, h + d, d, gs, rs)
def to_raster(self, *, antialiased: bool) -> RasterParse:
    """Rasterize this shipped output into a `RasterParse` bitmap."""
    # Metrics y's and mathtext y's are oriented in opposite directions,
    # hence the switch between ymin and ymax.
    # Bounding box over all glyphs and rects, padded by 1 on each side;
    # 0 is included so the origin is always inside.
    xmin = min([*[ox + info.metrics.xmin for ox, oy, info in self.glyphs],
                *[x1 for x1, y1, x2, y2 in self.rects], 0]) - 1
    ymin = min([*[oy - info.metrics.ymax for ox, oy, info in self.glyphs],
                *[y1 for x1, y1, x2, y2 in self.rects], 0]) - 1
    xmax = max([*[ox + info.metrics.xmax for ox, oy, info in self.glyphs],
                *[x2 for x1, y1, x2, y2 in self.rects], 0]) + 1
    ymax = max([*[oy - info.metrics.ymin for ox, oy, info in self.glyphs],
                *[y2 for x1, y1, x2, y2 in self.rects], 0]) + 1
    w = xmax - xmin
    h = ymax - ymin - self.box.depth
    d = ymax - ymin - self.box.height
    image = np.zeros((math.ceil(h + max(d, 0)), math.ceil(w)), np.uint8)
    # Ideally, we could just use self.glyphs and self.rects here, shifting
    # their coordinates by (-xmin, -ymin), but this yields slightly
    # different results due to floating point slop; shipping twice is the
    # old approach and keeps baseline images backcompat.
    shifted = ship(self.box, (-xmin, -ymin))
    for ox, oy, info in shifted.glyphs:
        info.font.draw_glyph_to_bitmap(
            image, int(ox), int(oy - info.metrics.iceberg), info.glyph,
            antialiased=antialiased)
    for x1, y1, x2, y2 in shifted.rects:
        height = max(int(y2 - y1) - 1, 0)
        if height == 0:
            # Degenerate (hairline) rect: center the single-pixel row.
            center = (y2 + y1) / 2
            y = int(center - (height + 1) / 2)
        else:
            y = int(y1)
        x1 = math.floor(x1)
        x2 = math.ceil(x2)
        image[y:y+height+1, x1:x2+1] = 0xff
    return RasterParse(0, 0, w, h + d, d, image)
| Output |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 9585,
"end": 20672
} | class ____:
"""Result of an ASP solve."""
def __init__(self, specs, asp=None):
    """Create an empty result for the given abstract input *specs*.

    Args:
        specs: abstract user specs the solve was requested for.
        asp: optional ASP program text associated with this result.
    """
    self.asp = asp
    self.satisfiable = None
    self.optimal = None
    self.warnings = None
    self.nmodels = 0

    # Saved control object for reruns when necessary
    self.control = None

    # specs ordered by optimization level
    self.answers = []
    self.cores = []

    # names of optimization criteria
    self.criteria = []

    # Abstract user requests
    self.abstract_specs = specs

    # possible dependencies
    self.possible_dependencies = None

    # Concrete specs (computed lazily from the best answer set)
    self._concrete_specs_by_input = None
    self._concrete_specs = None
    self._unsolved_specs = None
def format_core(self, core):
"""
Format an unsatisfiable core for human readability
Returns a list of strings, where each string is the human readable
representation of a single fact in the core, including a newline.
Modeled after traceback.format_stack.
"""
error_msg = (
"Internal Error: ASP Result.control not populated. Please report to the spack"
" maintainers"
)
assert self.control, error_msg
symbols = dict((a.literal, a.symbol) for a in self.control.symbolic_atoms)
core_symbols = []
for atom in core:
sym = symbols[atom]
core_symbols.append(sym)
return sorted(str(symbol) for symbol in core_symbols)
def minimize_core(self, core):
    """
    Return a subset-minimal subset of the core.

    Clingo cores may be thousands of lines when two facts are sufficient to
    ensure unsatisfiability. This algorithm reduces the core to only those
    essential facts.

    Args:
        core: list of assumption literals forming an unsat core.
    """
    error_msg = (
        "Internal Error: ASP Result.control not populated. Please report to the spack"
        " maintainers"
    )
    assert self.control, error_msg

    min_core = core[:]
    for fact in core:
        # Try solving without this fact; if the problem becomes satisfiable
        # the fact is essential and is put back.
        min_core.remove(fact)
        ret = self.control.solve(assumptions=min_core)
        if not ret.unsatisfiable:
            min_core.append(fact)
    return min_core
def minimal_cores(self):
    """
    Return a list of subset-minimal unsatisfiable cores.
    """
    # One minimized core per raw core recorded during the solve.
    return [self.minimize_core(core) for core in self.cores]
def format_minimal_cores(self):
    """List of facts for each core

    Separate cores are separated by an empty line
    """
    string_list = []
    for core in self.minimal_cores():
        if string_list:
            # Blank separator between consecutive cores.
            string_list.append("\n")
        string_list.extend(self.format_core(core))
    return string_list
def format_cores(self):
    """List of facts for each core

    Separate cores are separated by an empty line
    Cores are not minimized
    """
    string_list = []
    for core in self.cores:
        if string_list:
            # Blank separator between consecutive cores.
            string_list.append("\n")
        string_list.extend(self.format_core(core))
    return string_list
def raise_if_unsat(self):
    """
    Raise an appropriate error if the result is unsatisfiable.

    The error is an SolverError, and includes the minimized cores
    resulting from the solve, formatted to be human readable.
    """
    if self.satisfiable:
        return

    constraints = self.abstract_specs
    # Unwrap a single input spec for a cleaner error message.
    if len(constraints) == 1:
        constraints = constraints[0]

    conflicts = self.format_minimal_cores()
    raise SolverError(constraints, conflicts=conflicts)
@property
def specs(self):
    """List of concretized specs satisfying the initial
    abstract request.
    """
    # Computed lazily from the best answer set, then cached.
    if self._concrete_specs is None:
        self._compute_specs_from_answer_set()
    return self._concrete_specs
@property
def unsolved_specs(self):
    """List of tuples pairing abstract input specs that were not
    solved with their associated candidate spec from the solver
    (if the solve completed).
    """
    # Computed lazily together with the concrete specs.
    if self._unsolved_specs is None:
        self._compute_specs_from_answer_set()
    return self._unsolved_specs
@property
def specs_by_input(self):
    """Mapping from each abstract input spec to its concretized spec."""
    if self._concrete_specs_by_input is None:
        self._compute_specs_from_answer_set()
    return self._concrete_specs_by_input
def _compute_specs_from_answer_set(self):
    """Populate _concrete_specs, _unsolved_specs and _concrete_specs_by_input
    from the best (minimum-cost) answer set."""
    if not self.satisfiable:
        self._concrete_specs = []
        self._unsolved_specs = list((x, None) for x in self.abstract_specs)
        self._concrete_specs_by_input = {}
        return

    self._concrete_specs, self._unsolved_specs = [], []
    self._concrete_specs_by_input = {}
    # Answers compare by optimization cost first, so min() is the best model.
    best = min(self.answers)
    opt, _, answer = best
    for input_spec in self.abstract_specs:
        # The specs must be unified to get here, so it is safe to associate any satisfying spec
        # with the input. Multiple inputs may be matched to the same concrete spec
        node = SpecBuilder.make_node(pkg=input_spec.name)
        if spack.repo.PATH.is_virtual(input_spec.name):
            # Virtual input: look up a provider chosen in the answer set.
            providers = [
                spec.name for spec in answer.values() if spec.package.provides(input_spec.name)
            ]
            node = SpecBuilder.make_node(pkg=providers[0])
        candidate = answer.get(node)

        if candidate and candidate.satisfies(input_spec):
            self._concrete_specs.append(answer[node])
            self._concrete_specs_by_input[input_spec] = answer[node]
        elif candidate and candidate.build_spec.satisfies(input_spec):
            # Splicing replaced the build spec; accept it with a warning.
            tty.warn(
                "explicit splice configuration has caused the concretized spec"
                f" {candidate} not to satisfy the input spec {input_spec}"
            )
            self._concrete_specs.append(answer[node])
            self._concrete_specs_by_input[input_spec] = answer[node]
        else:
            self._unsolved_specs.append((input_spec, candidate))
@staticmethod
def format_unsolved(unsolved_specs):
"""Create a message providing info on unsolved user specs and for
each one show the associated candidate spec from the solver (if
there is one).
"""
msg = "Unsatisfied input specs:"
for input_spec, candidate in unsolved_specs:
msg += f"\n\tInput spec: {str(input_spec)}"
if candidate:
msg += f"\n\tCandidate spec: {candidate.long_spec}"
else:
msg += "\n\t(No candidate specs from solver)"
return msg
def to_dict(self) -> dict:
    """Produces dict representation of Result object

    Does not include anything related to unsatisfiability as we
    are only interested in storing satisfiable results
    """
    # Node keys are serialized as small JSON strings so from_dict can
    # recover them with json.loads.
    serial_node_arg = (
        lambda node_dict: f"""{{"id": "{node_dict.id}", "pkg": "{node_dict.pkg}"}}"""
    )
    ret = dict()
    ret["asp"] = self.asp
    ret["criteria"] = self.criteria
    ret["optimal"] = self.optimal
    ret["warnings"] = self.warnings
    ret["nmodels"] = self.nmodels
    ret["abstract_specs"] = [str(x) for x in self.abstract_specs]
    ret["satisfiable"] = self.satisfiable
    serial_answers = []
    for answer in self.answers:
        # answer is (opt, ..., {node: spec}); keep the first two fields and
        # serialize the node->spec mapping.
        serial_answer = answer[:2]
        serial_answer_dict = {}
        for node, spec in answer[2].items():
            serial_answer_dict[serial_node_arg(node)] = spec.to_dict()
        serial_answer = serial_answer + (serial_answer_dict,)
        serial_answers.append(serial_answer)
    ret["answers"] = serial_answers
    ret["specs_by_input"] = {}
    input_specs = {} if not self.specs_by_input else self.specs_by_input
    for input, spec in input_specs.items():
        ret["specs_by_input"][str(input)] = spec.to_dict()
    return ret
@staticmethod
def from_dict(obj: dict):
    """Returns Result object from compatible dictionary"""

    def _dict_to_node_argument(dict):
        # Rebuild the NodeArgument serialized by to_dict's serial_node_arg.
        id = dict["id"]
        pkg = dict["pkg"]
        return NodeArgument(id=id, pkg=pkg)

    def _str_to_spec(spec_str):
        return spack.spec.Spec(spec_str)

    def _dict_to_spec(spec_dict):
        loaded_spec = spack.spec.Spec.from_dict(spec_dict)
        _ensure_external_path_if_external(loaded_spec)
        spack.spec.Spec.ensure_no_deprecated(loaded_spec)
        return loaded_spec

    asp = obj.get("asp")
    spec_list = obj.get("abstract_specs")
    if not spec_list:
        raise RuntimeError("Invalid json for concretization Result object")
    # NOTE(review): this guard is redundant — the branch above already
    # raised for a falsy spec_list.
    if spec_list:
        spec_list = [_str_to_spec(x) for x in spec_list]
    result = Result(spec_list, asp)

    criteria = obj.get("criteria")
    result.criteria = (
        None if criteria is None else [OptimizationCriteria(*t) for t in criteria]
    )
    result.optimal = obj.get("optimal")
    result.warnings = obj.get("warnings")
    result.nmodels = obj.get("nmodels")
    result.satisfiable = obj.get("satisfiable")
    result._unsolved_specs = []

    answers = []
    for answer in obj.get("answers", []):
        loaded_answer = answer[:2]
        answer_node_dict = {}
        for node, spec in answer[2].items():
            # Keys were serialized as JSON strings; see to_dict.
            answer_node_dict[_dict_to_node_argument(json.loads(node))] = _dict_to_spec(spec)
        loaded_answer.append(answer_node_dict)
        answers.append(tuple(loaded_answer))
    result.answers = answers

    result._concrete_specs_by_input = {}
    result._concrete_specs = []
    for input, spec in obj.get("specs_by_input", {}).items():
        result._concrete_specs_by_input[_str_to_spec(input)] = _dict_to_spec(spec)
        result._concrete_specs.append(_dict_to_spec(spec))

    return result
def __eq__(self, other):
    """Structural equality on solver inputs and outputs.

    control, cores and possible_dependencies are deliberately excluded
    from the comparison.

    Fix: removed a leftover debug ``print(eq)`` that dumped the comparison
    tuple to stdout on every equality check.
    """
    # NOTE(review): defining __eq__ implicitly sets __hash__ to None —
    # confirm Result instances are never used as dict keys / set members.
    eq = (
        self.asp == other.asp,
        self.satisfiable == other.satisfiable,
        self.optimal == other.optimal,
        self.warnings == other.warnings,
        self.nmodels == other.nmodels,
        self.criteria == other.criteria,
        self.answers == other.answers,
        self.abstract_specs == other.abstract_specs,
        self._concrete_specs_by_input == other._concrete_specs_by_input,
        self._concrete_specs == other._concrete_specs,
        self._unsolved_specs == other._unsolved_specs,
        # Not considered for equality:
        # self.control
        # self.possible_dependencies
        # self.cores
    )
    return all(eq)
| Result |
python | RaRe-Technologies__gensim | gensim/test/test_phrases.py | {
"start": 18820,
"end": 19344
} | class ____(PhrasesCommon, unittest.TestCase):
"""Test FrozenPhrases models."""
def setUp(self):
    """Set up FrozenPhrases models for the tests."""
    # min_count=1/threshold=1 so bigrams form even from the tiny corpus;
    # self.sentences / self.connector_words presumably come from
    # PhrasesCommon — confirm.
    bigram_phrases = Phrases(
        self.sentences, min_count=1, threshold=1, connector_words=self.connector_words)
    # Frozen (inference-only) variant of the trained bigram model.
    self.bigram = FrozenPhrases(bigram_phrases)

    # Same corpus trained with default Phrases hyperparameters.
    bigram_default_phrases = Phrases(self.sentences, connector_words=self.connector_words)
    self.bigram_default = FrozenPhrases(bigram_default_phrases)
| TestFrozenPhrasesModel |
python | getsentry__sentry | tests/sentry/issues/test_issue_search.py | {
"start": 17228,
"end": 17853
} | class ____(TestCase):
def test_me(self) -> None:
    """The special value "me" resolves to the requesting user."""
    result = convert_user_value(["me"], [self.project], self.user, None)
    assert result[0].id == self.user.id
    assert result[0].username == self.user.username
def test_specified_user(self) -> None:
    """An explicit username resolves to that user (not the requester)."""
    user = self.create_user()
    result = convert_user_value([user.username], [self.project], self.user, None)
    assert result[0].id == user.id
    assert result[0].username == user.username
def test_invalid_user(self) -> None:
    """An unknown username does not raise."""
    # NOTE(review): id == 0 looks like a "no such user" sentinel — confirm.
    assert convert_user_value(["fake-user"], [], self.user, None)[0].id == 0
| ConvertUserValueTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-for-k-virus-variants-to-spread.py | {
"start": 4248,
"end": 5970
} | class ____(object):
def minDayskVariants(self, points, k):
    """
    :type points: List[List[int]]
    :type k: int
    :rtype: int
    """
    # Binary-search the day count. After l days each virus covers a
    # Manhattan ball of radius l; rotating (x, y) -> (x+y, x-y) turns those
    # diamonds into axis-aligned squares, so feasibility becomes "do >= k
    # squares overlap somewhere", answered with a 2-D difference array and
    # a line sweep.  (Python 2 code: iterkeys/iteritems.)
    def add_rec(rec, intervals):
        x0, y0, x1, y1 = rec
        # add [y0, y1+1) by 1 in [x0, x1+1)
        intervals[x0][y0] += 1
        intervals[x0][y1+1] -= 1
        intervals[x1+1][y0] -= 1
        intervals[x1+1][y1+1] += 1

    def check(points, k, l):  # Time: O(n^2), Space: O(n)
        intervals = collections.defaultdict(lambda:collections.defaultdict(int))
        y_set = set()
        for x, y in points:
            # Square of half-side l around the rotated point.
            add_rec([x-l, y-l, x+l, y+l], intervals)
            y_set.add(y-l)
            y_set.add(y+l+1)
        sorted_y = sorted(y_set)
        sorted_x = sorted(intervals.iterkeys())
        count = collections.Counter()
        for x in sorted_x:  # line sweep
            # Apply this column's deltas, then prefix-sum over y to find
            # any cell covered by >= k squares.
            for y, c in intervals[x].iteritems():
                count[y] += c
            cnt = 0
            for y in sorted_y:
                cnt += count[y]
                if cnt >= k:
                    return True
        return False

    points = [[x+y, x-y] for x, y in points]  # rotate
    min_x = min(points)[0]
    max_x = max(points)[0]
    min_y = min(points, key=lambda x: x[1])[1]
    max_y = max(points, key=lambda x: x[1])[1]
    # Upper bound: half the rotated bounding-box perimeter span.
    left, right = 0, ((max_x-min_x)+(max_y-min_y)+1)//2
    while left <= right:
        mid = left + (right-left)//2
        if check(points, k, mid):
            right = mid-1
        else:
            left = mid+1
    return left
return left
| Solution2 |
python | getsentry__sentry | tests/sentry/incidents/models/test_alert_rule.py | {
"start": 9591,
"end": 11082
} | class ____:
method: str
def setUp(self) -> None:
    # Suspend the global trigger-action registry so register_type calls in
    # individual tests don't leak into other tests.
    self.suspended_registry = TemporaryAlertRuleTriggerActionRegistry.suspend()
def tearDown(self) -> None:
    # Restore the registry suspended in setUp.
    self.suspended_registry.restore()
def test_no_handler(self) -> None:
    """Firing a trigger whose type has no registered handler is a no-op."""
    trigger = AlertRuleTriggerAction(type=AlertRuleTriggerAction.Type.EMAIL.value)
    result = trigger.fire(
        Mock(), Mock(), Mock(), metric_value=123, new_status=IncidentStatus.CRITICAL
    )  # type: ignore[func-returns-value]
    # TODO(RyanSkonnord): Remove assertion (see test_handler)
    assert result is None
def test_handler(self) -> None:
    """A registered handler's method is invoked and its value propagated."""
    mock_handler = Mock()
    # self.method is "fire" or "resolve", set by concrete subclasses.
    mock_method = getattr(mock_handler.return_value, self.method)
    mock_method.return_value = "test"
    type = AlertRuleTriggerAction.Type.EMAIL  # shadows builtins.type in this scope
    AlertRuleTriggerAction.register_type("something", type, [])(mock_handler)
    trigger = AlertRuleTriggerAction(type=type.value)
    result = getattr(trigger, self.method)(
        Mock(), Mock(), Mock(), metric_value=123, new_status=IncidentStatus.CRITICAL
    )
    # TODO(RyanSkonnord): Don't assert on return value.
    # All concrete ActionHandlers return None from their fire and resolve
    # methods. It seems that this return value's only purpose is to spy on
    # whether the AlertRuleTriggerAction produced a handler.
    assert result == mock_method.return_value
| AlertRuleTriggerActionActivateBaseTest |
python | mlflow__mlflow | tests/sklearn/test_sklearn_model_export.py | {
"start": 2145,
"end": 34669
} | class ____(NamedTuple):
model: Any
inference_data: Any
@pytest.fixture(scope="module")
def iris_df():
    """Iris dataset restricted to its first two features, as (X_df, y_series)."""
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X_df = pd.DataFrame(X, columns=iris.feature_names)
    X_df = X_df.iloc[:, :2]  # we only take the first two features.
    y_series = pd.Series(y)
    return X_df, y_series
@pytest.fixture(scope="module")
def iris_signature():
    """Expected MLflow model signature for the two-feature iris models."""
    return ModelSignature(
        inputs=Schema(
            [
                ColSpec(name="sepal length (cm)", type=DataType.double),
                ColSpec(name="sepal width (cm)", type=DataType.double),
            ]
        ),
        outputs=Schema([ColSpec(type=DataType.long)]),
    )
@pytest.fixture(scope="module")
def sklearn_knn_model(iris_df):
    """KNN classifier fitted on iris, bundled with its inference data."""
    X, y = iris_df
    knn_model = knn.KNeighborsClassifier()
    knn_model.fit(X, y)
    return ModelWithData(model=knn_model, inference_data=X)
@pytest.fixture(scope="module")
def sklearn_logreg_model(iris_df):
    """Logistic-regression classifier fitted on iris, with its inference data."""
    X, y = iris_df
    linear_lr = glm.LogisticRegression()
    linear_lr.fit(X, y)
    return ModelWithData(model=linear_lr, inference_data=X)
@pytest.fixture(scope="module")
def sklearn_gaussian_model(iris_df):
    """Gaussian naive-Bayes classifier fitted on iris, with its inference data."""
    X, y = iris_df
    gaussian_nb = nb.GaussianNB()
    gaussian_nb.fit(X, y)
    return ModelWithData(model=gaussian_nb, inference_data=X)
@pytest.fixture(scope="module")
def sklearn_custom_transformer_model(sklearn_knn_model, iris_df):
    """Pipeline containing a locally-defined transformer function.

    Because `transform` is defined at function scope, plain pickle cannot
    serialize it (exercised by the cloudpickle tests in this file).
    """
    def transform(vec):
        return vec + 1

    transformer = SKFunctionTransformer(transform, validate=True)
    pipeline = SKPipeline([("custom_transformer", transformer), ("knn", sklearn_knn_model.model)])
    X, _ = iris_df
    return ModelWithData(pipeline, inference_data=X)
@pytest.fixture
def model_path(tmp_path):
    """Per-test destination path for saved models."""
    return os.path.join(tmp_path, "model")
@pytest.fixture
def sklearn_custom_env(tmp_path):
    """Path to a custom conda env YAML (scikit-learn + pytest pip deps)."""
    conda_env = os.path.join(tmp_path, "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn", "pytest"])
    return conda_env
def test_model_save_load(sklearn_knn_model, model_path):
    """Round-trip save/load preserves predictions via native and pyfunc flavors."""
    knn_model = sklearn_knn_model.model
    mlflow.sklearn.save_model(sk_model=knn_model, path=model_path)

    reloaded_knn_model = mlflow.sklearn.load_model(model_uri=model_path)
    reloaded_knn_pyfunc = pyfunc.load_model(model_uri=model_path)

    np.testing.assert_array_equal(
        knn_model.predict(sklearn_knn_model.inference_data),
        reloaded_knn_model.predict(sklearn_knn_model.inference_data),
    )
    np.testing.assert_array_equal(
        reloaded_knn_model.predict(sklearn_knn_model.inference_data),
        reloaded_knn_pyfunc.predict(sklearn_knn_model.inference_data),
    )
def test_model_save_behavior_with_preexisting_folders(sklearn_knn_model, tmp_path):
    """Saving into an existing empty dir works; a non-empty dir is rejected."""
    # NOTE(review): passes the ModelWithData tuple rather than .model —
    # confirm intent (this test only exercises directory handling).
    sklearn_model_path = tmp_path / "sklearn_model_empty_exists"
    sklearn_model_path.mkdir()
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    sklearn_model_path = tmp_path / "sklearn_model_filled_exists"
    sklearn_model_path.mkdir()
    (sklearn_model_path / "foo.txt").write_text("dummy content")
    with pytest.raises(MlflowException, match="already exists and is not empty"):
        mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)
def test_signature_and_examples_are_saved_correctly(sklearn_knn_model, iris_signature):
    """Every (signature, input_example) combination persists as expected."""
    data = sklearn_knn_model.inference_data
    model = sklearn_knn_model.model
    example_ = data[:3]
    for signature in (None, iris_signature):
        for example in (None, example_):
            with TempDir() as tmp:
                path = tmp.path("model")
                mlflow.sklearn.save_model(
                    model, path=path, signature=signature, input_example=example
                )
                mlflow_model = Model.load(path)
                # With no explicit signature but an example present, the
                # signature is inferred and should still match iris_signature.
                if signature is None and example is None:
                    assert mlflow_model.signature is None
                else:
                    assert mlflow_model.signature == iris_signature
                if example is None:
                    assert mlflow_model.saved_input_example_info is None
                else:
                    np.testing.assert_array_equal(_read_example(mlflow_model, path), example)
def test_model_load_from_remote_uri_succeeds(sklearn_knn_model, model_path, mock_s3_bucket):
    """Models uploaded to (mocked) S3 can be loaded back by URI."""
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path)

    artifact_root = f"s3://{mock_s3_bucket}"
    artifact_path = "model"
    artifact_repo = S3ArtifactRepository(artifact_root)
    artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)

    model_uri = artifact_root + "/" + artifact_path
    reloaded_knn_model = mlflow.sklearn.load_model(model_uri=model_uri)
    np.testing.assert_array_equal(
        sklearn_knn_model.model.predict(sklearn_knn_model.inference_data),
        reloaded_knn_model.predict(sklearn_knn_model.inference_data),
    )
def test_model_log(sklearn_logreg_model, model_path):
    """log_model works both with and without an explicitly started run."""
    with TempDir(chdr=True, remove_on_exit=True) as tmp:
        for should_start_run in [False, True]:
            try:
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "linear"
                conda_env = os.path.join(tmp.path(), "conda_env.yaml")
                _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn"])

                model_info = mlflow.sklearn.log_model(
                    sklearn_logreg_model.model,
                    name=artifact_path,
                    conda_env=conda_env,
                )

                reloaded_logsklearn_knn_model = mlflow.sklearn.load_model(
                    model_uri=model_info.model_uri
                )
                np.testing.assert_array_equal(
                    sklearn_logreg_model.model.predict(sklearn_logreg_model.inference_data),
                    reloaded_logsklearn_knn_model.predict(sklearn_logreg_model.inference_data),
                )

                # NOTE(review): rebinding shadows the model_path fixture arg.
                model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
                model_config = Model.load(os.path.join(model_path, "MLmodel"))
                assert pyfunc.FLAVOR_NAME in model_config.flavors
                assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
                env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]["conda"]
                assert os.path.exists(os.path.join(model_path, env_path))
            finally:
                mlflow.end_run()
def test_log_model_calls_register_model(sklearn_logreg_model):
    """log_model with registered_model_name triggers model registration."""
    artifact_path = "linear"
    register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
    with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
        conda_env = os.path.join(tmp.path(), "conda_env.yaml")
        _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn"])
        model_info = mlflow.sklearn.log_model(
            sklearn_logreg_model.model,
            name=artifact_path,
            conda_env=conda_env,
            registered_model_name="AdsModel1",
        )
        assert_register_model_called_with_local_model_path(
            register_model_mock=mlflow.tracking._model_registry.fluent._register_model,
            model_uri=model_info.model_uri,
            registered_model_name="AdsModel1",
        )
def test_log_model_call_register_model_to_uc(configure_client_for_uc, sklearn_logreg_model):
    """Unity Catalog registration passes a local model path to create_model_version."""
    artifact_path = "linear"
    mock_model_version = ModelVersion(
        name="AdsModel1",
        version=1,
        creation_timestamp=123,
        status=ModelVersionStatus.to_string(ModelVersionStatus.READY),
    )
    with (
        mock.patch.object(UcModelRegistryStore, "create_registered_model"),
        mock.patch.object(
            UcModelRegistryStore,
            "create_model_version",
            return_value=mock_model_version,
            autospec=True,
        ) as mock_create_mv,
        TempDir(chdr=True, remove_on_exit=True) as tmp,
    ):
        with mlflow.start_run() as run:
            conda_env = os.path.join(tmp.path(), "conda_env.yaml")
            _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn"])
            model_info = mlflow.sklearn.log_model(
                sklearn_logreg_model.model,
                name=artifact_path,
                conda_env=conda_env,
                registered_model_name="AdsModel1",
            )
            source = model_info.artifact_path
            # autospec=True means args[0] is the store instance (self).
            [(args, kwargs)] = mock_create_mv.call_args_list
            assert args[1:] == ("AdsModel1", source, run.info.run_id, [], None, None)
            assert kwargs["local_model_path"].startswith(tempfile.gettempdir())
def test_log_model_no_registered_model_name(sklearn_logreg_model):
    """Without registered_model_name, no registration call is made."""
    artifact_path = "model"
    register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
    with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
        conda_env = os.path.join(tmp.path(), "conda_env.yaml")
        _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn"])
        mlflow.sklearn.log_model(
            sklearn_logreg_model.model,
            name=artifact_path,
            conda_env=conda_env,
        )
        mlflow.tracking._model_registry.fluent._register_model.assert_not_called()
def test_custom_transformer_can_be_saved_and_loaded_with_cloudpickle_format(
    sklearn_custom_transformer_model, tmp_path
):
    """cloudpickle serializes a locally-defined transformer; plain pickle cannot."""
    custom_transformer_model = sklearn_custom_transformer_model.model

    # Because the model contains a customer transformer that is not defined at the top level of the
    # current test module, we expect pickle to fail when attempting to serialize it. In contrast,
    # we expect cloudpickle to successfully locate the transformer definition and serialize the
    # model successfully.
    pickle_format_model_path = os.path.join(tmp_path, "pickle_model")
    with pytest.raises(AttributeError, match="Can't pickle local object"):
        mlflow.sklearn.save_model(
            sk_model=custom_transformer_model,
            path=pickle_format_model_path,
            serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE,
        )

    cloudpickle_format_model_path = os.path.join(tmp_path, "cloud_pickle_model")
    mlflow.sklearn.save_model(
        sk_model=custom_transformer_model,
        path=cloudpickle_format_model_path,
        serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE,
    )

    reloaded_custom_transformer_model = mlflow.sklearn.load_model(
        model_uri=cloudpickle_format_model_path
    )

    np.testing.assert_array_equal(
        custom_transformer_model.predict(sklearn_custom_transformer_model.inference_data),
        reloaded_custom_transformer_model.predict(sklearn_custom_transformer_model.inference_data),
    )
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
    sklearn_knn_model, model_path, sklearn_custom_env
):
    """save_model copies (not links) the custom conda env into the model dir."""
    mlflow.sklearn.save_model(
        sk_model=sklearn_knn_model.model, path=model_path, conda_env=sklearn_custom_env
    )

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(saved_conda_env_path)
    # Must be a copy inside the model directory, not the original file.
    assert saved_conda_env_path != sklearn_custom_env

    with open(sklearn_custom_env) as f:
        sklearn_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path) as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == sklearn_custom_env_parsed
def test_model_save_persists_requirements_in_mlflow_model_directory(
    sklearn_knn_model, model_path, sklearn_custom_env
):
    """save_model writes a requirements.txt consistent with the conda env."""
    mlflow.sklearn.save_model(
        sk_model=sklearn_knn_model.model, path=model_path, conda_env=sklearn_custom_env
    )
    saved_pip_req_path = os.path.join(model_path, "requirements.txt")
    _compare_conda_env_requirements(sklearn_custom_env, saved_pip_req_path)
def test_log_model_with_pip_requirements(sklearn_knn_model, tmp_path):
    """pip_requirements accepts a file path, a list, and -r/-c directives."""
    expected_mlflow_version = _mlflow_major_version_string()
    # Path to a requirements file
    req_file = tmp_path.joinpath("requirements.txt")
    req_file.write_text("a")
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", pip_requirements=str(req_file)
        )
        _assert_pip_requirements(model_info.model_uri, [expected_mlflow_version, "a"], strict=True)

    # List of requirements
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", pip_requirements=[f"-r {req_file}", "b"]
        )
        _assert_pip_requirements(
            model_info.model_uri, [expected_mlflow_version, "a", "b"], strict=True
        )

    # Constraints file
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", pip_requirements=[f"-c {req_file}", "b"]
        )
        _assert_pip_requirements(
            model_info.model_uri,
            [expected_mlflow_version, "b", "-c constraints.txt"],
            ["a"],
            strict=True,
        )
def test_log_model_with_extra_pip_requirements(sklearn_knn_model, tmp_path):
    """extra_pip_requirements appends to the flavor's default requirements."""
    expected_mlflow_version = _mlflow_major_version_string()
    default_reqs = mlflow.sklearn.get_default_pip_requirements(include_cloudpickle=True)

    # Path to a requirements file
    req_file = tmp_path.joinpath("requirements.txt")
    req_file.write_text("a")
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", extra_pip_requirements=str(req_file)
        )
        _assert_pip_requirements(
            model_info.model_uri, [expected_mlflow_version, *default_reqs, "a"]
        )

    # List of requirements
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", extra_pip_requirements=[f"-r {req_file}", "b"]
        )
        _assert_pip_requirements(
            model_info.model_uri, [expected_mlflow_version, *default_reqs, "a", "b"]
        )

    # Constraints file
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model, name="model", extra_pip_requirements=[f"-c {req_file}", "b"]
        )
        _assert_pip_requirements(
            model_info.model_uri,
            [expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"],
            ["a"],
        )
def test_model_save_accepts_conda_env_as_dict(sklearn_knn_model, model_path):
    """conda_env may be passed as an in-memory dict and is persisted verbatim."""
    conda_env = dict(mlflow.sklearn.get_default_conda_env())
    conda_env["dependencies"].append("pytest")
    mlflow.sklearn.save_model(
        sk_model=sklearn_knn_model.model, path=model_path, conda_env=conda_env
    )

    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(saved_conda_env_path)

    with open(saved_conda_env_path) as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == conda_env
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
    sklearn_knn_model, sklearn_custom_env
):
    """log_model copies the custom conda env into the logged model artifacts."""
    artifact_path = "model"
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model,
            name=artifact_path,
            conda_env=sklearn_custom_env,
        )
        model_uri = model_info.model_uri

    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(saved_conda_env_path)
    # Must be a copy inside the artifacts, not the original file.
    assert saved_conda_env_path != sklearn_custom_env

    with open(sklearn_custom_env) as f:
        sklearn_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path) as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == sklearn_custom_env_parsed
def test_model_log_persists_requirements_in_mlflow_model_directory(
    sklearn_knn_model, sklearn_custom_env
):
    """log_model writes a requirements.txt consistent with the conda env."""
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(
            sklearn_knn_model.model,
            name="model",
            conda_env=sklearn_custom_env,
        )
    model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
    saved_pip_req_path = os.path.join(model_path, "requirements.txt")
    _compare_conda_env_requirements(sklearn_custom_env, saved_pip_req_path)
def test_model_save_throws_exception_if_serialization_format_is_unrecognized(
    sklearn_knn_model, model_path
):
    """An invalid serialization_format fails fast and leaves no partial output."""
    with pytest.raises(MlflowException, match="Unrecognized serialization format") as exc:
        mlflow.sklearn.save_model(
            sk_model=sklearn_knn_model.model,
            path=model_path,
            serialization_format="not a valid format",
        )
        assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)

    # The unsupported serialization format should have been detected prior to the execution of
    # any directory creation or state-mutating persistence logic that would prevent a second
    # serialization call with the same model path from succeeding
    assert not os.path.exists(model_path)
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path)
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
sklearn_knn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path)
_assert_pip_requirements(
model_path, mlflow.sklearn.get_default_pip_requirements(include_cloudpickle=True)
)
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
sklearn_knn_model,
):
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(sklearn_knn_model.model, name="model")
_assert_pip_requirements(
model_info.model_uri, mlflow.sklearn.get_default_pip_requirements(include_cloudpickle=True)
)
def test_model_save_uses_cloudpickle_serialization_format_by_default(sklearn_knn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path)
sklearn_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME
)
assert "serialization_format" in sklearn_conf
assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE
def test_model_log_uses_cloudpickle_serialization_format_by_default(sklearn_knn_model):
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(sklearn_knn_model.model, name="model")
model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
sklearn_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME
)
assert "serialization_format" in sklearn_conf
assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE
def test_model_save_with_cloudpickle_format_adds_cloudpickle_to_conda_environment(
sklearn_knn_model, model_path
):
mlflow.sklearn.save_model(
sk_model=sklearn_knn_model.model,
path=model_path,
serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE,
)
sklearn_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME
)
assert "serialization_format" in sklearn_conf
assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
pip_deps = [
dependency
for dependency in saved_conda_env_parsed["dependencies"]
if type(dependency) == dict and "pip" in dependency
]
assert len(pip_deps) == 1
assert any("cloudpickle" in pip_dep for pip_dep in pip_deps[0]["pip"])
def test_model_save_without_cloudpickle_format_does_not_add_cloudpickle_to_conda_environment(
sklearn_knn_model, model_path
):
non_cloudpickle_serialization_formats = list(mlflow.sklearn.SUPPORTED_SERIALIZATION_FORMATS)
non_cloudpickle_serialization_formats.remove(mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE)
for serialization_format in non_cloudpickle_serialization_formats:
mlflow.sklearn.save_model(
sk_model=sklearn_knn_model.model,
path=model_path,
serialization_format=serialization_format,
)
sklearn_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME
)
assert "serialization_format" in sklearn_conf
assert sklearn_conf["serialization_format"] == serialization_format
pyfunc_conf = _get_flavor_configuration(
model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME
)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert all(
"cloudpickle" not in dependency for dependency in saved_conda_env_parsed["dependencies"]
)
def test_load_pyfunc_succeeds_for_older_models_with_pyfunc_data_field(
sklearn_knn_model, model_path
):
"""
This test verifies that scikit-learn models saved in older versions of MLflow are loaded
successfully by ``mlflow.pyfunc.load_model``. These older models specify a pyfunc ``data``
field referring directly to a serialized scikit-learn model file. In contrast, newer models
omit the ``data`` field.
"""
mlflow.sklearn.save_model(
sk_model=sklearn_knn_model.model,
path=model_path,
serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE,
)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME)
sklearn_conf = model_conf.flavors.get(mlflow.sklearn.FLAVOR_NAME)
assert sklearn_conf is not None
assert pyfunc_conf is not None
pyfunc_conf[pyfunc.DATA] = sklearn_conf["pickled_model"]
reloaded_knn_pyfunc = pyfunc.load_model(model_uri=model_path)
np.testing.assert_array_equal(
sklearn_knn_model.model.predict(sklearn_knn_model.inference_data),
reloaded_knn_pyfunc.predict(sklearn_knn_model.inference_data),
)
def test_add_pyfunc_flavor_only_when_model_defines_predict(model_path):
from sklearn.cluster import AgglomerativeClustering
sk_model = AgglomerativeClustering()
assert not hasattr(sk_model, "predict")
mlflow.sklearn.save_model(
sk_model=sk_model,
path=model_path,
serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE,
)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
assert pyfunc.FLAVOR_NAME not in model_conf.flavors
def test_pyfunc_serve_and_score(sklearn_knn_model):
model, inference_dataframe = sklearn_knn_model
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
model, name=artifact_path, input_example=inference_dataframe
)
inference_payload = load_serving_example(model_info.model_uri)
resp = pyfunc_serve_and_score_model(
model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = pd.DataFrame(
data=json.loads(resp.content.decode("utf-8"))["predictions"]
).values.squeeze()
np.testing.assert_array_almost_equal(scores, model.predict(inference_dataframe))
def test_sklearn_compatible_with_mlflow_2_4_0(sklearn_knn_model, tmp_path):
model, inference_dataframe = sklearn_knn_model
model_predict = model.predict(inference_dataframe)
# save test model
tmp_path.joinpath("MLmodel").write_text(
f"""
artifact_path: model
flavors:
python_function:
env:
conda: conda.yaml
virtualenv: python_env.yaml
loader_module: mlflow.sklearn
model_path: model.pkl
predict_fn: predict
python_version: 3.10.19
sklearn:
code: null
pickled_model: model.pkl
serialization_format: cloudpickle
sklearn_version: {sklearn.__version__}
mlflow_version: 2.4.0
model_uuid: c9833d74b1ff4013a1c9eff05d39eeef
run_id: 8146a2ae86104f5b853351e600fc9d7b
utc_time_created: '2023-07-04 07:19:43.561797'
"""
)
tmp_path.joinpath("python_env.yaml").write_text(
"""
python: 3.10.19
build_dependencies:
- pip==25.1.1
- setuptools==80.4.0
- wheel==0.45.1
dependencies:
- -r requirements.txt
"""
)
tmp_path.joinpath("requirements.txt").write_text(
f"""
mlflow==2.4.0
cloudpickle
numpy
psutil
scikit-learn=={sklearn.__version__}
scipy
"""
)
with open(tmp_path / "model.pkl", "wb") as out:
pickle.dump(model, out, protocol=pickle.DEFAULT_PROTOCOL)
assert Version(mlflow.__version__) > Version("2.4.0")
model_uri = str(tmp_path)
pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
# predict is compatible
local_predict = pyfunc_loaded.predict(inference_dataframe)
np.testing.assert_array_almost_equal(local_predict, model_predict)
# model serving is compatible
resp = pyfunc_serve_and_score_model(
model_uri,
data=pd.DataFrame(inference_dataframe),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = pd.DataFrame(
data=json.loads(resp.content.decode("utf-8"))["predictions"]
).values.squeeze()
np.testing.assert_array_almost_equal(scores, model_predict)
# Issues a warning if params are specified prior to MLflow support in 2.5.0
with mock.patch("mlflow.models.utils._logger.warning") as mock_warning:
pyfunc_loaded.predict(inference_dataframe, params={"top_k": 2})
mock_warning.assert_called_with(
"`params` can only be specified at inference time if the model signature defines a params "
"schema. This model does not define a params schema. Ignoring provided params: "
"['top_k']"
)
def test_log_model_with_code_paths(sklearn_knn_model):
artifact_path = "model"
with (
mlflow.start_run(),
mock.patch("mlflow.sklearn._add_code_from_conf_to_system_path") as add_mock,
):
model_info = mlflow.sklearn.log_model(
sklearn_knn_model.model, name=artifact_path, code_paths=[__file__]
)
_compare_logged_code_paths(__file__, model_info.model_uri, mlflow.sklearn.FLAVOR_NAME)
mlflow.sklearn.load_model(model_uri=model_info.model_uri)
add_mock.assert_called()
@pytest.mark.parametrize(
"predict_fn", ["predict", "predict_proba", "predict_log_proba", "predict_joint_log_proba"]
)
def test_log_model_with_custom_pyfunc_predict_fn(sklearn_gaussian_model, predict_fn):
if Version(sklearn.__version__) < Version("1.2.0") and predict_fn == "predict_joint_log_proba":
pytest.skip("predict_joint_log_proba is not available in scikit-learn < 1.2.0")
model, inference_dataframe = sklearn_gaussian_model
expected_scores = getattr(model, predict_fn)(inference_dataframe)
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
model, name=artifact_path, pyfunc_predict_fn=predict_fn
)
loaded_model = pyfunc.load_model(model_info.model_uri)
actual_scores = loaded_model.predict(inference_dataframe)
np.testing.assert_array_almost_equal(expected_scores, actual_scores)
def test_virtualenv_subfield_points_to_correct_path(sklearn_logreg_model, model_path):
mlflow.sklearn.save_model(sklearn_logreg_model.model, path=model_path)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
python_env_path = Path(model_path, pyfunc_conf[pyfunc.ENV]["virtualenv"])
assert python_env_path.exists()
assert python_env_path.is_file()
def test_model_save_load_with_metadata(sklearn_knn_model, model_path):
mlflow.sklearn.save_model(
sklearn_knn_model.model, path=model_path, metadata={"metadata_key": "metadata_value"}
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_metadata(sklearn_knn_model):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
metadata={"metadata_key": "metadata_value"},
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_signature_inference(sklearn_knn_model, iris_signature):
artifact_path = "model"
X = sklearn_knn_model.inference_data
example = X.iloc[[0]]
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
sklearn_knn_model.model, name=artifact_path, input_example=example
)
mlflow_model = Model.load(model_info.model_uri)
assert mlflow_model.signature == iris_signature
def test_model_size_bytes(sklearn_logreg_model, tmp_path):
mlflow.sklearn.save_model(sklearn_logreg_model.model, path=tmp_path)
# expected size only counts for files saved before the MLmodel file is saved
model_file = tmp_path.joinpath("model.pkl")
with model_file.open("rb") as fp:
expected_size = len(fp.read())
mlmodel = yaml.safe_load(tmp_path.joinpath("MLmodel").read_bytes())
assert mlmodel["model_size_bytes"] == expected_size
def test_model_registration_metadata_handling(sklearn_knn_model, tmp_path):
artifact_path = "model"
with mlflow.start_run():
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name="test",
)
model_uri = "models:/test/1"
artifact_repository = get_artifact_repository(model_uri)
dst_full = tmp_path.joinpath("full")
dst_full.mkdir()
artifact_repository.download_artifacts("MLmodel", dst_full)
# This validates that the models artifact repo will not attempt to create a
# "registered model metadata" file if the source of an artifact download is a file.
assert os.listdir(dst_full) == ["MLmodel"]
def test_pipeline_predict_proba(sklearn_knn_model, model_path):
knn_model = sklearn_knn_model.model
pipeline = make_pipeline(knn_model)
mlflow.sklearn.save_model(sk_model=pipeline, path=model_path, pyfunc_predict_fn="predict_proba")
reloaded_knn_pyfunc = pyfunc.load_model(model_uri=model_path)
np.testing.assert_array_equal(
knn_model.predict_proba(sklearn_knn_model.inference_data),
reloaded_knn_pyfunc.predict(sklearn_knn_model.inference_data),
)
def test_get_raw_model(sklearn_knn_model):
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
sklearn_knn_model.model, name="model", input_example=sklearn_knn_model.inference_data
)
pyfunc_model = pyfunc.load_model(model_info.model_uri)
raw_model = pyfunc_model.get_raw_model()
assert type(raw_model) == type(sklearn_knn_model.model)
np.testing.assert_array_equal(
raw_model.predict(sklearn_knn_model.inference_data),
sklearn_knn_model.model.predict(sklearn_knn_model.inference_data),
)
| ModelWithData |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axislines.py | {
"start": 2005,
"end": 3762
} | class ____:
"""
Base class for axis helper.
Subclasses should define the methods listed below. The *axes*
argument will be the ``.axes`` attribute of the caller artist. ::
# Construct the spine.
def get_line_transform(self, axes):
return transform
def get_line(self, axes):
return path
# Construct the label.
def get_axislabel_transform(self, axes):
return transform
def get_axislabel_pos_angle(self, axes):
return (x, y), angle
# Construct the ticks.
def get_tick_transform(self, axes):
return transform
def get_tick_iterators(self, axes):
# A pair of iterables (one for major ticks, one for minor ticks)
# that yield (tick_position, tick_angle, tick_label).
return iter_major, iter_minor
"""
def __init__(self, nth_coord):
self.nth_coord = nth_coord
def update_lim(self, axes):
pass
def get_nth_coord(self):
return self.nth_coord
def _to_xy(self, values, const):
"""
Create a (*values.shape, 2)-shape array representing (x, y) pairs.
The other coordinate is filled with the constant *const*.
Example::
>>> self.nth_coord = 0
>>> self._to_xy([1, 2, 3], const=0)
array([[1, 0],
[2, 0],
[3, 0]])
"""
if self.nth_coord == 0:
return np.stack(np.broadcast_arrays(values, const), axis=-1)
elif self.nth_coord == 1:
return np.stack(np.broadcast_arrays(const, values), axis=-1)
else:
raise ValueError("Unexpected nth_coord")
| _AxisArtistHelperBase |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 39217,
"end": 42350
} | class ____:
def test_1_nfev_simplicial(self):
bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
def fun(x):
fun.nfev += 1
return rosen(x)
fun.nfev = 0
result = shgo(fun, bounds)
np.testing.assert_equal(fun.nfev, result.nfev)
def test_1_nfev_sobol(self):
bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
def fun(x):
fun.nfev += 1
return rosen(x)
fun.nfev = 0
result = shgo(fun, bounds, sampling_method='sobol')
np.testing.assert_equal(fun.nfev, result.nfev)
def test_vector_constraint():
# gh15514
def quad(x):
x = np.asarray(x)
return [np.sum(x ** 2)]
nlc = NonlinearConstraint(quad, [2.2], [3])
oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
assert np.all(np.sum((res.x)**2) >= 2.2)
assert np.all(np.sum((res.x) ** 2) <= 3.0)
assert res.success
@pytest.mark.filterwarnings("ignore:delta_grad")
def test_trust_constr():
def quad(x):
x = np.asarray(x)
return [np.sum(x ** 2)]
nlc = NonlinearConstraint(quad, [2.6], [3])
minimizer_kwargs = {'method': 'trust-constr'}
# note that we don't supply the constraints in minimizer_kwargs,
# so if the final result obeys the constraints we know that shgo
# passed them on to 'trust-constr'
res = shgo(
rosen,
[(0, 10), (0, 10)],
constraints=nlc,
sampling_method='sobol',
minimizer_kwargs=minimizer_kwargs
)
assert np.all(np.sum((res.x)**2) >= 2.6)
assert np.all(np.sum((res.x) ** 2) <= 3.0)
assert res.success
def test_equality_constraints():
# gh16260
bounds = [(0.9, 4.0)] * 2 # Constrain probabilities to 0 and 1.
def faulty(x):
return x[0] + x[1]
nlc = NonlinearConstraint(faulty, 3.9, 3.9)
res = shgo(rosen, bounds=bounds, constraints=nlc)
assert_allclose(np.sum(res.x), 3.9)
def faulty(x):
return x[0] + x[1] - 3.9
constraints = {'type': 'eq', 'fun': faulty}
res = shgo(rosen, bounds=bounds, constraints=constraints)
assert_allclose(np.sum(res.x), 3.9)
bounds = [(0, 1.0)] * 4
# sum of variable should equal 1.
def faulty(x):
return x[0] + x[1] + x[2] + x[3] - 1
# options = {'minimize_every_iter': True, 'local_iter':10}
constraints = {'type': 'eq', 'fun': faulty}
res = shgo(
lambda x: - np.prod(x),
bounds=bounds,
constraints=constraints,
sampling_method='sobol'
)
assert_allclose(np.sum(res.x), 1.0)
def test_gh16971():
def cons(x):
return np.sum(x**2) - 0
c = {'fun': cons, 'type': 'ineq'}
minimizer_kwargs = {
'method': 'COBYLA',
'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05}
}
s = SHGO(
rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs
)
assert s.minimizer_kwargs['method'].lower() == 'cobyla'
assert s.minimizer_kwargs['options']['catol'] == 0.05
| TestShgoReturns |
python | huggingface__transformers | src/transformers/models/qwen2/configuration_qwen2.py | {
"start": 917,
"end": 8633
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151936):
Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Qwen2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 22016):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 32):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*, defaults to 4096):
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
max_window_layers (`int`, *optional*, defaults to 28):
The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
additional layer afterwards will use SWA (Sliding Window Attention).
layer_types (`list`, *optional*):
Attention pattern for each layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import Qwen2Model, Qwen2Config
>>> # Initializing a Qwen2 style configuration
>>> configuration = Qwen2Config()
>>> # Initializing a model from the Qwen2-7B style configuration
>>> model = Qwen2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Qwen2`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151936,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 22016,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 32,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_sliding_window: Optional[bool] = False,
sliding_window: Optional[int] = 4096,
max_window_layers: Optional[int] = 28,
layer_types: Optional[list[str]] = None,
attention_dropout: Optional[float] = 0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window if self.use_sliding_window else None
self.max_window_layers = max_window_layers
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if self.sliding_window is not None and i >= self.max_window_layers
else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Qwen2Config"]
| Qwen2Config |
python | kamyu104__LeetCode-Solutions | Python/maximize-the-number-of-target-nodes-after-connecting-trees-i.py | {
"start": 3588,
"end": 5429
} | class ____(object):
def maxTargetNodes(self, edges1, edges2, k):
"""
:type edges1: List[List[int]]
:type edges2: List[List[int]]
:type k: int
:rtype: List[int]
"""
def tree_dp(adj, k):
def dfs1(u, p):
for v in adj[u]:
if v == p:
continue
dfs1(v, u)
dp[u][0] += 1
for v in adj[u]:
if v == p:
continue
for d in xrange(k):
dp[u][d+1] += dp[v][d]
def dfs2(u, p, curr):
def update(v, u, curr):
new_curr = [0]*len(curr)
for d in xrange(len(curr)-1):
new_curr[d+1] = curr[d]+(dp[u][d]-(dp[v][d-1] if d-1 >= 0 else 0))
return new_curr
for v in adj[u]:
if v == p:
continue
dfs2(v, u, update(v, u, curr))
result[u] = sum(dp[u][i]+curr[i] for i in xrange(len(curr)))
result = [0]*len(adj)
k = min(k, len(adj)-1)
if k == -1:
return result
dp = [[0]*(k+1) for _ in xrange(len(adj))]
dfs1(0, -1)
dfs2(0, -1, [0]*(k+1))
return result
def find_adj(edges):
adj = [[] for _ in xrange(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return adj
adj2 = find_adj(edges2)
mx = max(tree_dp(adj2, k-1))
adj1 = find_adj(edges1)
return [mx+x for x in tree_dp(adj1, k)]
# Time: O(n^2 + m^2)
# Space: O(n + m)
# brute force, bfs
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_eager_test.py | {
"start": 1006,
"end": 2223
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']]),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]),
dict(pylist=[[[1, 2], [3, 4]], [[5, 6], [], [7, 8]]], ragged_rank=1),
])
def testRaggedTensorToList(self, pylist, ragged_rank=None):
rt = ragged_factory_ops.constant(pylist, ragged_rank)
self.assertAllEqual(rt, pylist)
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']],
expected_str="[[b'a', b'b'], [b'c']]"),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]],
expected_str='[[[1, 2], [3]], [[4, 5, 6], [], [7]]]'),
dict(pylist=[[0, 1], np.arange(2, 2000)],
expected_str='[[0, 1], [2, 3, 4, ..., 1997, 1998, 1999]]'),
dict(pylist=[[[0, 1]], [np.arange(2, 2000)]],
expected_str='[[[0, 1]],\n [[2, 3, 4, ..., 1997, 1998, 1999]]]'),
])
def testRaggedTensorStr(self, pylist, expected_str):
rt = ragged_factory_ops.constant(pylist)
self.assertEqual(str(rt), f'<tf.RaggedTensor {expected_str}>')
if __name__ == '__main__':
ops.enable_eager_execution()
googletest.main()
| RaggedTensorTest |
python | getsentry__sentry | tests/sentry/rules/history/endpoints/test_project_rule_stats.py | {
"start": 1048,
"end": 2669
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-rule-stats-index"
def test(self) -> None:
rule = Rule.objects.create(project=self.event.project)
rule_2 = Rule.objects.create(project=self.event.project)
history = []
for i in range(3):
for _ in range(i + 1):
history.append(
RuleFireHistory(
project=rule.project,
rule=rule,
group=self.group,
date_added=before_now(hours=i + 1),
)
)
for i in range(2):
history.append(
RuleFireHistory(
project=rule_2.project,
rule=rule_2,
group=self.group,
date_added=before_now(hours=i + 1),
)
)
RuleFireHistory.objects.bulk_create(history)
self.login_as(self.user)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
rule.id,
start=before_now(days=6),
end=before_now(days=0),
)
assert len(resp.data) == 144
now = timezone.now().replace(minute=0, second=0, microsecond=0)
assert [r for r in resp.data[-4:]] == [
{"date": now - timedelta(hours=3), "count": 3},
{"date": now - timedelta(hours=2), "count": 2},
{"date": now - timedelta(hours=1), "count": 1},
{"date": now, "count": 0},
]
| ProjectRuleStatsIndexEndpointTest |
python | scipy__scipy | scipy/ndimage/tests/test_interpolation.py | {
"start": 17819,
"end": 22702
} | class ____:
@pytest.mark.parametrize('order', range(0, 6))
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
def test_map_coordinates01(self, order, dtype, xp):
if is_jax(xp) and order > 1:
pytest.xfail("jax map_coordinates requires order <= 1")
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
expected = xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
if xp.isdtype(data.dtype, 'complex floating'):
data = data - 1j * data
expected = expected - 1j * expected
idx = np.indices(data.shape)
idx -= 1
idx = xp.asarray(idx)
out = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_map_coordinates02(self, order, xp):
if is_jax(xp):
if order > 1:
pytest.xfail("jax map_coordinates requires order <= 1")
if order == 1:
pytest.xfail("output differs. jax bug?")
data = xp.asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = np.indices(data.shape, np.float64)
idx -= 0.5
idx = xp.asarray(idx)
out1 = ndimage.shift(data, 0.5, order=order)
out2 = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out1, out2)
@skip_xp_backends("jax.numpy", reason="`order` is required in jax")
def test_map_coordinates03(self, xp):
data = _asarray([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]], order='F', xp=xp)
idx = np.indices(data.shape) - 1
idx = xp.asarray(idx)
out = ndimage.map_coordinates(data, idx)
expected = xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
assert_array_almost_equal(out, expected)
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
idx = np.indices(data[::2, ...].shape) - 1
idx = xp.asarray(idx)
out = ndimage.map_coordinates(data[::2, ...], idx)
assert_array_almost_equal(out, xp.asarray([[0, 0, 0, 0],
[0, 4, 1, 3]]))
assert_array_almost_equal(out, ndimage.shift(data[::2, ...], (1, 1)))
idx = np.indices(data[:, ::2].shape) - 1
idx = xp.asarray(idx)
out = ndimage.map_coordinates(data[:, ::2], idx)
assert_array_almost_equal(out, xp.asarray([[0, 0], [0, 4], [0, 7]]))
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
@skip_xp_backends(np_only=True)
def test_map_coordinates_endianness_with_output_parameter(self, xp):
# output parameter given as array or dtype with either endianness
# see issue #4127
# NB: NumPy-only
data = np.asarray([[1, 2], [7, 6]])
expected = np.asarray([[0, 0], [0, 1]])
idx = np.indices(data.shape)
idx -= 1
for out in [
data.dtype,
data.dtype.newbyteorder(),
np.empty_like(expected),
np.empty_like(expected).astype(expected.dtype.newbyteorder())
]:
returned = ndimage.map_coordinates(data, idx, output=out)
result = out if returned is None else returned
assert_array_almost_equal(result, expected)
@skip_xp_backends(np_only=True, reason='string `output` is numpy-specific')
def test_map_coordinates_with_string_output(self, xp):
data = xp.asarray([[1]])
idx = np.indices(data.shape)
idx = xp.asarray(idx)
out = ndimage.map_coordinates(data, idx, output='f')
assert out.dtype is np.dtype('f')
assert_array_almost_equal(out, xp.asarray([[1]]))
@pytest.mark.skip_xp_backends(cpu_only=True)
@pytest.mark.skipif('win32' in sys.platform or np.intp(0).itemsize < 8,
reason='do not run on 32 bit or windows '
'(no sparse memory)')
def test_map_coordinates_large_data(self, xp):
# check crash on large data
try:
n = 30000
# a = xp.reshape(xp.empty(n**2, dtype=xp.float32), (n, n))
a = np.empty(n**2, dtype=np.float32).reshape(n, n)
# fill the part we might read
a[n - 3:, n - 3:] = 0
ndimage.map_coordinates(
xp.asarray(a), xp.asarray([[n - 1.5], [n - 1.5]]), order=1
)
except MemoryError as e:
raise pytest.skip('Not enough memory available') from e
@make_xp_test_case(ndimage.affine_transform)
| TestMapCoordinates |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.