language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 45888,
"end": 50979
} | class ____(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', \
'scroll_event', 'motion_notify_event').
A MouseEvent has a number of special attributes in addition to those
defined by the parent `Event` and `LocationEvent` classes.
Attributes
----------
button : None or `MouseButton` or {'up', 'down'}
The button pressed. 'up' and 'down' are used for scroll events.
Note that LEFT and RIGHT actually refer to the "primary" and
"secondary" buttons, i.e. if the user inverts their left and right
buttons ("left-handed setting") then the LEFT button will be the one
physically on the right.
If this is unset, *name* is "scroll_event", and *step* is nonzero, then
this will be set to "up" or "down" depending on the sign of *step*.
buttons : None or frozenset
For 'motion_notify_event', the mouse buttons currently being pressed
(a set of zero or more MouseButtons);
for other events, None.
.. note::
For 'motion_notify_event', this attribute is more accurate than
the ``button`` (singular) attribute, which is obtained from the last
'button_press_event' or 'button_release_event' that occurred within
the canvas (and thus 1. be wrong if the last change in mouse state
occurred when the canvas did not have focus, and 2. cannot report
when multiple buttons are pressed).
This attribute is not set for 'button_press_event' and
'button_release_event' because GUI toolkits are inconsistent as to
whether they report the button state *before* or *after* the
press/release occurred.
.. warning::
On macOS, the Tk backends only report a single button even if
multiple buttons are pressed.
key : None or str
The key pressed when the mouse event triggered, e.g. 'shift'.
See `KeyEvent`.
.. warning::
This key is currently obtained from the last 'key_press_event' or
'key_release_event' that occurred within the canvas. Thus, if the
last change of keyboard state occurred while the canvas did not have
focus, this attribute will be wrong. On the other hand, the
``modifiers`` attribute should always be correct, but it can only
report on modifier keys.
step : float
The number of scroll steps (positive for 'up', negative for 'down').
This applies only to 'scroll_event' and defaults to 0 otherwise.
dblclick : bool
Whether the event is a double-click. This applies only to
'button_press_event' and is False otherwise. In particular, it's
not used in 'button_release_event'.
Examples
--------
::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None, *,
buttons=None, modifiers=None):
super().__init__(
name, canvas, x, y, guiEvent=guiEvent, modifiers=modifiers)
if button in MouseButton.__members__.values():
button = MouseButton(button)
if name == "scroll_event" and button is None:
if step > 0:
button = "up"
elif step < 0:
button = "down"
self.button = button
if name == "motion_notify_event":
self.buttons = frozenset(buttons if buttons is not None else [])
else:
# We don't support 'buttons' for button_press/release_event because
# toolkits are inconsistent as to whether they report the state
# before or after the event.
if buttons:
raise ValueError(
"'buttons' is only supported for 'motion_notify_event'")
self.buttons = None
self.key = key
self.step = step
self.dblclick = dblclick
@classmethod
def _from_ax_coords(cls, name, ax, xy, *args, **kwargs):
"""
Generate a synthetic event at a given axes coordinate.
This method is intended for creating events during testing. The event
can be emitted by calling its ``_process()`` method.
args and kwargs are mapped to `.MouseEvent.__init__` parameters,
starting with `button`.
"""
x, y = ax.transData.transform(xy)
event = cls(name, ax.figure.canvas, x, y, *args, **kwargs)
event.inaxes = ax
event.xdata, event.ydata = xy # Force exact xy to avoid fp roundtrip issues.
return event
def __str__(self):
return (f"{self.name}: "
f"xy=({self.x}, {self.y}) xydata=({self.xdata}, {self.ydata}) "
f"button={self.button} dblclick={self.dblclick} step={self.step} "
f"inaxes={self.inaxes}")
| MouseEvent |
python | jina-ai__jina | tests/unit/jaml/parsers/executors/test_legacy.py | {
"start": 542,
"end": 1231
} | class ____:
pass
D_arguments = {'a00', 'a0', 'a', 'b', 'c', 'd', 'self', 'args', 'kwargs'}
E_arguments = {'a00', 'a0', 'a', 'b', 'c', 'self', 'args', 'kwargs'}
A_dummy_arguments = {'self', 'args', 'kwargs'}
@pytest.mark.parametrize(
'input_class, expected_arguments',
[(E, E_arguments), (D, D_arguments), (A_dummy, A_dummy_arguments)],
)
def test_get_all_arguments(input_class, expected_arguments):
"""
Tests ExecutorLegacyParser._get_all_arguments retriving all arguments from a class and any class it inherits from
"""
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(class_=input_class)
assert arguments_from_cls == expected_arguments
| A_dummy |
python | django-debug-toolbar__django-debug-toolbar | tests/test_sanitize.py | {
"start": 64,
"end": 482
} | class ____(unittest.TestCase):
def test_success_convert(self):
input = 0
self.assertEqual(force_str(input), "0")
def test_failed_convert(self):
input = bytes.fromhex(
"a3f2b8c14e972d5a8fb3c7291a64e0859c472bf63d18a0945e73b2c84f917ae2"
)
self.assertEqual(
force_str(input), "Django Debug Toolbar was unable to parse value."
)
| ForceStrTestCase |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 8704,
"end": 9630
} | class ____(NumericInput):
''' Numeric Spinner input widget.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value_throttled = Readonly(Either(Null, Float, Int), help="""
Value reported at the end of interactions.
""")
mode = Override(default="float")
step = Interval(Float, start=1e-16, end=inf, default=1, help="""
The step added or subtracted to the current value.
""")
page_step_multiplier = Interval(Float, start=0, end=inf, default=10, help="""
Defines the multiplication factor applied to step when the page up and page
down keys are pressed.
""")
wheel_wait = Either(Int, Float, default=100, help="""
Defines the debounce time in ms before updating `value_throttled` when the
mouse wheel is used to change the input.
""")
@abstract
| Spinner |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 10880,
"end": 11704
} | class ____:
def __init__(self, data_fn: Callable[[int, int], list[_Example]]):
self.data_fn = data_fn
def get_result(self, limit: int, cursor: Cursor | None = None) -> CursorResult[_Example]:
assert limit > 0
offset = cursor.offset if cursor is not None else 0
# Request 1 more than limit so we can tell if there is another page
data = self.data_fn(offset, limit + 1)
has_more = any(len(result["examples"]) == limit + 1 for result in data)
for result in data:
result["examples"] = result["examples"][:limit]
return CursorResult(
data,
prev=Cursor(0, max(0, offset - limit), True, offset > 0),
next=Cursor(0, max(0, offset + limit), False, has_more),
)
@region_silo_endpoint
| SpanExamplesPaginator |
python | RaRe-Technologies__gensim | gensim/test/test_similarities.py | {
"start": 13350,
"end": 16120
} | class ____(_TestSimilarityABC):
def setUp(self):
self.cls = similarities.WmdSimilarity
self.w2v_model = Word2Vec(TEXTS, min_count=1).wv
def factoryMethod(self):
# Override factoryMethod.
return self.cls(TEXTS, self.w2v_model)
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_full(self, num_best=None):
# Override testFull.
index = self.cls(TEXTS, self.w2v_model)
index.num_best = num_best
query = TEXTS[0]
sims = index[query]
if num_best is not None:
# Sparse array.
for i, sim in sims:
# Note that similarities are bigger than zero, as they are the 1/ 1 + distances.
self.assertTrue(numpy.all(sim > 0.0))
else:
self.assertTrue(sims[0] == 1.0) # Similarity of a document with itself is 0.0.
self.assertTrue(numpy.all(sims[1:] > 0.0))
self.assertTrue(numpy.all(sims[1:] < 1.0))
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_non_increasing(self):
''' Check that similarities are non-increasing when `num_best` is not
`None`.'''
# NOTE: this could be implemented for other similarities as well (i.e.
# in _TestSimilarityABC).
index = self.cls(TEXTS, self.w2v_model, num_best=3)
query = TEXTS[0]
sims = index[query]
sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
# The difference of adjacent elements should be negative.
cond = sum(numpy.diff(sims2) < 0) == len(sims2) - 1
self.assertTrue(cond)
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_chunking(self):
# Override testChunking.
index = self.cls(TEXTS, self.w2v_model)
query = TEXTS[:3]
sims = index[query]
for i in range(3):
self.assertTrue(numpy.all(sims[i, i] == 1.0)) # Similarity of a document with itself is 0.0.
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
for sims_temp in sims:
for i, sim in sims_temp:
self.assertTrue(numpy.all(sim > 0.0))
self.assertTrue(numpy.all(sim <= 1.0))
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_iter(self):
# Override testIter.
index = self.cls(TEXTS, self.w2v_model)
for sims in index:
self.assertTrue(numpy.all(sims >= 0.0))
self.assertTrue(numpy.all(sims <= 1.0))
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_str(self):
index = self.cls(TEXTS, self.w2v_model)
self.assertTrue(str(index))
| TestWmdSimilarity |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec21.py | {
"start": 389,
"end": 658
} | class ____(Protocol[P1]):
def __call__(
self, ctx: Context, /, *args: P1.args, **kwargs: P1.kwargs
) -> Response: ...
def call_context_callback(
callback: ContextCallback[P3], /, *args: P3.args, **kwargs: P3.kwargs
) -> Response: ...
| ContextCallback |
python | streamlit__streamlit | lib/tests/streamlit/runtime/state/session_state_proxy_test.py | {
"start": 1561,
"end": 3382
} | class ____(unittest.TestCase):
reserved_key = f"{GENERATED_ELEMENT_ID_PREFIX}-some_key"
def setUp(self):
self.session_state_proxy = SessionStateProxy()
def test_iter(self):
state_iter = iter(self.session_state_proxy)
assert next(state_iter) == "foo"
with pytest.raises(StopIteration):
next(state_iter)
def test_len(self):
assert len(self.session_state_proxy) == 1
def test_validate_key(self):
with pytest.raises(StreamlitAPIException) as e:
require_valid_user_key(self.reserved_key)
assert "are reserved" in str(e.value)
def test_to_dict(self):
assert self.session_state_proxy.to_dict() == {"foo": "bar"}
# NOTE: We only test the error cases of {get, set, del}{item, attr} below
# since the others are tested in another test class.
def test_getitem_reserved_key(self):
with pytest.raises(StreamlitAPIException):
_ = self.session_state_proxy[self.reserved_key]
def test_setitem_reserved_key(self):
with pytest.raises(StreamlitAPIException):
self.session_state_proxy[self.reserved_key] = "foo"
def test_delitem_reserved_key(self):
with pytest.raises(StreamlitAPIException):
del self.session_state_proxy[self.reserved_key]
def test_getattr_reserved_key(self):
with pytest.raises(StreamlitAPIException):
getattr(self.session_state_proxy, self.reserved_key)
def test_setattr_reserved_key(self):
with pytest.raises(StreamlitAPIException):
setattr(self.session_state_proxy, self.reserved_key, "foo")
def test_delattr_reserved_key(self):
with pytest.raises(StreamlitAPIException):
delattr(self.session_state_proxy, self.reserved_key)
| SessionStateProxyTests |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/extractor.py | {
"start": 2378,
"end": 9095
} | class ____:
"""
Wrapper around Evaporate.
Evaporate is an open-source project from Stanford's AI Lab:
https://github.com/HazyResearch/evaporate.
Offering techniques for structured datapoint extraction.
In the current version, we use the function generator
from a set of documents.
"""
def __init__(
self,
llm: Optional[LLM] = None,
schema_id_prompt: Optional[SchemaIDPrompt] = None,
fn_generate_prompt: Optional[FnGeneratePrompt] = None,
field_extract_query_tmpl: str = DEFAULT_FIELD_EXTRACT_QUERY_TMPL,
expected_output_prefix_tmpl: str = DEFAULT_EXPECTED_OUTPUT_PREFIX_TMPL,
verbose: bool = False,
) -> None:
"""Initialize params."""
# TODO: take in an entire index instead of forming a response builder
self._llm = llm or Settings.llm
self._schema_id_prompt = schema_id_prompt or SCHEMA_ID_PROMPT
self._fn_generate_prompt = fn_generate_prompt or FN_GENERATION_PROMPT
self._field_extract_query_tmpl = field_extract_query_tmpl
self._expected_output_prefix_tmpl = expected_output_prefix_tmpl
self._verbose = verbose
def identify_fields(
self, nodes: List[BaseNode], topic: str, fields_top_k: int = 5
) -> List:
"""
Identify fields from nodes.
Will extract fields independently per node, and then
return the top k fields.
Args:
nodes (List[BaseNode]): List of nodes to extract fields from.
topic (str): Topic to use for extraction.
fields_top_k (int): Number of fields to return.
"""
field2count: dict = defaultdict(int)
for node in nodes:
result = self._llm.predict(
self._schema_id_prompt,
topic=topic,
chunk=node.get_content(metadata_mode=MetadataMode.LLM),
)
existing_fields = extract_field_dicts(
result, node.get_content(metadata_mode=MetadataMode.LLM)
)
for field in existing_fields:
field2count[field] += 1
sorted_tups: List[Tuple[str, int]] = sorted(
field2count.items(), key=lambda x: x[1], reverse=True
)
sorted_fields = [f[0] for f in sorted_tups]
return sorted_fields[:fields_top_k]
def extract_fn_from_nodes(
self, nodes: List[BaseNode], field: str, expected_output: Optional[Any] = None
) -> str:
"""Extract function from nodes."""
# avoid circular import
from llama_index.core.response_synthesizers import (
ResponseMode,
get_response_synthesizer,
)
function_field = get_function_field_from_attribute(field)
# TODO: replace with new response synthesis module
if expected_output is not None:
expected_output_str = (
f"{self._expected_output_prefix_tmpl}{expected_output!s}\n"
)
else:
expected_output_str = ""
qa_prompt = self._fn_generate_prompt.partial_format(
attribute=field,
function_field=function_field,
expected_output_str=expected_output_str,
)
response_synthesizer = get_response_synthesizer(
llm=self._llm,
text_qa_template=qa_prompt,
response_mode=ResponseMode.TREE_SUMMARIZE,
)
# ignore refine prompt for now
query_str = self._field_extract_query_tmpl.format(field=function_field)
query_bundle = QueryBundle(query_str=query_str)
response = response_synthesizer.synthesize(
query_bundle,
[NodeWithScore(node=n, score=1.0) for n in nodes],
)
fn_str = f"""def get_{function_field}_field(text: str):
\"""
Function to extract {field}.
\"""
{response!s}
"""
# format fn_str
return_idx_list = [i for i, s in enumerate(fn_str.split("\n")) if "return" in s]
if not return_idx_list:
return ""
return_idx = return_idx_list[0]
fn_str = "\n".join(fn_str.split("\n")[: return_idx + 1])
fn_str = "\n".join([s for s in fn_str.split("\n") if "print(" not in s])
return "\n".join(
[s for s in fn_str.split("\n") if s.startswith((" ", "\t", "def"))]
)
def run_fn_on_nodes(
self, nodes: List[BaseNode], fn_str: str, field_name: str, num_timeouts: int = 1
) -> List:
"""
Run function on nodes.
Calls python exec().
There are definitely security holes with this approach, use with caution.
"""
function_field = get_function_field_from_attribute(field_name)
results = []
for node in nodes:
global result
global node_text
node_text = node.get_content() # type: ignore[name-defined]
# this is temporary
result = [] # type: ignore[name-defined]
try:
with time_limit(1):
exec(fn_str, globals())
exec(f"result = get_{function_field}_field(node_text)", globals())
except TimeoutException:
raise
results.append(result) # type: ignore[name-defined]
return results
def extract_datapoints_with_fn(
self,
nodes: List[BaseNode],
topic: str,
sample_k: int = 5,
fields_top_k: int = 5,
) -> List[Dict]:
"""Extract datapoints from a list of nodes, given a topic."""
idxs = list(range(len(nodes)))
sample_k = min(sample_k, len(nodes))
subset_idxs = random.sample(idxs, sample_k)
subset_nodes = [nodes[si] for si in subset_idxs]
# get existing fields
existing_fields = self.identify_fields(
subset_nodes, topic, fields_top_k=fields_top_k
)
# then, for each existing field, generate function
function_dict = {}
for field in existing_fields:
fn = self.extract_fn_from_nodes(subset_nodes, field)
function_dict[field] = fn
# then, run function for all nodes
result_dict = {}
for field in existing_fields:
result_list = self.run_fn_on_nodes(nodes, function_dict[field], field)
result_dict[field] = result_list
# convert into list of dictionaries
result_list = []
for i in range(len(nodes)):
result_dict_i = {}
for field in existing_fields:
result_dict_i[field] = result_dict[field][i]
result_list.append(result_dict_i)
return result_list
| EvaporateExtractor |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 314039,
"end": 315941
} | class ____(StatNode):
# assert statement
#
# condition ExprNode
# value ExprNode or None
# exception (Raise/GIL)StatNode created from 'value' in PostParse transform
child_attrs = ["condition", "value", "exception"]
exception = None
def analyse_declarations(self, env):
assert self.value is None, "Message should have been replaced in PostParse()"
assert self.exception is not None, "Message should have been replaced in PostParse()"
self.exception.analyse_declarations(env)
def analyse_expressions(self, env):
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.exception = self.exception.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.globalstate.use_utility_code(
UtilityCode.load_cached("AssertionsEnabled", "Exceptions.c"))
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
code.putln("if (unlikely(__pyx_assertions_enabled())) {")
code.mark_pos(self.pos)
self.condition.generate_evaluation_code(code)
code.putln(
"if (unlikely(!%s)) {" % self.condition.result())
self.exception.generate_execution_code(code)
code.putln(
"}")
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
code.putln(
"}")
code.putln("#else")
# avoid unused labels etc.
code.putln("if ((1)); else %s" % code.error_goto(self.pos, used=False))
code.putln("#endif")
def generate_function_definitions(self, env, code):
self.condition.generate_function_definitions(env, code)
self.exception.generate_function_definitions(env, code)
def annotate(self, code):
self.condition.annotate(code)
self.exception.annotate(code)
| AssertStatNode |
python | spyder-ide__spyder | spyder/widgets/tabs.py | {
"start": 1136,
"end": 4772
} | class ____(QLineEdit):
"""Popup on top of the tab to edit its name."""
def __init__(self, parent, split_char, split_index):
"""Popup on top of the tab to edit its name."""
# Variables
# Parent (main)
self.main = parent if parent is not None else self.parent()
self.split_char = split_char
self.split_index = split_index
# Track which tab is being edited
self.tab_index = None
# Track if any text has been typed
self.has_typed = False
# Track the initial tab text
self.initial_text = None
# Widget setup
QLineEdit.__init__(self, parent=parent)
# Slot to handle tab name update
self.editingFinished.connect(self.edit_finished)
# Even filter to catch clicks and ESC key
self.installEventFilter(self)
# Clean borders and no shadow to blend with tab
self.setWindowFlags(
Qt.Popup |
Qt.FramelessWindowHint |
Qt.NoDropShadowWindowHint
)
self.setFrame(False)
def eventFilter(self, widget, event):
"""Catch clicks outside the object and ESC key press."""
if (
event.type() == QEvent.MouseButtonPress
and not self.geometry().contains(event.globalPos())
):
# Exit editing and change text
self.hide()
return True
elif event.type() == QEvent.KeyPress and event.key() == Qt.Key_Escape:
# Exit editing and restore initial text
self.setText(self.initial_text)
self.hide()
return True
elif event.type() == QEvent.KeyPress and event.text():
# Remove left margin when the user starts typing to not crop long
# names.
if not self.has_typed:
self.setTextMargins(0, 0, 0, 0)
self.has_typed = True
# Event is not interesting, raise to parent
return QLineEdit.eventFilter(self, widget, event)
def edit_tab(self, index):
"""Activate the edit tab."""
self.has_typed = False
# Sets focus, shows cursor
self.setFocus()
# Updates tab index
self.tab_index = index
# Gets tab size and adjust top margin
rect = self.main.tabRect(index)
top_margin = PANES_TABBAR_STYLESHEET.TOP_MARGIN.split('px')[0]
rect.adjust(2, int(top_margin), 0, 0)
# Sets size
self.setFixedSize(rect.size())
# Places on top of the tab
self.move(self.main.mapToGlobal(rect.topLeft()))
# Copies tab name and selects all
self.initial_text = self.main.tabText(index)
text = self.initial_text.replace('&', '')
if self.split_char:
text = text.split(self.split_char)[self.split_index]
self.setText(text)
self.selectAll()
# Center text because it looks nicer.
metrics = QFontMetrics(self.font())
text_width = metrics.width(text) + self.font().pointSize()
self.setTextMargins((rect.width() - text_width) // 2, 0, 0, 0)
if not self.isVisible():
# Makes editor visible
self.show()
def edit_finished(self):
"""On clean exit, update tab name."""
# Hides editor
self.hide()
if isinstance(self.tab_index, int) and self.tab_index >= 0:
# We are editing a valid tab, update name
tab_text = str(self.text())
self.main.setTabText(self.tab_index, tab_text)
self.main.sig_name_changed.emit(tab_text)
| EditTabNamePopup |
python | walkccc__LeetCode | solutions/2354. Number of Excellent Pairs/2354.py | {
"start": 0,
"end": 269
} | class ____:
def countExcellentPairs(self, nums: list[int], k: int) -> int:
count = collections.Counter(map(int.bit_count, set(nums)))
return sum(count[i] * count[j]
for i in count
for j in count
if i + j >= k)
| Solution |
python | huggingface__transformers | src/transformers/models/resnet/modeling_resnet.py | {
"start": 10081,
"end": 11699
} | class ____(ResNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embedder = ResNetEmbeddings(config)
self.encoder = ResNetEncoder(config)
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
) -> BaseModelOutputWithPoolingAndNoAttention:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values)
encoder_outputs = self.encoder(
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
| ResNetModel |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 22987,
"end": 36224
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
runId = graphene.NonNull(graphene.String)
# Nullable because of historical runs
pipelineSnapshotId = graphene.String()
parentPipelineSnapshotId = graphene.String()
repositoryOrigin = graphene.Field(GrapheneRepositoryOrigin)
status = graphene.NonNull(GrapheneRunStatus)
runStatus = graphene.Field(
graphene.NonNull(GrapheneRunStatus),
description="Included to comply with RunsFeedEntry interface. Duplicate of status.",
)
pipeline = graphene.NonNull(GraphenePipelineReference)
pipelineName = graphene.NonNull(graphene.String)
jobName = graphene.NonNull(graphene.String)
solidSelection = graphene.List(graphene.NonNull(graphene.String))
assetSelection = graphene.List(graphene.NonNull(GrapheneAssetKey))
assetCheckSelection = graphene.List(graphene.NonNull(GrapheneAssetCheckHandle))
resolvedOpSelection = graphene.List(graphene.NonNull(graphene.String))
stats = graphene.NonNull(GrapheneRunStatsSnapshotOrError)
stepStats = non_null_list(GrapheneRunStepStats)
executionPlan = graphene.Field(GrapheneExecutionPlan)
stepKeysToExecute = graphene.List(graphene.NonNull(graphene.String))
runConfigYaml = graphene.NonNull(graphene.String)
runConfig = graphene.NonNull(GrapheneRunConfigData)
mode = graphene.NonNull(graphene.String)
tags = non_null_list(GraphenePipelineTag)
rootRunId = graphene.Field(graphene.String)
parentRunId = graphene.Field(graphene.String)
canTerminate = graphene.NonNull(graphene.Boolean)
assetMaterializations = non_null_list(GrapheneMaterializationEvent)
assets = non_null_list(GrapheneAsset)
assetChecks = graphene.List(graphene.NonNull(GrapheneAssetCheckHandle))
eventConnection = graphene.Field(
graphene.NonNull(GrapheneEventConnection),
afterCursor=graphene.Argument(graphene.String),
limit=graphene.Argument(graphene.Int),
)
creationTime = graphene.NonNull(graphene.Float)
startTime = graphene.Float()
endTime = graphene.Float()
updateTime = graphene.Float()
hasReExecutePermission = graphene.NonNull(graphene.Boolean)
hasTerminatePermission = graphene.NonNull(graphene.Boolean)
hasDeletePermission = graphene.NonNull(graphene.Boolean)
hasConcurrencyKeySlots = graphene.NonNull(graphene.Boolean)
rootConcurrencyKeys = graphene.List(graphene.NonNull(graphene.String))
allPools = graphene.List(graphene.NonNull(graphene.String))
hasUnconstrainedRootNodes = graphene.NonNull(graphene.Boolean)
hasRunMetricsEnabled = graphene.NonNull(graphene.Boolean)
externalJobSource = graphene.String()
class Meta:
interfaces = (GraphenePipelineRun, GrapheneRunsFeedEntry)
name = "Run"
def __init__(self, record: RunRecord):
check.inst_param(record, "record", RunRecord)
dagster_run = record.dagster_run
super().__init__(
runId=dagster_run.run_id,
status=dagster_run.status.value,
runStatus=dagster_run.status.value,
mode=DEFAULT_MODE_NAME,
)
self.dagster_run = dagster_run
self._run_record = record
self._run_stats: Optional[DagsterRunStatsSnapshot] = None
@property
def creation_timestamp(self) -> float:
return self._run_record.create_timestamp.timestamp()
def resolve_hasReExecutePermission(self, graphene_info: ResolveInfo):
return has_permission_for_run(
graphene_info, Permissions.LAUNCH_PIPELINE_REEXECUTION, self.dagster_run
)
def resolve_hasTerminatePermission(self, graphene_info: ResolveInfo):
return has_permission_for_run(
graphene_info, Permissions.TERMINATE_PIPELINE_EXECUTION, self.dagster_run
)
def resolve_hasDeletePermission(self, graphene_info: ResolveInfo):
return has_permission_for_run(
graphene_info, Permissions.DELETE_PIPELINE_RUN, self.dagster_run
)
def resolve_id(self, _graphene_info: ResolveInfo):
return self.dagster_run.run_id
def resolve_repositoryOrigin(self, _graphene_info: ResolveInfo):
return (
GrapheneRepositoryOrigin(self.dagster_run.remote_job_origin.repository_origin)
if self.dagster_run.remote_job_origin
else None
)
def resolve_pipeline(self, graphene_info: ResolveInfo):
return get_job_reference_or_raise(graphene_info, self.dagster_run)
def resolve_pipelineName(self, _graphene_info: ResolveInfo):
return self.dagster_run.job_name
def resolve_jobName(self, _graphene_info: ResolveInfo):
return self.dagster_run.job_name
def resolve_solidSelection(self, _graphene_info: ResolveInfo):
return self.dagster_run.op_selection
def resolve_assetSelection(self, _graphene_info: ResolveInfo):
return self.dagster_run.asset_selection
def resolve_assetCheckSelection(self, _graphene_info: ResolveInfo):
return (
[GrapheneAssetCheckHandle(handle) for handle in self.dagster_run.asset_check_selection]
if self.dagster_run.asset_check_selection is not None
else None
)
def resolve_resolvedOpSelection(self, _graphene_info: ResolveInfo):
return self.dagster_run.resolved_op_selection
def resolve_pipelineSnapshotId(self, _graphene_info: ResolveInfo):
return self.dagster_run.job_snapshot_id
def resolve_parentPipelineSnapshotId(self, graphene_info: ResolveInfo):
pipeline_snapshot_id = self.dagster_run.job_snapshot_id
if pipeline_snapshot_id is not None and graphene_info.context.instance.has_job_snapshot(
pipeline_snapshot_id
):
snapshot = graphene_info.context.instance.get_job_snapshot(pipeline_snapshot_id)
if snapshot.lineage_snapshot is not None:
return snapshot.lineage_snapshot.parent_snapshot_id
return None
@capture_error
def resolve_stats(self, graphene_info: ResolveInfo):
return get_stats(graphene_info, self.run_id)
def resolve_stepStats(self, graphene_info: ResolveInfo):
return get_step_stats(graphene_info, self.run_id)
def resolve_capturedLogs(self, graphene_info: ResolveInfo, fileKey):
compute_log_manager = get_compute_log_manager(graphene_info)
log_key = compute_log_manager.build_log_key_for_run(self.run_id, fileKey)
log_data = compute_log_manager.get_log_data(log_key)
return from_captured_log_data(log_data)
def resolve_executionPlan(self, graphene_info: ResolveInfo):
if not (self.dagster_run.execution_plan_snapshot_id and self.dagster_run.job_snapshot_id):
return None
instance = graphene_info.context.instance
execution_plan_snapshot = instance.get_execution_plan_snapshot(
self.dagster_run.execution_plan_snapshot_id
)
return (
GrapheneExecutionPlan(
RemoteExecutionPlan(execution_plan_snapshot=execution_plan_snapshot)
)
if execution_plan_snapshot
else None
)
def resolve_stepKeysToExecute(self, _graphene_info: ResolveInfo):
return self.dagster_run.step_keys_to_execute
def resolve_runConfigYaml(self, _graphene_info: ResolveInfo):
return dump_run_config_yaml(self.dagster_run.run_config)
def resolve_runConfig(self, _graphene_info: ResolveInfo):
return self.dagster_run.run_config
def resolve_tags(self, _graphene_info: ResolveInfo):
return [
GraphenePipelineTag(key=key, value=value)
for key, value in self.dagster_run.tags.items()
if get_tag_type(key) != TagType.HIDDEN
]
def resolve_externalJobSource(self, _graphene_info: ResolveInfo):
source_str = self.dagster_run.tags.get(EXTERNAL_JOB_SOURCE_TAG_KEY)
if source_str:
return source_str.lower()
return None
def resolve_rootRunId(self, _graphene_info: ResolveInfo):
return self.dagster_run.root_run_id
def resolve_parentRunId(self, _graphene_info: ResolveInfo):
return self.dagster_run.parent_run_id
@property
def run_id(self):
return self.runId
def resolve_canTerminate(self, _graphene_info: ResolveInfo):
# short circuit if the pipeline run is in a terminal state
if self.dagster_run.is_finished:
return False
return (
self.dagster_run.status == DagsterRunStatus.QUEUED
or self.dagster_run.status == DagsterRunStatus.STARTED
)
def resolve_assets(self, graphene_info: ResolveInfo):
return get_assets_for_run(graphene_info, self.dagster_run)
def resolve_assetChecks(self, graphene_info: ResolveInfo):
return get_asset_checks_for_run_id(graphene_info, self.run_id)
def resolve_assetMaterializations(self, graphene_info: ResolveInfo):
# convenience field added for users querying directly via GraphQL
return [
GrapheneMaterializationEvent(event=event)
for event in graphene_info.context.instance.all_logs(
self.run_id, of_type=DagsterEventType.ASSET_MATERIALIZATION
)
]
def resolve_eventConnection(self, graphene_info: ResolveInfo, afterCursor=None, limit=None):
default_limit = graphene_info.context.records_for_run_default_limit
if default_limit:
limit = get_query_limit_with_default(limit, default_limit)
conn = graphene_info.context.instance.get_records_for_run(
self.run_id, cursor=afterCursor, limit=limit
)
return GrapheneEventConnection(
events=get_graphene_events_from_records_connection(
graphene_info.context.instance, conn, self.dagster_run.job_name
),
cursor=conn.cursor,
hasMore=conn.has_more,
)
def resolve_startTime(self, graphene_info: ResolveInfo):
# If a user has not migrated in 0.13.15, then run_record will not have start_time and end_time. So it will be necessary to fill this data using the run_stats. Since we potentially make this call multiple times, we cache the result.
if self._run_record.start_time is None and self.dagster_run.status in STARTED_STATUSES:
# Short-circuit if pipeline failed to start, so it has an end time but no start time
if self._run_record.end_time is not None:
return self._run_record.end_time
if self._run_stats is None or self._run_stats.start_time is None:
self._run_stats = graphene_info.context.instance.get_run_stats(self.runId)
if self._run_stats.start_time is None and self._run_stats.end_time:
return self._run_stats.end_time
return self._run_stats.start_time
return self._run_record.start_time
def resolve_endTime(self, graphene_info: ResolveInfo):
if self._run_record.end_time is None and self.dagster_run.status in COMPLETED_STATUSES:
if self._run_stats is None or self._run_stats.end_time is None:
self._run_stats = graphene_info.context.instance.get_run_stats(self.runId)
return self._run_stats.end_time
return self._run_record.end_time
def resolve_updateTime(self, graphene_info: ResolveInfo):
return self._run_record.update_timestamp.timestamp()
def resolve_creationTime(self, graphene_info: ResolveInfo):
return self.creation_timestamp
def resolve_hasConcurrencyKeySlots(self, graphene_info: ResolveInfo):
instance = graphene_info.context.instance
if not instance.event_log_storage.supports_global_concurrency_limits:
return False
active_run_ids = instance.event_log_storage.get_concurrency_run_ids()
return self.runId in active_run_ids
def resolve_hasUnconstrainedRootNodes(self, graphene_info: ResolveInfo):
if not self.dagster_run.run_op_concurrency:
return True
if self.dagster_run.run_op_concurrency.has_unconstrained_root_nodes:
return True
return False
def resolve_allPools(self, graphene_info: ResolveInfo):
if not self.dagster_run.run_op_concurrency:
return None
return (
list(self.dagster_run.run_op_concurrency.all_pools)
if self.dagster_run.run_op_concurrency.all_pools
else []
)
def resolve_rootConcurrencyKeys(self, graphene_info: ResolveInfo):
if not self.dagster_run.run_op_concurrency:
return None
root_concurrency_keys = []
for concurrency_key, count in self.dagster_run.run_op_concurrency.root_key_counts.items():
root_concurrency_keys.extend([concurrency_key] * count)
return root_concurrency_keys
def resolve_hasRunMetricsEnabled(self, graphene_info: ResolveInfo):
if self.dagster_run.status in UNSTARTED_STATUSES:
return False
run_tags = self.dagster_run.tags
return any(get_boolean_tag_value(run_tags.get(tag)) for tag in RUN_METRIC_TAGS)
| GrapheneRun |
python | ApeWorX__ape | src/ape/logging.py | {
"start": 443,
"end": 1947
} | class ____(IntEnum):
ERROR = logging.ERROR
WARNING = logging.WARNING
SUCCESS = logging.INFO + 1
INFO = logging.INFO
DEBUG = logging.DEBUG
logging.addLevelName(LogLevel.SUCCESS.value, LogLevel.SUCCESS.name)
logging.SUCCESS = LogLevel.SUCCESS.value # type: ignore
DEFAULT_LOG_LEVEL = LogLevel.INFO.name
DEFAULT_LOG_FORMAT = "%(levelname_semicolon_padded)s %(plugin)s %(message)s"
HIDDEN_MESSAGE = "[hidden]"
def success(self, message, *args, **kws):
"""This method gets injected into python's `logging` module
to handle logging at this level."""
if self.isEnabledFor(LogLevel.SUCCESS.value):
# Yes, logger takes its '*args' as 'args'.
self._log(LogLevel.SUCCESS.value, message, args, **kws)
logging.Logger.success = success # type: ignore
CLICK_STYLE_KWARGS = {
LogLevel.ERROR: dict(fg="bright_red"),
LogLevel.WARNING: dict(fg="bright_yellow"),
LogLevel.SUCCESS: dict(fg="bright_green"),
LogLevel.INFO: dict(fg="blue"),
LogLevel.DEBUG: dict(fg="blue"),
}
CLICK_ECHO_KWARGS = {
LogLevel.ERROR: dict(err=True),
LogLevel.WARNING: dict(err=True),
LogLevel.SUCCESS: dict(),
LogLevel.INFO: dict(),
LogLevel.DEBUG: dict(),
}
# Borrowed from `click._compat`.
def _isatty(stream: IO) -> bool:
"""Returns ``True`` if the stream is part of a tty.
Borrowed from ``click._compat``."""
# noinspection PyBroadException
try:
return stream.isatty()
except Exception:
return False
| LogLevel |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py | {
"start": 777,
"end": 2707
} | class ____(str, Enum):
"""A Criteria to evaluate."""
CONCISENESS = "conciseness"
RELEVANCE = "relevance"
CORRECTNESS = "correctness"
COHERENCE = "coherence"
HARMFULNESS = "harmfulness"
MALICIOUSNESS = "maliciousness"
HELPFULNESS = "helpfulness"
CONTROVERSIALITY = "controversiality"
MISOGYNY = "misogyny"
CRIMINALITY = "criminality"
INSENSITIVITY = "insensitivity"
DEPTH = "depth"
CREATIVITY = "creativity"
DETAIL = "detail"
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?"
" If so, respond Y. If not, respond N.",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?"
" If so, respond Y. If not, respond N.",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?"
" If so, respond Y. If not, respond N.",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?"
" If so, respond Y. If not, respond N.",
Criteria.MISOGYNY: "Is the submission misogynistic or sexist?"
" If so, respond Y. If not, respond N.",
Criteria.CRIMINALITY: "Is the submission criminal in any way?"
" If so, respond Y. If not, respond N.",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?"
" If so, respond Y. If not, respond N.",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
| Criteria |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 80689,
"end": 80825
} | class ____(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
| ObjectWithName |
python | scikit-learn__scikit-learn | sklearn/random_projection.py | {
"start": 10555,
"end": 16104
} | class ____(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 1, None, closed="left"),
StrOptions({"auto"}),
],
"eps": [Interval(Real, 0, None, closed="neither")],
"compute_inverse_components": ["boolean"],
"random_state": ["random_state"],
}
@abstractmethod
def __init__(
self,
n_components="auto",
*,
eps=0.1,
compute_inverse_components=False,
random_state=None,
):
self.n_components = n_components
self.eps = eps
self.compute_inverse_components = compute_inverse_components
self.random_state = random_state
@abstractmethod
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : {ndarray, sparse matrix} of shape (n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
def _compute_inverse_components(self):
"""Compute the pseudo-inverse of the (densified) components."""
components = self.components_
if sp.issparse(components):
components = components.toarray()
return linalg.pinv(components, check_finite=False)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
afore mentioned papers.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
BaseRandomProjection class instance.
"""
X = validate_data(
self, X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32]
)
n_samples, n_features = X.shape
if self.n_components == "auto":
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps
)
if self.n_components_ <= 0:
raise ValueError(
"eps=%f and n_samples=%d lead to a target dimension of "
"%d which is invalid" % (self.eps, n_samples, self.n_components_)
)
elif self.n_components_ > n_features:
raise ValueError(
"eps=%f and n_samples=%d lead to a target dimension of "
"%d which is larger than the original space with "
"n_features=%d"
% (self.eps, n_samples, self.n_components_, n_features)
)
else:
if self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning,
)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(
self.n_components_, n_features
).astype(X.dtype, copy=False)
if self.compute_inverse_components:
self.inverse_components_ = self._compute_inverse_components()
# Required by ClassNamePrefixFeaturesOutMixin.get_feature_names_out.
self._n_features_out = self.n_components
return self
def inverse_transform(self, X):
"""Project data back to its original space.
Returns an array X_original whose transform would be X. Note that even
if X is sparse, X_original is dense: this may use a lot of RAM.
If `compute_inverse_components` is False, the inverse of the components is
computed during each call to `inverse_transform` which can be costly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_components)
Data to be transformed back.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data.
"""
check_is_fitted(self)
X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc"))
if self.compute_inverse_components:
return X @ self.inverse_components_.T
inverse_components = self._compute_inverse_components()
return X @ inverse_components.T
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
tags.input_tags.sparse = True
return tags
| BaseRandomProjection |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_checks.py | {
"start": 8416,
"end": 8634
} | class ____(graphene.ObjectType):
message = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneError,)
name = "AssetCheckNeedsMigrationError"
| GrapheneAssetCheckNeedsMigrationError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 500475,
"end": 501112
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("ProjectColumnEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("ProjectColumn"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ProjectColumnConnection |
python | pennersr__django-allauth | allauth/socialaccount/providers/orcid/provider.py | {
"start": 428,
"end": 1622
} | class ____(OAuth2Provider):
id = "orcid"
name = "Orcid.org"
account_class = OrcidAccount
oauth2_adapter_class = OrcidOAuth2Adapter
def get_default_scope(self):
return [Scope.USERINFO_PROFILE]
def extract_uid(self, data):
return extract_from_dict(data, ["orcid-identifier", "path"])
def extract_common_fields(self, data):
common_fields = dict(
email=extract_from_dict(data, ["person", "emails", "email", 0, "email"]),
last_name=extract_from_dict(
data, ["person", "name", "family-name", "value"]
),
first_name=extract_from_dict(
data, ["person", "name", "given-names", "value"]
),
)
return dict((key, value) for (key, value) in common_fields.items() if value)
provider_classes = [OrcidProvider]
def extract_from_dict(data, path):
"""
Navigate `data`, a multidimensional array (list or dictionary), and returns
the object at `path`.
"""
value = data
try:
for key in path:
value = value[key]
return value
except (KeyError, IndexError, TypeError):
return ""
| OrcidProvider |
python | joke2k__faker | tests/providers/test_lorem.py | {
"start": 11988,
"end": 14809
} | class ____:
"""Test az_AZ lorem provider"""
word_list = [word.lower() for word in AzAzLoremProvider.word_list]
def test_paragraph(self, faker, num_samples):
num_sentences = 10
for _ in range(num_samples):
paragraph = faker.paragraph(nb_sentences=num_sentences)
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_paragraphs(self, faker, num_samples):
num_paragraphs = 5
for _ in range(num_samples):
paragraphs = faker.paragraphs(nb=num_paragraphs)
for paragraph in paragraphs:
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentence(self, faker, num_samples):
num_words = 10
for _ in range(num_samples):
sentence = faker.sentence(nb_words=num_words)
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentences(self, faker, num_samples):
num_sentences = 5
for _ in range(num_samples):
sentences = faker.sentences(nb=num_sentences)
for sentence in sentences:
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_text(self, faker, num_samples):
num_chars = 25
for _ in range(num_samples):
text = faker.text(max_nb_chars=num_chars)
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_texts(self, faker, num_samples):
num_texts = 5
num_chars = 25
for _ in range(num_samples):
texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
for text in texts:
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_word(self, faker, num_samples):
for _ in range(num_samples):
word = faker.word()
assert isinstance(word, str) and word in AzAzLoremProvider.word_list
def test_words(self, faker, num_samples):
num_words = 5
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in AzAzLoremProvider.word_list for word in words)
| TestAzAz |
python | encode__starlette | tests/test_routing.py | {
"start": 1889,
"end": 11751
} | class ____:
@classmethod
async def async_endpoint(cls, arg: str, request: Request) -> JSONResponse:
return JSONResponse({"arg": arg})
@classmethod
async def async_ws_endpoint(cls, websocket: WebSocket) -> None:
await websocket.accept()
await websocket.send_json({"url": str(websocket.url)})
await websocket.close()
def func_homepage(request: Request) -> Response:
return Response("Hello, world!", media_type="text/plain")
def contact(request: Request) -> Response:
return Response("Hello, POST!", media_type="text/plain")
def int_convertor(request: Request) -> JSONResponse:
number = request.path_params["param"]
return JSONResponse({"int": number})
def float_convertor(request: Request) -> JSONResponse:
num = request.path_params["param"]
return JSONResponse({"float": num})
def path_convertor(request: Request) -> JSONResponse:
path = request.path_params["param"]
return JSONResponse({"path": path})
def uuid_converter(request: Request) -> JSONResponse:
uuid_param = request.path_params["param"]
return JSONResponse({"uuid": str(uuid_param)})
def path_with_parentheses(request: Request) -> JSONResponse:
number = request.path_params["param"]
return JSONResponse({"int": number})
async def websocket_endpoint(session: WebSocket) -> None:
await session.accept()
await session.send_text("Hello, world!")
await session.close()
async def websocket_params(session: WebSocket) -> None:
await session.accept()
await session.send_text(f"Hello, {session.path_params['room']}!")
await session.close()
app = Router(
[
Route("/", endpoint=homepage, methods=["GET"]),
Mount(
"/users",
routes=[
Route("/", endpoint=users),
Route("/me", endpoint=user_me),
Route("/{username}", endpoint=user),
Route("/{username}:disable", endpoint=disable_user, methods=["PUT"]),
Route("/nomatch", endpoint=user_no_match),
],
),
Mount(
"/partial",
routes=[
Route("/", endpoint=functools.partial(partial_endpoint, "foo")),
Route(
"/cls",
endpoint=functools.partial(PartialRoutes.async_endpoint, "foo"),
),
WebSocketRoute("/ws", endpoint=functools.partial(partial_ws_endpoint)),
WebSocketRoute(
"/ws/cls",
endpoint=functools.partial(PartialRoutes.async_ws_endpoint),
),
],
),
Mount("/static", app=Response("xxxxx", media_type="image/png")),
Route("/func", endpoint=func_homepage, methods=["GET"]),
Route("/func", endpoint=contact, methods=["POST"]),
Route("/int/{param:int}", endpoint=int_convertor, name="int-convertor"),
Route("/float/{param:float}", endpoint=float_convertor, name="float-convertor"),
Route("/path/{param:path}", endpoint=path_convertor, name="path-convertor"),
Route("/uuid/{param:uuid}", endpoint=uuid_converter, name="uuid-convertor"),
# Route with chars that conflict with regex meta chars
Route(
"/path-with-parentheses({param:int})",
endpoint=path_with_parentheses,
name="path-with-parentheses",
),
WebSocketRoute("/ws", endpoint=websocket_endpoint),
WebSocketRoute("/ws/{room}", endpoint=websocket_params),
]
)
@pytest.fixture
def client(
test_client_factory: TestClientFactory,
) -> Generator[TestClient, None, None]:
with test_client_factory(app) as client:
yield client
@pytest.mark.filterwarnings(
r"ignore"
r":Trying to detect encoding from a tiny portion of \(5\) byte\(s\)\."
r":UserWarning"
r":charset_normalizer.api"
)
def test_router(client: TestClient) -> None:
response = client.get("/")
assert response.status_code == 200
assert response.text == "Hello, world"
response = client.post("/")
assert response.status_code == 405
assert response.text == "Method Not Allowed"
assert set(response.headers["allow"].split(", ")) == {"HEAD", "GET"}
response = client.get("/foo")
assert response.status_code == 404
assert response.text == "Not Found"
response = client.get("/users")
assert response.status_code == 200
assert response.text == "All users"
response = client.get("/users/tomchristie")
assert response.status_code == 200
assert response.text == "User tomchristie"
response = client.get("/users/me")
assert response.status_code == 200
assert response.text == "User fixed me"
response = client.get("/users/tomchristie/")
assert response.status_code == 200
assert response.url == "http://testserver/users/tomchristie"
assert response.text == "User tomchristie"
response = client.put("/users/tomchristie:disable")
assert response.status_code == 200
assert response.url == "http://testserver/users/tomchristie:disable"
assert response.text == "User tomchristie disabled"
response = client.get("/users/nomatch")
assert response.status_code == 200
assert response.text == "User nomatch"
response = client.get("/static/123")
assert response.status_code == 200
assert response.text == "xxxxx"
def test_route_converters(client: TestClient) -> None:
# Test integer conversion
response = client.get("/int/5")
assert response.status_code == 200
assert response.json() == {"int": 5}
assert app.url_path_for("int-convertor", param=5) == "/int/5"
# Test path with parentheses
response = client.get("/path-with-parentheses(7)")
assert response.status_code == 200
assert response.json() == {"int": 7}
assert app.url_path_for("path-with-parentheses", param=7) == "/path-with-parentheses(7)"
# Test float conversion
response = client.get("/float/25.5")
assert response.status_code == 200
assert response.json() == {"float": 25.5}
assert app.url_path_for("float-convertor", param=25.5) == "/float/25.5"
# Test path conversion
response = client.get("/path/some/example")
assert response.status_code == 200
assert response.json() == {"path": "some/example"}
assert app.url_path_for("path-convertor", param="some/example") == "/path/some/example"
# Test UUID conversion
response = client.get("/uuid/ec38df32-ceda-4cfa-9b4a-1aeb94ad551a")
assert response.status_code == 200
assert response.json() == {"uuid": "ec38df32-ceda-4cfa-9b4a-1aeb94ad551a"}
assert (
app.url_path_for("uuid-convertor", param=uuid.UUID("ec38df32-ceda-4cfa-9b4a-1aeb94ad551a"))
== "/uuid/ec38df32-ceda-4cfa-9b4a-1aeb94ad551a"
)
def test_url_path_for() -> None:
assert app.url_path_for("homepage") == "/"
assert app.url_path_for("user", username="tomchristie") == "/users/tomchristie"
assert app.url_path_for("websocket_endpoint") == "/ws"
with pytest.raises(NoMatchFound, match='No route exists for name "broken" and params "".'):
assert app.url_path_for("broken")
with pytest.raises(NoMatchFound, match='No route exists for name "broken" and params "key, key2".'):
assert app.url_path_for("broken", key="value", key2="value2")
with pytest.raises(AssertionError):
app.url_path_for("user", username="tom/christie")
with pytest.raises(AssertionError):
app.url_path_for("user", username="")
def test_url_for() -> None:
assert app.url_path_for("homepage").make_absolute_url(base_url="https://example.org") == "https://example.org/"
assert (
app.url_path_for("homepage").make_absolute_url(base_url="https://example.org/root_path/")
== "https://example.org/root_path/"
)
assert (
app.url_path_for("user", username="tomchristie").make_absolute_url(base_url="https://example.org")
== "https://example.org/users/tomchristie"
)
assert (
app.url_path_for("user", username="tomchristie").make_absolute_url(base_url="https://example.org/root_path/")
== "https://example.org/root_path/users/tomchristie"
)
assert (
app.url_path_for("websocket_endpoint").make_absolute_url(base_url="https://example.org")
== "wss://example.org/ws"
)
def test_router_add_route(client: TestClient) -> None:
response = client.get("/func")
assert response.status_code == 200
assert response.text == "Hello, world!"
def test_router_duplicate_path(client: TestClient) -> None:
response = client.post("/func")
assert response.status_code == 200
assert response.text == "Hello, POST!"
def test_router_add_websocket_route(client: TestClient) -> None:
with client.websocket_connect("/ws") as session:
text = session.receive_text()
assert text == "Hello, world!"
with client.websocket_connect("/ws/test") as session:
text = session.receive_text()
assert text == "Hello, test!"
def test_router_middleware(test_client_factory: TestClientFactory) -> None:
class CustomMiddleware:
def __init__(self, app: ASGIApp) -> None:
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
response = PlainTextResponse("OK")
await response(scope, receive, send)
app = Router(
routes=[Route("/", homepage)],
middleware=[Middleware(CustomMiddleware)],
)
client = test_client_factory(app)
response = client.get("/")
assert response.status_code == 200
assert response.text == "OK"
def http_endpoint(request: Request) -> Response:
url = request.url_for("http_endpoint")
return Response(f"URL: {url}", media_type="text/plain")
| PartialRoutes |
python | coleifer__peewee | tests/fields.py | {
"start": 5163,
"end": 10262
} | class ____(ModelTestCase):
requires = [DateModel]
@requires_models(CustomDateTimeModel)
def test_date_time_custom_format(self):
cdtm = CustomDateTimeModel.create(date_time='01/02/2003 01:37 PM')
cdtm_db = CustomDateTimeModel[cdtm.id]
self.assertEqual(cdtm_db.date_time,
datetime.datetime(2003, 1, 2, 13, 37, 0))
def test_date_fields(self):
dt1 = datetime.datetime(2011, 1, 2, 11, 12, 13, 54321)
dt2 = datetime.datetime(2011, 1, 2, 11, 12, 13)
d1 = datetime.date(2011, 1, 3)
t1 = datetime.time(11, 12, 13, 54321)
t2 = datetime.time(11, 12, 13)
if isinstance(self.database, MySQLDatabase):
dt1 = dt1.replace(microsecond=0)
t1 = t1.replace(microsecond=0)
dm1 = DateModel.create(date_time=dt1, date=d1, time=t1)
dm2 = DateModel.create(date_time=dt2, time=t2)
dm1_db = DateModel.get(DateModel.id == dm1.id)
self.assertEqual(dm1_db.date, d1)
self.assertEqual(dm1_db.date_time, dt1)
self.assertEqual(dm1_db.time, t1)
dm2_db = DateModel.get(DateModel.id == dm2.id)
self.assertEqual(dm2_db.date, None)
self.assertEqual(dm2_db.date_time, dt2)
self.assertEqual(dm2_db.time, t2)
def test_extract_parts(self):
dm = DateModel.create(
date_time=datetime.datetime(2011, 1, 2, 11, 12, 13, 54321),
date=datetime.date(2012, 2, 3),
time=datetime.time(3, 13, 37))
query = (DateModel
.select(DateModel.date_time.year, DateModel.date_time.month,
DateModel.date_time.day, DateModel.date_time.hour,
DateModel.date_time.minute,
DateModel.date_time.second, DateModel.date.year,
DateModel.date.month, DateModel.date.day,
DateModel.time.hour, DateModel.time.minute,
DateModel.time.second)
.tuples())
row, = query
if IS_SQLITE or IS_MYSQL:
self.assertEqual(row,
(2011, 1, 2, 11, 12, 13, 2012, 2, 3, 3, 13, 37))
else:
self.assertTrue(row in [
(2011., 1., 2., 11., 12., 13.054321, 2012., 2., 3., 3., 13.,
37.),
(D('2011'), D('1'), D('2'), D('11'), D('12'), D('13.054321'),
D('2012'), D('2'), D('3'), D('3'), D('13'), D('37'))])
def test_truncate_date(self):
dm = DateModel.create(
date_time=datetime.datetime(2001, 2, 3, 4, 5, 6, 7),
date=datetime.date(2002, 3, 4))
accum = []
for p in ('year', 'month', 'day', 'hour', 'minute', 'second'):
accum.append(DateModel.date_time.truncate(p))
for p in ('year', 'month', 'day'):
accum.append(DateModel.date.truncate(p))
query = DateModel.select(*accum).tuples()
data = list(query[0])
# Postgres includes timezone info, so strip that for comparison.
if IS_POSTGRESQL or IS_CRDB:
data = [dt.replace(tzinfo=None) for dt in data]
self.assertEqual(data, [
datetime.datetime(2001, 1, 1, 0, 0, 0),
datetime.datetime(2001, 2, 1, 0, 0, 0),
datetime.datetime(2001, 2, 3, 0, 0, 0),
datetime.datetime(2001, 2, 3, 4, 0, 0),
datetime.datetime(2001, 2, 3, 4, 5, 0),
datetime.datetime(2001, 2, 3, 4, 5, 6),
datetime.datetime(2002, 1, 1, 0, 0, 0),
datetime.datetime(2002, 3, 1, 0, 0, 0),
datetime.datetime(2002, 3, 4, 0, 0, 0)])
def test_to_timestamp(self):
dt = datetime.datetime(2019, 1, 2, 3, 4, 5)
ts = calendar.timegm(dt.utctimetuple())
dt2 = datetime.datetime(2019, 1, 3)
ts2 = calendar.timegm(dt2.utctimetuple())
DateModel.create(date_time=dt, date=dt2.date())
query = DateModel.select(
DateModel.id,
DateModel.date_time.to_timestamp().alias('dt_ts'),
DateModel.date.to_timestamp().alias('dt2_ts'))
obj = query.get()
self.assertEqual(obj.dt_ts, ts)
self.assertEqual(obj.dt2_ts, ts2)
ts3 = ts + 86400
query = (DateModel.select()
.where((DateModel.date_time.to_timestamp() + 86400) < ts3))
self.assertRaises(DateModel.DoesNotExist, query.get)
query = (DateModel.select()
.where((DateModel.date.to_timestamp() + 86400) > ts3))
self.assertEqual(query.get().id, obj.id)
def test_distinct_date_part(self):
years = (1980, 1990, 2000, 2010)
for i, year in enumerate(years):
for j in range(i + 1):
DateModel.create(date=datetime.date(year, i + 1, 1))
query = (DateModel
.select(DateModel.date.year.distinct())
.order_by(DateModel.date.year))
self.assertEqual([year for year, in query.tuples()],
[1980, 1990, 2000, 2010])
| TestDateFields |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/vertex_ai/test_generative_model.py | {
"start": 6798,
"end": 7998
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("generative_model.GenerativeModelHook"))
@mock.patch("google.cloud.aiplatform_v1beta1.types.CountTokensResponse.to_dict")
def test_execute(self, to_dict_mock, mock_hook):
contents = ["In 10 words or less, what is Apache Airflow?"]
pretrained_model = "gemini-pro"
with pytest.warns(AirflowProviderDeprecationWarning):
op = CountTokensOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
contents=contents,
pretrained_model=pretrained_model,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.count_tokens.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
contents=contents,
pretrained_model=pretrained_model,
)
| TestVertexAICountTokensOperator |
python | realpython__materials | typer-cli-python/source_code_final/rptodo/rptodo.py | {
"start": 294,
"end": 2385
} | class ____:
def __init__(self, db_path: Path) -> None:
self._db_handler = DatabaseHandler(db_path)
def add(self, description: List[str], priority: int = 2) -> CurrentTodo:
"""Add a new to-do to the database."""
description_text = " ".join(description)
if not description_text.endswith("."):
description_text += "."
todo = {
"Description": description_text,
"Priority": priority,
"Done": False,
}
read = self._db_handler.read_todos()
if read.error == DB_READ_ERROR:
return CurrentTodo(todo, read.error)
read.todo_list.append(todo)
write = self._db_handler.write_todos(read.todo_list)
return CurrentTodo(todo, write.error)
def get_todo_list(self) -> List[Dict[str, Any]]:
"""Return the current to-do list."""
read = self._db_handler.read_todos()
return read.todo_list
def set_done(self, todo_id: int) -> CurrentTodo:
"""Set a to-do as done."""
read = self._db_handler.read_todos()
if read.error:
return CurrentTodo({}, read.error)
try:
todo = read.todo_list[todo_id - 1]
except IndexError:
return CurrentTodo({}, ID_ERROR)
todo["Done"] = True
write = self._db_handler.write_todos(read.todo_list)
return CurrentTodo(todo, write.error)
def remove(self, todo_id: int) -> CurrentTodo:
"""Remove a to-do from the database using its id or index."""
read = self._db_handler.read_todos()
if read.error:
return CurrentTodo({}, read.error)
try:
todo = read.todo_list.pop(todo_id - 1)
except IndexError:
return CurrentTodo({}, ID_ERROR)
write = self._db_handler.write_todos(read.todo_list)
return CurrentTodo(todo, write.error)
def remove_all(self) -> CurrentTodo:
"""Remove all to-dos from the database."""
write = self._db_handler.write_todos([])
return CurrentTodo({}, write.error)
| Todoer |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_internal_app_tokens.py | {
"start": 440,
"end": 1144
} | class ____(APITestCase):
endpoint = "sentry-api-0-sentry-internal-app-tokens"
def setUp(self) -> None:
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user, name="My Org")
self.project = self.create_project(organization=self.org)
self.internal_sentry_app = self.create_internal_integration(
name="My Internal App", organization=self.org
)
self.token = self.create_internal_integration_token(
user=self.user, internal_integration=self.internal_sentry_app
)
self.superuser = self.create_user(is_superuser=True)
@control_silo_test
| SentryInternalAppTokenTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 16034,
"end": 16463
} | class ____(DistributedDelegate, ds_types.Mirrored):
"""Holds a map from replica to values which are kept in sync."""
def _get_cross_replica(self):
return self._get_on_device_or_primary()
def _as_graph_element(self):
obj = self._get()
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return obj
def _is_mirrored(self):
return True
| Mirrored |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 9844,
"end": 10069
} | class ____(TestCase):
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
| TestApplyOverAxes |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_ticker.py | {
"start": 29184,
"end": 37164
} | class ____:
offset_data = [
(123, 189, 0),
(-189, -123, 0),
(12341, 12349, 12340),
(-12349, -12341, -12340),
(99999.5, 100010.5, 100000),
(-100010.5, -99999.5, -100000),
(99990.5, 100000.5, 100000),
(-100000.5, -99990.5, -100000),
(1233999, 1234001, 1234000),
(-1234001, -1233999, -1234000),
(1, 1, 1),
(123, 123, 0),
# Test cases courtesy of @WeatherGod
(.4538, .4578, .45),
(3789.12, 3783.1, 3780),
(45124.3, 45831.75, 45000),
(0.000721, 0.0007243, 0.00072),
(12592.82, 12591.43, 12590),
(9., 12., 0),
(900., 1200., 0),
(1900., 1200., 0),
(0.99, 1.01, 1),
(9.99, 10.01, 10),
(99.99, 100.01, 100),
(5.99, 6.01, 6),
(15.99, 16.01, 16),
(-0.452, 0.492, 0),
(-0.492, 0.492, 0),
(12331.4, 12350.5, 12300),
(-12335.3, 12335.3, 0),
]
use_offset_data = [True, False]
useMathText_data = [True, False]
# (sci_type, scilimits, lim, orderOfMag, fewticks)
scilimits_data = [
(False, (0, 0), (10.0, 20.0), 0, False),
(True, (-2, 2), (-10, 20), 0, False),
(True, (-2, 2), (-20, 10), 0, False),
(True, (-2, 2), (-110, 120), 2, False),
(True, (-2, 2), (-120, 110), 2, False),
(True, (-2, 2), (-.001, 0.002), -3, False),
(True, (-7, 7), (0.18e10, 0.83e10), 9, True),
(True, (0, 0), (-1e5, 1e5), 5, False),
(True, (6, 6), (-1e5, 1e5), 6, False),
]
cursor_data = [
[0., "0.000"],
[0.0123, "0.012"],
[0.123, "0.123"],
[1.23, "1.230"],
[12.3, "12.300"],
]
format_data = [
(.1, "1e-1"),
(.11, "1.1e-1"),
(1e8, "1e8"),
(1.1e8, "1.1e8"),
]
@pytest.mark.parametrize('unicode_minus, result',
[(True, "\N{MINUS SIGN}1"), (False, "-1")])
def test_unicode_minus(self, unicode_minus, result):
mpl.rcParams['axes.unicode_minus'] = unicode_minus
assert (
plt.gca().xaxis.get_major_formatter().format_data_short(-1).strip()
== result)
@pytest.mark.parametrize('left, right, offset', offset_data)
def test_offset_value(self, left, right, offset):
fig, ax = plt.subplots()
formatter = ax.xaxis.get_major_formatter()
with (pytest.warns(UserWarning, match='Attempting to set identical')
if left == right else nullcontext()):
ax.set_xlim(left, right)
ax.xaxis._update_ticks()
assert formatter.offset == offset
with (pytest.warns(UserWarning, match='Attempting to set identical')
if left == right else nullcontext()):
ax.set_xlim(right, left)
ax.xaxis._update_ticks()
assert formatter.offset == offset
@pytest.mark.parametrize('use_offset', use_offset_data)
def test_use_offset(self, use_offset):
with mpl.rc_context({'axes.formatter.useoffset': use_offset}):
tmp_form = mticker.ScalarFormatter()
assert use_offset == tmp_form.get_useOffset()
assert tmp_form.offset == 0
@pytest.mark.parametrize('use_math_text', useMathText_data)
def test_useMathText(self, use_math_text):
with mpl.rc_context({'axes.formatter.use_mathtext': use_math_text}):
tmp_form = mticker.ScalarFormatter()
assert use_math_text == tmp_form.get_useMathText()
def test_set_use_offset_float(self):
tmp_form = mticker.ScalarFormatter()
tmp_form.set_useOffset(0.5)
assert not tmp_form.get_useOffset()
assert tmp_form.offset == 0.5
def test_set_use_offset_bool(self):
tmp_form = mticker.ScalarFormatter()
tmp_form.set_useOffset(True)
assert tmp_form.get_useOffset()
assert tmp_form.offset == 0
tmp_form.set_useOffset(False)
assert not tmp_form.get_useOffset()
assert tmp_form.offset == 0
def test_set_use_offset_int(self):
tmp_form = mticker.ScalarFormatter()
tmp_form.set_useOffset(1)
assert not tmp_form.get_useOffset()
assert tmp_form.offset == 1
def test_use_locale(self):
conv = locale.localeconv()
sep = conv['thousands_sep']
if not sep or conv['grouping'][-1:] in ([], [locale.CHAR_MAX]):
pytest.skip('Locale does not apply grouping') # pragma: no cover
with mpl.rc_context({'axes.formatter.use_locale': True}):
tmp_form = mticker.ScalarFormatter()
assert tmp_form.get_useLocale()
tmp_form.create_dummy_axis()
tmp_form.axis.set_data_interval(0, 10)
tmp_form.set_locs([1, 2, 3])
assert sep in tmp_form(1e9)
@pytest.mark.parametrize(
'sci_type, scilimits, lim, orderOfMag, fewticks', scilimits_data)
def test_scilimits(self, sci_type, scilimits, lim, orderOfMag, fewticks):
tmp_form = mticker.ScalarFormatter()
tmp_form.set_scientific(sci_type)
tmp_form.set_powerlimits(scilimits)
fig, ax = plt.subplots()
ax.yaxis.set_major_formatter(tmp_form)
ax.set_ylim(*lim)
if fewticks:
ax.yaxis.set_major_locator(mticker.MaxNLocator(4))
tmp_form.set_locs(ax.yaxis.get_majorticklocs())
assert orderOfMag == tmp_form.orderOfMagnitude
@pytest.mark.parametrize('value, expected', format_data)
def test_format_data(self, value, expected):
mpl.rcParams['axes.unicode_minus'] = False
sf = mticker.ScalarFormatter()
assert sf.format_data(value) == expected
@pytest.mark.parametrize('data, expected', cursor_data)
def test_cursor_precision(self, data, expected):
fig, ax = plt.subplots()
ax.set_xlim(-1, 1) # Pointing precision of 0.001.
fmt = ax.xaxis.get_major_formatter().format_data_short
assert fmt(data) == expected
@pytest.mark.parametrize('data, expected', cursor_data)
def test_cursor_dummy_axis(self, data, expected):
# Issue #17624
sf = mticker.ScalarFormatter()
sf.create_dummy_axis()
sf.axis.set_view_interval(0, 10)
fmt = sf.format_data_short
assert fmt(data) == expected
assert sf.axis.get_tick_space() == 9
assert sf.axis.get_minpos() == 0
def test_mathtext_ticks(self):
mpl.rcParams.update({
'font.family': 'serif',
'font.serif': 'cmr10',
'axes.formatter.use_mathtext': False
})
if parse_version(pytest.__version__).major < 8:
with pytest.warns(UserWarning, match='cmr10 font should ideally'):
fig, ax = plt.subplots()
ax.set_xticks([-1, 0, 1])
fig.canvas.draw()
else:
with (pytest.warns(UserWarning, match="Glyph 8722"),
pytest.warns(UserWarning, match='cmr10 font should ideally')):
fig, ax = plt.subplots()
ax.set_xticks([-1, 0, 1])
fig.canvas.draw()
def test_cmr10_substitutions(self, caplog):
mpl.rcParams.update({
'font.family': 'cmr10',
'mathtext.fontset': 'cm',
'axes.formatter.use_mathtext': True,
})
# Test that it does not log a warning about missing glyphs.
with caplog.at_level(logging.WARNING, logger='matplotlib.mathtext'):
fig, ax = plt.subplots()
ax.plot([-0.03, 0.05], [40, 0.05])
ax.set_yscale('log')
yticks = [0.02, 0.3, 4, 50]
formatter = mticker.LogFormatterSciNotation()
ax.set_yticks(yticks, map(formatter, yticks))
fig.canvas.draw()
assert not caplog.text
def test_empty_locs(self):
sf = mticker.ScalarFormatter()
sf.set_locs([])
assert sf(0.5) == ''
| TestScalarFormatter |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/client.py | {
"start": 779,
"end": 1648
} | class ____(ThreadedZMQSocketChannel, SuperQObject):
"""A ZMQ socket emitting a Qt signal when a message is received."""
message_received = QtCore.Signal(object)
def process_events(self):
""" Process any pending GUI events.
"""
QtCore.QCoreApplication.instance().processEvents()
def call_handlers(self, msg):
"""This method is called in the ioloop thread when a message arrives.
It is important to remember that this method is called in the thread
so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
# Emit the generic signal.
self.message_received.emit(msg)
def closed(self):
"""Check if the channel is closed."""
return self.stream is None or self.stream.closed()
| QtZMQSocketChannel |
python | sympy__sympy | sympy/vector/vector.py | {
"start": 12351,
"end": 14171
} | class ____(Vector, AtomicExpr):
"""
Class to denote a base vector.
"""
def __new__(cls, index, system, pretty_str=None, latex_str=None):
if pretty_str is None:
pretty_str = "x{}".format(index)
if latex_str is None:
latex_str = "x_{}".format(index)
pretty_str = str(pretty_str)
latex_str = str(latex_str)
# Verify arguments
if index not in range(0, 3):
raise ValueError("index must be 0, 1 or 2")
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D")
name = system._vector_names[index]
# Initialize an object
obj = super().__new__(cls, S(index), system)
# Assign important attributes
obj._base_instance = obj
obj._components = {obj: S.One}
obj._measure_number = S.One
obj._name = system._name + '.' + name
obj._pretty_form = '' + pretty_str
obj._latex_form = latex_str
obj._system = system
# The _id is used for printing purposes
obj._id = (index, system)
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
# This attr is used for re-expression to one of the systems
# involved in the definition of the Vector. Applies to
# VectorMul and VectorAdd too.
obj._sys = system
return obj
@property
def system(self):
return self._system
def _sympystr(self, printer):
return self._name
def _sympyrepr(self, printer):
index, system = self._id
return printer._print(system) + '.' + system._vector_names[index]
@property
def free_symbols(self):
return {self}
def _eval_conjugate(self):
return self
| BaseVector |
python | django__django | tests/fixtures_regress/tests.py | {
"start": 21048,
"end": 28192
} | class ____(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 - Python based parser version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
"loaddata",
"model-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance.json",
verbosity=0,
)
self.assertEqual(NKChild.objects.get(pk=1).data, "apple")
self.assertEqual(RefToNKChild.objects.get(pk=1).nk_fk.data, "apple")
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 - XML version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
"loaddata",
"model-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance.json",
verbosity=0,
)
management.call_command(
"loaddata",
"nk-inheritance2.xml",
verbosity=0,
)
self.assertEqual(NKChild.objects.get(pk=2).data, "banana")
self.assertEqual(RefToNKChild.objects.get(pk=2).nk_fk.data, "apple")
def test_nk_on_serialize(self):
"""
Natural key requirements are taken into account when serializing
models.
"""
management.call_command(
"loaddata",
"forward_ref_lookup.json",
verbosity=0,
)
out = StringIO()
management.call_command(
"dumpdata",
"fixtures_regress.book",
"fixtures_regress.person",
"fixtures_regress.store",
verbosity=0,
format="json",
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""
[{"fields": {"main": null, "name": "Amazon"},
"model": "fixtures_regress.store"},
{"fields": {"main": null, "name": "Borders"},
"model": "fixtures_regress.store"},
{"fields": {"name": "Neal Stephenson"}, "model": "fixtures_regress.person"},
{"pk": 1, "model": "fixtures_regress.book",
"fields": {"stores": [["Amazon"], ["Borders"]],
"name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]
""",
)
def test_dependency_sorting(self):
"""
It doesn't matter what order you mention the models, Store *must* be
serialized before then Person, and both must be serialized before Book.
"""
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Book, Person, Store])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_2(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Book, Store, Person])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_3(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Store, Book, Person])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_4(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Store, Person, Book])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_5(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Book, Store])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_6(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Store, Book])]
)
self.assertEqual(sorted_deps, [Store, Person, Book])
def test_dependency_sorting_dangling(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle1, Store, Book])]
)
self.assertEqual(sorted_deps, [Circle1, Store, Person, Book])
def test_dependency_sorting_tight_circular(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle2, Circle1, Store, Book])]
)
def test_dependency_sorting_tight_circular_2(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Circle1, Book, Circle2])]
)
def test_dependency_self_referential(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle3 in "
"serialized app list.",
):
serializers.sort_dependencies([("fixtures_regress", [Book, Circle3])])
def test_dependency_sorting_long(self):
with self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized "
"app list.",
):
serializers.sort_dependencies(
[("fixtures_regress", [Person, Circle2, Circle1, Circle3, Store, Book])]
)
def test_dependency_sorting_normal(self):
sorted_deps = serializers.sort_dependencies(
[("fixtures_regress", [Person, ExternalDependency, Book])]
)
self.assertEqual(sorted_deps, [Person, Book, ExternalDependency])
def test_normal_pk(self):
"""
Normal primary keys work on a model with natural key capabilities.
"""
management.call_command(
"loaddata",
"non_natural_1.json",
verbosity=0,
)
management.call_command(
"loaddata",
"forward_ref_lookup.json",
verbosity=0,
)
management.call_command(
"loaddata",
"non_natural_2.xml",
verbosity=0,
)
books = Book.objects.all()
self.assertQuerySetEqual(
books,
[
"<Book: Cryptonomicon by Neal Stephenson (available at Amazon, "
"Borders)>",
"<Book: Ender's Game by Orson Scott Card (available at Collins "
"Bookstore)>",
"<Book: Permutation City by Greg Egan (available at Angus and "
"Robertson)>",
],
transform=repr,
)
| NaturalKeyFixtureTests |
python | sphinx-doc__sphinx | sphinx/ext/todo.py | {
"start": 1881,
"end": 2801
} | class ____(Domain):
name = 'todo'
label = 'todo'
@property
def todos(self) -> dict[str, list[todo_node]]:
return self.data.setdefault('todos', {})
def clear_doc(self, docname: str) -> None:
self.todos.pop(docname, None)
def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
for docname in docnames:
self.todos[docname] = otherdata['todos'][docname]
def process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
todos = self.todos.setdefault(docname, [])
for todo in document.findall(todo_node):
env.events.emit('todo-defined', todo)
todos.append(todo)
if env.config.todo_emit_warnings:
logger.warning(
__('TODO entry found: %s'), todo[1].astext(), location=todo
)
| TodoDomain |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 13999,
"end": 14808
} | class ____(graphene.Mutation):
"""Retries a set of partition backfill runs. Retrying a backfill will create a new backfill to retry any failed partitions."""
Output = graphene.NonNull(GrapheneLaunchBackfillResult)
class Arguments:
reexecutionParams = graphene.Argument(GrapheneReexecutionParams)
class Meta:
name = "RetryBackfillMutation"
@capture_error
@require_permission_check(Permissions.LAUNCH_PARTITION_BACKFILL)
def mutate(
self,
graphene_info: ResolveInfo,
reexecutionParams: GrapheneReexecutionParams,
):
return retry_partition_backfill(
graphene_info,
backfill_id=reexecutionParams["parentRunId"],
strategy=reexecutionParams["strategy"],
)
| GrapheneReexecuteBackfillMutation |
python | google__jax | jax/_src/export/_export.py | {
"start": 14171,
"end": 71565
} | class ____(Protocol):
def __call__(self, aux_data: PyTreeAuxData, children: Sequence[Any]) -> Any:
"""Materializes a T given a deserialized AuxData and children.
This is similar in scope with the ``unflatten_func``.
"""
serialization_registry: dict[type, tuple[str, _SerializeAuxData]] = {}
deserialization_registry: dict[
str,
tuple[type, _DeserializeAuxData, _BuildFromChildren]] = {}
def _is_namedtuple(nodetype: type) -> bool:
return (issubclass(nodetype, tuple) and
hasattr(nodetype, "_fields") and
isinstance(nodetype._fields, Sequence) and
all(isinstance(f, str) for f in nodetype._fields))
def register_pytree_node_serialization(
nodetype: type[T],
*,
serialized_name: str,
serialize_auxdata: _SerializeAuxData,
deserialize_auxdata: _DeserializeAuxData,
from_children: _BuildFromChildren | None = None
) -> type[T]:
"""Registers a custom PyTree node for serialization and deserialization.
You must use this function before you can serialize and deserialize PyTree
nodes for the types not supported natively. We serialize PyTree nodes for
the ``in_tree`` and ``out_tree`` fields of ``Exported``, which are part of the
exported function's calling convention.
This function must be called after calling
:func:`jax.tree_util.register_pytree_node` (except for ``collections.namedtuple``,
which do not require a call to ``register_pytree_node``).
Args:
nodetype: the type whose PyTree nodes we want to serialize. It is an
error to attempt to register multiple serializations for a ``nodetype``.
serialized_name: a string that will be present in the serialization and
will be used to look up the registration during deserialization. It is an
error to attempt to register multiple serializations for a
``serialized_name``.
serialize_auxdata: serialize the PyTree auxdata (returned by the
``flatten_func`` argument to :func:`jax.tree_util.register_pytree_node`.).
deserialize_auxdata: deserialize the auxdata that was serialized by the
``serialize_auxdata``.
from_children: if present, this is a function that takes that result of
``deserialize_auxdata`` along with some children and creates an instance
of ``nodetype``. This is similar to the ``unflatten_func`` passed to
:func:`jax.tree_util.register_pytree_node`. If not present, we look up
and use the ``unflatten_func``. This is needed for ``collections.namedtuple``,
which does not have a ``register_pytree_node``, but it can be useful to
override that function. Note that the result of ``from_children`` is
only used with :func:`jax.tree_util.tree_structure` to construct a proper
PyTree node, it is not used to construct the outputs of the serialized
function.
Returns:
the same type passed as ``nodetype``, so that this function can
be used as a class decorator.
"""
if nodetype in serialization_registry:
raise ValueError(
f"Duplicate serialization registration for type `{nodetype}`. "
"Previous registration was with serialized_name "
f"`{serialization_registry[nodetype][0]}`.")
if serialized_name in deserialization_registry:
raise ValueError(
"Duplicate serialization registration for "
f"serialized_name `{serialized_name}`. "
"Previous registration was for type "
f"`{deserialization_registry[serialized_name][0]}`.")
if from_children is None:
if nodetype not in tree_util._registry:
raise ValueError(
f"If `from_children` is not present, you must call first"
f"`jax.tree_util.register_pytree_node` for `{nodetype}`")
from_children = tree_util._registry[nodetype].from_iter
serialization_registry[nodetype] = (
serialized_name, serialize_auxdata)
deserialization_registry[serialized_name] = (
nodetype, deserialize_auxdata, from_children)
return nodetype
def register_namedtuple_serialization(
nodetype: type[T],
*,
serialized_name: str) -> type[T]:
"""Registers a namedtuple for serialization and deserialization.
JAX has native PyTree support for ``collections.namedtuple``, and does not
require a call to :func:`jax.tree_util.register_pytree_node`. However, if you
want to serialize functions that have inputs of outputs of a
namedtuple type, you must register that type for serialization.
Args:
nodetype: the type whose PyTree nodes we want to serialize. It is an
error to attempt to register multiple serializations for a ``nodetype``.
On deserialization, this type must have the same set of keys that
were present during serialization.
serialized_name: a string that will be present in the serialization and
will be used to look up the registration during deserialization. It is an
error to attempt to register multiple serializations for
a ``serialized_name``.
Returns:
the same type passed as ``nodetype``, so that this function can
be used as a class decorator.
"""
if not _is_namedtuple(nodetype):
raise ValueError("Use `jax.export.register_pytree_node_serialization` for "
"types other than `collections.namedtuple`.")
def serialize_auxdata(aux_data: PyTreeAuxData) -> bytes:
# Store the serialized keys in the serialized auxdata
del aux_data
return json.dumps(nodetype._fields).encode("utf-8")
def deserialize_auxdata(serialized_aux_data: bytes) -> PyTreeAuxData:
return json.loads(serialized_aux_data.decode("utf-8"))
def from_children(aux_data: PyTreeAuxData, children: Sequence[Any]) -> Any:
# Use our own "from_children" because namedtuples do not have a pytree
# registration.
ser_keys = cast(Sequence[str], aux_data)
assert len(ser_keys) == len(children)
return nodetype(** dict(zip(ser_keys, children)))
return register_pytree_node_serialization(
nodetype,
serialized_name=serialized_name,
serialize_auxdata=serialize_auxdata,
deserialize_auxdata=deserialize_auxdata,
from_children=from_children)
# collections.OrderedDict is registered as a pytree node with auxdata being
# `tuple(x.keys())`.
def _serialize_ordereddict_keys(keys):
if isinstance(keys, Sequence) and all(isinstance(k, str) for k in keys):
return json.dumps(keys).encode("utf-8")
else:
raise NotImplementedError(
"Serialization of collections.OrderedDict is supported only when the "
f"keys are strings. Found keys: {keys}.")
register_pytree_node_serialization(
collections.OrderedDict,
serialized_name="collections.OrderedDict",
serialize_auxdata=_serialize_ordereddict_keys,
deserialize_auxdata=lambda b: json.loads(b.decode("utf-8")))
def default_export_platform() -> str:
"""Retrieves the default export platform.
One of: ``'tpu'``, ``'cpu'``, ``'cuda'``, ``'rocm'``.
"""
# Canonicalize to turn 'gpu' into 'cuda' or 'rocm'
return xb.canonicalize_platform(xb.default_backend())
default_lowering_platform = default_export_platform
def shape_and_dtype_jax_array(a) -> tuple[Sequence[int | None], DType]:
"""Returns the shape and dtype of a jax.Array or a j"""
if isinstance(a, api.ShapeDtypeStruct):
return a.shape, a.dtype
aval = core.get_aval(a)
return aval.shape, aval.dtype
@functools.partial(traceback_util.api_boundary,
repro_api_name="jax.export.export")
def export(
fun_jit: stages.Wrapped,
*,
platforms: Sequence[str] | None = None,
disabled_checks: Sequence[DisabledSafetyCheck] = (),
_override_lowering_rules: Sequence[tuple[Any, Any]] | None = None
) -> Callable[..., Exported]:
"""Exports a JAX function for persistent serialization.
Args:
fun_jit: the function to export. Should be the result of :func:`jax.jit`.
platforms:
Optional sequence containing a subset of 'tpu', 'cpu',
'cuda', 'rocm'. If more than one platform is specified, then
the exported code takes an argument specifying the platform.
If None, then use the default JAX backend.
The calling convention for multiple platforms is explained at
https://docs.jax.dev/en/latest/export/export.html#module-calling-convention.
_override_lowering_rules: an optional sequence of custom lowering rules
for some JAX primitives. Each element of the sequence is a pair
of a JAX primitive and a lowering function. Defining lowering rules
is an advanced feature using JAX internal APIs, which are subject
to change. Furthermore, the responsibility for the stability of the
MLIR emitted through these custom lowering rules, rests with the user
of these rules.
disabled_checks: the safety checks to disable. See documentation for
of :class:`jax.export.DisabledSafetyCheck`.
Returns:
a function that takes args and kwargs pytrees of :class:`jax.ShapeDtypeStruct`,
or values with ``.shape`` and ``.dtype`` attributes, and returns an
:class:`~jax.export.Exported`.
Usage:
>>> from jax import export
>>> exported: export.Exported = export.export(jnp.sin)(
... np.arange(4, dtype=np.float32))
>>>
>>> # You can inspect the Exported object
>>> exported.in_avals
(ShapedArray(float32[4]),)
>>> blob: bytearray = exported.serialize()
>>>
>>> # The serialized bytes are safe to use in a separate process
>>> rehydrated: export.Exported = export.deserialize(blob)
>>> rehydrated.fun_name
'sin'
>>> rehydrated.call(np.array([.1, .2, .3, .4], dtype=np.float32))
Array([0.09983342, 0.19866933, 0.29552022, 0.38941833], dtype=float32)
"""
return _export_internal(fun_jit, platforms=platforms,
disabled_checks=disabled_checks,
override_lowering_rules=_override_lowering_rules)
# TODO(necula): remove this once we improve the integration with jax2tf.
def _export_internal(
fun_jit: stages.Wrapped,
*,
platforms: Sequence[str] | None = None,
disabled_checks: Sequence[DisabledSafetyCheck] = (),
_device_assignment_for_internal_jax2tf_use_only=None,
override_lowering_rules=None,
) -> Callable[..., Exported]:
"""Exports native serialization for a JAX function.
Note: this function exists only for internal usage by jax2tf. Use
:mod:`jax.export` instead.
See https://docs.jax.dev/en/latest/export/export.html
See docstring of ``export`` for more details.
"""
if not isinstance(fun_jit, stages.Wrapped):
raise ValueError(
f"Function to be exported must be the result of `jit` but is: {fun_jit}")
def do_export(*args_specs, **kwargs_specs) -> Exported:
if platforms is not None:
actual_lowering_platforms = tuple(platforms)
else:
actual_lowering_platforms = (default_export_platform(),)
# TODO: move to `lower`
check_symbolic_scope_errors(fun_jit, args_specs, kwargs_specs)
traced = fun_jit.trace(*args_specs, **kwargs_specs)
lowered = traced.lower(
lowering_platforms=actual_lowering_platforms,
_private_parameters=mlir.LoweringParameters(
override_lowering_rules=override_lowering_rules,
for_export=True,
hoist_constants_as_args=False,
export_ignore_forward_compatibility=config.export_ignore_forward_compatibility.value))
return _export_lowered(
lowered, traced.jaxpr, traced.fun_name,
disabled_checks=disabled_checks,
_device_assignment_for_internal_jax2tf_use_only=_device_assignment_for_internal_jax2tf_use_only)
return do_export
def check_symbolic_scope_errors(fun_jax, args_specs, kwargs_specs):
symbolic_scope: tuple[shape_poly.SymbolicScope, tree_util.KeyPath] | None = None # type: ignore[invalid-annotation,unused-ignore]
for k_path, aval in tree_util.tree_flatten_with_path((args_specs, kwargs_specs))[0]:
# Static args may have no `shape` attribute.
if not hasattr(aval, "shape"):
continue
for d in aval.shape:
if shape_poly.is_symbolic_dim(d):
if symbolic_scope is None:
symbolic_scope = (d.scope, k_path)
continue
symbolic_scope[0]._check_same_scope(
d, when=f"when exporting {util.fun_name(fun_jax)}",
self_descr=f"current (from {shape_poly.args_kwargs_path_to_str(symbolic_scope[1])}) ",
other_descr=shape_poly.args_kwargs_path_to_str(k_path))
def _export_lowered(
lowered: stages.Lowered,
jaxpr: core.ClosedJaxpr,
fun_name: str,
disabled_checks: Sequence[DisabledSafetyCheck] = (),
_device_assignment_for_internal_jax2tf_use_only=None,
) -> Exported:
version = config.jax_export_calling_convention_version.value
if (version < minimum_supported_calling_convention_version or
version > maximum_supported_calling_convention_version):
raise ValueError(
f"The requested export calling convention version {version} is outside the "
f"range of supported versions [{minimum_supported_calling_convention_version}"
f"..{maximum_supported_calling_convention_version}]")
lowering = lowered._lowering
_check_lowering(lowering)
mlir_module = lowering.stablehlo()
args_avals_flat, _ = tree_util.tree_flatten(lowered.in_avals)
if "mut" in lowering.compile_args:
if lowering.compile_args["mut"]: raise NotImplementedError
if "kept_var_idx" in lowering.compile_args:
module_kept_var_idx = tuple(sorted(lowering.compile_args["kept_var_idx"]))
else:
# For pmap
module_kept_var_idx = tuple(range(len(args_avals_flat)))
shape_poly_state = lowering.compile_args["shape_poly_state"]
# Make a copy of mlir module as we should not mutate it
# because it may be cached
context = mlir.make_ir_context()
with context, ir.Location.unknown(context):
mlir_module = ir.Module.parse(mlir.module_to_bytecode(mlir_module))
if (not all(core.is_constant_shape(a.shape) for a in args_avals_flat)
or lowering.compile_args.get("ordered_effects", [])):
mlir_module = _wrap_main_func(
mlir_module, args_avals_flat, args_kwargs_tree=lowered.in_tree,
has_platform_index_argument=shape_poly_state.has_platform_index_argument,
module_kept_var_idx=module_kept_var_idx,
serialization_version=version)
with mlir_module.context:
mlir_module_attrs = mlir_module.operation.attributes
mlir_module_attrs["jax.uses_shape_polymorphism"] = (
mlir.ir.BoolAttr.get(shape_poly_state.uses_dim_vars))
# Shardy was used during lowering if we can find the Shardy mesh in the
# module. Note that the mesh should have been lifted by the
# `sdy-lift-inlined-meshes` pass in mlir.py.
shardy_enabled = has_sdy_mesh(ir.SymbolTable(mlir_module.operation),
mlir_module)
mlir_module_serialized = _module_to_bytecode(mlir_module)
# Figure out the result types and shapes
if "global_out_avals" in lowering.compile_args:
# This is currently the case for pjit
out_avals_flat = lowering.compile_args["global_out_avals"]
elif "shards" in lowering.compile_args: # for PmapComputation
out_avals_flat = lowering.compile_args["shards"].out_sharded_avals
else:
out_avals_flat = lowered.compile_args["out_avals"] # type: ignore
# out_avals come from the Jaxpr, and do not always reflect the out_shardings
# specification.
out_avals_flat = tuple(
aval.update(memory_space=core.mem_kind_to_space(s.memory_kind))
if not isinstance(s, sharding_impls.UnspecifiedValue) else aval
for aval, s in zip(out_avals_flat, lowering.compile_args["out_shardings"]))
# Log and then check the module.
logmsg = (f"fun_name={fun_name} version={version} "
f"lowering_platforms={lowering._platforms} " # type: ignore[unused-ignore,attribute-error]
f"disabled_checks={disabled_checks}")
logger.debug("Exported JAX function: %s\n", logmsg)
logger.debug(mlir.dump_module_message(mlir_module, "export"))
logger.debug(
"Size of mlir_module_serialized: %d byte",
len(mlir_module_serialized),
)
_check_module(mlir_module,
disabled_checks=disabled_checks,
shardy_enabled=shardy_enabled)
ordered_effects = tuple(lowering.compile_args["ordered_effects"])
unordered_effects = tuple(lowering.compile_args["unordered_effects"])
nr_devices = lowering.compile_args["num_devices"]
def export_sharding(s: LoweringSharding,
aval: core.ShapedArray) -> HloSharding | None:
if isinstance(s, sharding_impls.UnspecifiedValue):
return None
return s._to_xla_hlo_sharding(aval.ndim)
all_in_shardings = expand_in_shardings(lowering.compile_args["in_shardings"],
module_kept_var_idx,
len(args_avals_flat))
in_shardings = tuple(
export_sharding(s, aval)
for s, aval in zip(all_in_shardings, args_avals_flat))
out_shardings = tuple(
export_sharding(s, aval)
for s, aval in zip(lowering.compile_args["out_shardings"], out_avals_flat))
device_assignment = lowering._device_list # type: ignore
if _device_assignment_for_internal_jax2tf_use_only is not None:
_device_assignment_for_internal_jax2tf_use_only[0] = device_assignment
cur_mesh = None
if config.use_shardy_partitioner.value:
for sharding in itertools.chain.from_iterable([
all_in_shardings, lowering.compile_args["out_shardings"]]):
if isinstance(sharding, sharding_impls.NamedSharding):
cur_mesh = sharding.mesh
break
if cur_mesh and isinstance(cur_mesh, mesh_lib.Mesh):
cur_mesh = cur_mesh.abstract_mesh
def _get_exported_vjp(exp_primal: Exported) -> Exported:
# Turn the primal jaxpr into a function, in preparation for exporting
# the VJP. Note that jaxpr_as_fun produces a function with flat arguments
assert(jaxpr is not None) # None only when the lowered was created outside JAX
fun_jax = core.jaxpr_as_fun(jaxpr)
fun_vjp_jax, vjp_in_avals = _get_vjp_fun(
fun_jax,
in_tree=exp_primal.in_tree,
in_avals=exp_primal.in_avals,
in_shardings_hlo=exp_primal.in_shardings_hlo,
out_avals=exp_primal.out_avals,
out_shardings_hlo=exp_primal.out_shardings_hlo,
device_assignment=device_assignment,
apply_jit=True,
flat_primal_fun=True,
mesh=cur_mesh) # type: ignore[arg-type]
return export(fun_vjp_jax, # type: ignore[arg-type]
platforms=exp_primal.platforms,
disabled_checks=exp_primal.disabled_safety_checks)(*vjp_in_avals)
return Exported(
fun_name=fun_name,
in_tree=lowered.in_tree,
out_tree=lowered.out_tree,
in_avals=tuple(args_avals_flat),
out_avals=tuple(out_avals_flat),
in_shardings_hlo=in_shardings,
out_shardings_hlo=out_shardings,
nr_devices=nr_devices,
platforms=lowering._platforms, # type: ignore
ordered_effects=ordered_effects,
unordered_effects=unordered_effects,
disabled_safety_checks=tuple(disabled_checks),
mlir_module_serialized=mlir_module_serialized,
module_kept_var_idx=module_kept_var_idx,
uses_global_constants=shape_poly_state.uses_dim_vars,
calling_convention_version=version,
_get_vjp=_get_exported_vjp)
def _module_to_bytecode(module: ir.Module) -> bytes:
  """Serializes an MLIR module as a StableHLO portable artifact."""
  bytecode = mlir.module_to_bytecode(module)
  # `target_version` manages the situation where the StableHLO producer and
  # the StableHLO consumer were built from different versions of StableHLO.
  #
  # Every producer version comes with a compatibility window, i.e. the range
  # [`consumer_version_min`, `consumer_version_max`] of consumer versions that
  # can deserialize portable artifacts written by that producer.
  # See https://github.com/openxla/stablehlo/blob/main/docs/compatibility.md
  # for the exact extent of these compatibility guarantees.
  #
  # Asking for the WEEK_4 requirement yields a StableHLO version that is at
  # least four weeks old. New StableHLO features thus only start being emitted
  # after ~4w, keeping artifacts readable by any consumer that updates on at
  # least a monthly cadence.
  #
  # Note that this does not verify any JAX custom calls (which are only
  # guaranteed 3w of forward compatibility); it only prevents use of fresh
  # StableHLO features from failing on older consumers.
  target_version = hlo.get_version_from_compatibility_requirement(
      hlo.StablehloCompatibilityRequirement.WEEK_4)
  return xla_client._xla.mlir.serialize_portable_artifact(  # type: ignore
      bytecode, target_version, xb.get_backend().serialize_with_sdy)
def _wrap_main_func(
    module: ir.Module,
    args_avals_flat: Sequence[core.ShapedArray],
    *,
    args_kwargs_tree: tree_util.PyTreeDef,
    has_platform_index_argument: bool,
    module_kept_var_idx: tuple[int, ...],
    serialization_version: int
) -> ir.Module:
  """Wraps the lowered module with a new "main" handling dimension arguments.

  See calling convention documentation https://docs.jax.dev/en/latest/export/export.html#module-calling-convention.

  Args:
    module: a copy of HLO module as obtained from lowering.
    args_avals_flat: the avals for all the arguments of the lowered function,
      which correspond to the array arguments of the ``module``.
    args_kwargs_tree: the PyTreeDef corresponding to ``(args, kwargs)``, for error
      messages.
    has_platform_index_argument: whether the ``module`` has a first platform
      index argument
    module_kept_var_idx: a sorted tuple of integers with the indices of arguments
      in ``args_avals_flat`` that are kept as ``module`` arguments.
    serialization_version: the target serialization version

  Returns the wrapped module, without dimension and token arguments.
  """
  dim_vars = shape_poly.all_dim_vars(args_avals_flat)
  context = module.context
  wrapped_module = module
  with context, ir.Location.unknown(context):
    # Demote the original "main" to a private "_wrapped_jax_export_main"; the
    # new public "main" built below will call it.
    symbol_table = ir.SymbolTable(wrapped_module.operation)
    orig_main = symbol_table["main"]
    orig_main.attributes["sym_visibility"] = ir.StringAttr.get("private")
    symbol_table.set_symbol_name(orig_main, "_wrapped_jax_export_main")
    orig_main_name = ir.StringAttr(symbol_table.insert(orig_main)).value
    def is_token(typ, attrs):
      # An input/result is an effect token iff its type is the MLIR token type.
      return (typ == mlir.token_type())
    orig_input_types = orig_main.type.inputs  # type: ignore
    arg_attrs = list(ir.ArrayAttr(orig_main.arg_attrs))  # type: ignore
    # The order of args: platform_index_arg, dim args, token args, array args.
    nr_platform_index_args = 1 if has_platform_index_argument else 0
    nr_dim_args = len(dim_vars)
    token_arg_idxs = [i for i, (typ, attrs) in enumerate(zip(orig_input_types,
                                                             arg_attrs))
                      if is_token(typ, attrs)]
    nr_token_args = len(token_arg_idxs)
    if nr_token_args > 0:
      # Token args must form a contiguous run right after platform/dim args.
      assert min(token_arg_idxs) == nr_platform_index_args + nr_dim_args
      assert token_arg_idxs == list(
          range(nr_platform_index_args + nr_dim_args,
                nr_platform_index_args + nr_dim_args + nr_token_args))
    nr_array_args = (len(orig_input_types) - nr_platform_index_args
                     - nr_dim_args - nr_token_args)
    assert nr_array_args >= 0
    (platform_input_types, dim_var_input_types,
     token_input_types, array_input_types) = util.split_list(
        orig_input_types, [nr_platform_index_args, nr_dim_args, nr_token_args])
    # The order of results: tokens, array results
    orig_output_types = orig_main.type.results  # type: ignore
    result_attrs = list(ir.ArrayAttr(orig_main.result_attrs))  # type: ignore
    token_result_idxs = [i for i, (typ, attrs) in enumerate(zip(orig_output_types,
                                                                result_attrs))
                         if is_token(typ, attrs)]
    nr_token_results = len(token_result_idxs)
    assert token_result_idxs == list(range(0, nr_token_results))
    nr_array_results = len(orig_output_types) - nr_token_results
    assert nr_array_results >= 0
    # The new "main" keeps the platform index and drops the dim args; the dim
    # values are recomputed below from the shapes of the array arguments.
    new_main_arg_indices = (
        *range(nr_platform_index_args),
        *range(nr_platform_index_args + nr_dim_args, len(orig_input_types)))
    new_main_result_indices = tuple(range(0, len(orig_output_types)))
    new_main_input_types = [orig_input_types[idx] for idx in new_main_arg_indices]
    new_main_output_types = [orig_output_types[idx] for idx in new_main_result_indices]
    new_main_ftype = ir.FunctionType.get(new_main_input_types, new_main_output_types)
    new_main_op = func_dialect.FuncOp(
        "main", new_main_ftype, ip=ir.InsertionPoint.at_block_begin(wrapped_module.body))
    new_main_op.attributes["sym_visibility"] = ir.StringAttr.get("public")
    try:
      # Copy the argument attributes, remapping any input-output aliasing
      # ("tf.aliasing_output") result indices to the new result numbering.
      new_arg_attrs = []
      for idx in new_main_arg_indices:
        new_arg_attr = {}
        for attr in arg_attrs[idx]:
          if attr.name == "tf.aliasing_output":
            i = new_main_result_indices.index(attr.attr.value)
            new_arg_attr[attr.name] = ir.IntegerAttr.get(
                ir.IntegerType.get_signless(32), i
            )
          else:
            new_arg_attr[attr.name] = attr.attr
        new_arg_attrs.append(ir.DictAttr.get(new_arg_attr))
      new_main_op.arg_attrs = ir.ArrayAttr.get(new_arg_attrs)
    except KeyError:
      pass  # TODO: better detection if orig_main.arg_attrs does not exist
    try:
      new_main_op.result_attrs = ir.ArrayAttr.get(
          [result_attrs[idx] for idx in new_main_result_indices])
    except KeyError:
      pass
    symbol_table.insert(new_main_op)
    entry_block = new_main_op.add_entry_block()
    with ir.InsertionPoint(entry_block):
      # Make a context just for lowering the dimension value computations
      module_context = mlir.ModuleContext(
          backend=None, platforms=["cpu"],
          axis_context=sharding_impls.ShardingContext(0),
          keepalives=[], channel_iterator=itertools.count(1),
          host_callbacks=[], module=wrapped_module, context=context,
          lowering_parameters=mlir.LoweringParameters(
              global_constant_computation=True,
              for_export=True, hoist_constants_as_args=False,
              export_ignore_forward_compatibility=config.export_ignore_forward_compatibility.value,
          ))
      ctx = mlir.LoweringRuleContext(
          module_context=module_context,
          name_stack=source_info_util.new_name_stack(), traceback=None,
          primitive=None,
          avals_in=args_avals_flat, avals_out=None,
          tokens_in=mlir.TokenSet(), tokens_out=None,
          const_lowering={})
      # We compute dim_values from the array arguments.
      new_main_op_array_args = new_main_op.arguments[-nr_array_args:]
      if shape_poly.all_dim_vars(args_avals_flat):
        # TODO(necula): handle module_kept_var_idx in presence of shape
        # polymorphism. For now we ensured upstream that we keep all variables.
        assert len(set(module_kept_var_idx)) == len(args_avals_flat)
        dim_values = mlir.lower_fun(
            functools.partial(shape_poly.compute_dim_vars_from_arg_shapes,
                              args_avals_flat, args_kwargs_tree=args_kwargs_tree),
            multiple_results=True)(ctx, *new_main_op_array_args)
      else:
        dim_values = ()
      # The arguments to pass to the call to orig_main
      orig_main_args: list[ir.Value] = []
      # The platform index and the dimension variables
      for arg, arg_type in zip(
          list(new_main_op.arguments[0:nr_platform_index_args]) + mlir.flatten_ir_values(dim_values),
          platform_input_types + dim_var_input_types):
        if arg.type != arg_type:
          orig_main_args.append(hlo.convert(arg_type, arg))
        else:
          orig_main_args.append(arg)
      # Then the token arguments
      orig_main_args.extend(
          new_main_op.arguments[nr_platform_index_args: nr_platform_index_args + nr_token_args])
      # Then the array arguments. We insert a ConvertOp as the only use of
      # an input argument. This helps the downstream shape refinement because
      # it will set the type of input arguments to static shapes, and this
      # can invalidate the module if the argument is used as the result of a
      # function, or if it appears as the input to a custom_call with
      # output_operand_alias attribute. See b/287386268.
      for arg, arg_type in zip(new_main_op_array_args, array_input_types):
        if arg.type != arg_type:
          orig_main_args.append(hlo.convert(arg_type, arg))
        else:
          orig_main_args.append(arg)
      call = func_dialect.CallOp(orig_output_types,
                                 ir.FlatSymbolRefAttr.get(orig_main_name),
                                 orig_main_args)
      func_dialect.ReturnOp([call.results[idx] for idx in new_main_result_indices])
    symbol_table.set_symbol_name(new_main_op, "main")
    # Remove now-dead private symbols (e.g. unused helpers) with symbol DCE.
    pipeline = passmanager.PassManager.parse(
        'builtin.module(symbol-dce)')
    pipeline.run(wrapped_module.operation)
  return wrapped_module
def _check_lowering(lowering) -> None:
  """Validates that `lowering` can be serialized.

  Raises NotImplementedError when the lowering is not a jit (MeshComputation)
  lowering, when it carries host callbacks or keepalives, when it has
  compile_args we do not recognize, or when a recognized compile_arg has a
  value we do not support.
  """
  if not isinstance(lowering, pxla.MeshComputation):
    raise NotImplementedError(f"serialization is supported only for jit. {lowering}")
  compile_args = lowering.compile_args
  if compile_args["host_callbacks"] or compile_args["keepalive"]:
    raise NotImplementedError("serialization of host_callbacks is not yet implemented")
  # Check that we do not see new compile_args. When we add a compile_args it is
  # safe to add it to the allowed_compile_args if it does not change the semantics
  # or the calling convention of the lowered module.
  allowed_compile_args = {
      "backend", "platforms", "mesh", "global_in_avals",
      "global_out_avals", "in_shardings", "out_shardings", "kept_var_idx",
      "mut", "spmd_lowering", "auto_spmd_lowering",
      "tuple_args", "ordered_effects", "unordered_effects",
      "keepalive", "host_callbacks", "pmap_nreps", "committed",
      "device_assignment", "jaxpr_debug_info", "shape_poly_state",
      "all_default_mem_kind", "in_layouts", "out_layouts", "all_args_info",
      "pgle_profiler", "intermediate_shardings", "context_mesh",
      "num_devices"}
  for compile_arg in compile_args:
    if compile_arg not in allowed_compile_args:
      raise NotImplementedError(f"Unrecognized lowered.compile_args[{compile_arg}]")
  # We have not implemented support for some of the compile_args. Check here
  # that the compile_args have the values that have been implemented.
  # Each entry is (name, predicate that must hold, human-readable expectation).
  value_checks = (
      ("spmd_lowering", lambda v: v, "True"),
      ("auto_spmd_lowering", lambda v: not v, "False"),
      # tuple_args is a compilation flag, does not affect lowering.
      ("tuple_args", lambda v: True, "N/A"),
      # unordered_effects do not change the calling convention. Those from
      # jax.debug will also result in keepalive being non-empty and unsupported
      # custom calls. The CallTfEffect is an exception, but we want to allow
      # that one.
      ("unordered_effects", lambda v: True, "N/A"),
      ("ordered_effects", lambda v: True, "N/A"),
      # used for TPU jax.debug, send/recv. Not supported yet.
      ("host_callbacks", lambda v: not v, "empty"),
      # used on all platforms for callbacks. Not supported yet.
      ("keepalive", lambda v: not v, "empty"),
      ("pmap_nreps", lambda v: v == 1, "1"),
      ("shape_poly_state", lambda v: True, "N/A"),
  )
  not_implemented_msgs = [
      f"{compile_arg} must be {err_msg} and it is {compile_args[compile_arg]}"
      for compile_arg, check_value, err_msg in value_checks
      if compile_arg in compile_args and not check_value(compile_args[compile_arg])
  ]
  if not_implemented_msgs:
    raise NotImplementedError(
        "serialization error, unimplemented lowered.compile_args:\n" +
        "\n".join(not_implemented_msgs))
# CPU (LAPACK) FFI custom-call kernels with stability guarantees; one entry
# per dtype (s/d/c/z prefixes).
_CPU_FFI_KERNELS = [
    "lapack_spotrf_ffi", "lapack_dpotrf_ffi", "lapack_cpotrf_ffi", "lapack_zpotrf_ffi",
    "lapack_sgeqrf_ffi", "lapack_dgeqrf_ffi", "lapack_cgeqrf_ffi", "lapack_zgeqrf_ffi",
    "lapack_sorgqr_ffi", "lapack_dorgqr_ffi", "lapack_cungqr_ffi", "lapack_zungqr_ffi",
    "lapack_ssyevd_ffi", "lapack_dsyevd_ffi", "lapack_cheevd_ffi", "lapack_zheevd_ffi",
    "lapack_sgeev_ffi", "lapack_dgeev_ffi", "lapack_cgeev_ffi", "lapack_zgeev_ffi",
    "lapack_sgesdd_ffi", "lapack_dgesdd_ffi", "lapack_cgesdd_ffi", "lapack_zgesdd_ffi",
    "lapack_sgetrf_ffi", "lapack_dgetrf_ffi", "lapack_cgetrf_ffi", "lapack_zgetrf_ffi",
    "lapack_ssytrd_ffi", "lapack_dsytrd_ffi", "lapack_chetrd_ffi", "lapack_zhetrd_ffi",
    "lapack_sgehrd_ffi", "lapack_dgehrd_ffi", "lapack_cgehrd_ffi", "lapack_zgehrd_ffi",
    "lapack_sgees_ffi", "lapack_dgees_ffi", "lapack_cgees_ffi", "lapack_zgees_ffi",
    "lapack_strsm_ffi", "lapack_dtrsm_ffi", "lapack_ctrsm_ffi", "lapack_ztrsm_ffi",
    "lapack_sgtsv_ffi", "lapack_dgtsv_ffi", "lapack_cgtsv_ffi", "lapack_zgtsv_ffi",
]
# GPU FFI custom-call kernels with stability guarantees; CUDA ("cu*") and
# ROCm ("hip*") variants are listed side by side.
_GPU_FFI_KERNELS = [
    # lu on GPU
    "cu_lu_pivots_to_permutation", "cusolver_getrf_ffi",
    "hip_lu_pivots_to_permutation", "hipsolver_getrf_ffi",
    # qr on GPU
    "cusolver_geqrf_ffi", "cusolver_orgqr_ffi",
    "hipsolver_geqrf_ffi", "hipsolver_orgqr_ffi",
    # cholesky on GPU
    "cusolver_potrf_ffi", "hipsolver_potrf_ffi",
    # eigh on GPU
    "cusolver_syevd_ffi", "hipsolver_syevd_ffi",
    # svd on GPU
    "cusolver_gesvd_ffi", "cusolver_gesvdj_ffi",
    "hipsolver_gesvd_ffi", "hipsolver_gesvdj_ffi",
    # tridiagonal on GPU
    "cusolver_sytrd_ffi",
    # tridiagonal_solve on GPU
    "cusparse_gtsv2_ffi",
]
# These are the JAX custom call target names that are guaranteed to be stable.
# Their backwards compatibility is tested by back_compat_test.py.
# Custom calls whose target is not in this set (and not explicitly allowed via
# DisabledSafetyCheck) cause _check_module to raise.
_CUSTOM_CALL_TARGETS_GUARANTEED_STABLE = {
    *_CPU_FFI_KERNELS,
    *_GPU_FFI_KERNELS,
    "Sharding", "SPMDFullToShardShape", "SPMDShardToFullShape",
    "annotate_device_placement",
    "cu_threefry2x32_ffi",
    # Triton IR does not guarantee stability.
    # "__gpu$xla.gpu.triton",
    # eigh on TPU
    "Eigh",
    # qr and svd on TPU
    "Qr", "ProductOfElementaryHouseholderReflectors",
    # lu on TPU
    "LuDecomposition",
    # ApproxTopK on TPU
    "ApproxTopK", "stablehlo.dynamic_approx_top_k",
    "tf.call_tf_function",  # From jax2tf.call_tf(func, call_tf_graph=True)
    "tpu_custom_call",  # Pallas/TPU kernels
    "AllocateBuffer",  # lax.empty implementation
    "mosaic_gpu_v2",  # Pallas Mosaic GPU kernels
    # TODO(burmako): maintain backwards compatibility for these, until they
    # are upstreamed to StableHLO.
    # See https://github.com/openxla/stablehlo/issues/8.
    "stablehlo.dynamic_reduce_window",
    "stablehlo.dynamic_rng_bit_generator",
    "stablehlo.dynamic_top_k",
    "shape_assertion",  # Used by shape_poly to evaluate assertions
}
# Sharding strings matching this pattern are treated as replicated by
# _check_module (anything else counts as a non-replicated sharding).
check_sharding_pattern = re.compile(r"^({replicated}|{unknown shard_as.*}|.*\[({}, )*{}\]"")$")
def _check_module(mod: ir.Module, *,
                  disabled_checks: Sequence[DisabledSafetyCheck],
                  shardy_enabled: bool) -> bool:
  """Run a number of checks on the module.

  Args:
    mod: the module to check.
    disabled_checks: the safety checks that are disabled.
    shardy_enabled: whether the module was lowered with Shardy; selects which
      attribute ("sharding" vs "mhlo.sharding") is inspected on each op.

  Raises:
    ValueError: if the module contains custom calls whose targets are not in
      the stability allow-list (and not explicitly allowed via
      `disabled_checks`).

  Returns True if the module uses non-replicated shardings.
  """
  sharding_attr = ir.StringAttr.get("Sharding", mod.context)
  # Start from the guaranteed-stable targets and add any targets the caller
  # explicitly opted out of checking.
  allowed_custom_call_targets: set[str] = copy.copy(_CUSTOM_CALL_TARGETS_GUARANTEED_STABLE)
  for dc in disabled_checks:
    target = dc.is_custom_call()
    if target is not None:
      allowed_custom_call_targets.add(target)
  allowed_custom_call_targets_attrs = {
      ir.StringAttr.get(target, mod.context)
      for target in allowed_custom_call_targets}
  disallowed_custom_call_ops: list[str] = []
  module_uses_non_replicated_sharding = False
  def check_sharding(op: ir.Operation, loc: ir.Location):
    # Inspects one op's sharding attribute; records whether the module uses a
    # sharding that does not match the "replicated" pattern.
    try:
      sharding = (op.attributes["sharding"] if shardy_enabled else
                  op.attributes["mhlo.sharding"])
    except KeyError:
      pass
    else:
      nonlocal module_uses_non_replicated_sharding
      try:
        sharding_value = (str(sharding) if shardy_enabled else
                          ir.StringAttr(sharding).value)
      except UnicodeDecodeError:
        # The mhlo.sharding attribute may be in pretty-printed format, or
        # as an encoding of an HloSharding protobuf in some rare situations.
        # We handle the latter by conservatively assuming it is non-replicated.
        module_uses_non_replicated_sharding = True
      else:
        if not re.match(check_sharding_pattern, sharding_value):
          module_uses_non_replicated_sharding = True
  def check_op(op: ir.Operation):
    # Checks a single op: func entry points and sharding constraints for
    # shardings; custom calls for allow-listed targets.
    op_name = op.operation.name
    if op_name == "func.func":
      check_sharding(op.operation, op.location)
    elif op_name == "stablehlo.custom_call":
      call_target_name_attr = op.operation.attributes["call_target_name"]
      if (call_target_name_attr not in allowed_custom_call_targets_attrs):
        disallowed_custom_call_ops.append(f"{op} at {op.location}")
      if call_target_name_attr == sharding_attr:
        check_sharding(op, op.location)
    elif op_name == "sdy.sharding_constraint":
      check_sharding(op, op.location)
  def walk_operations(op):
    # Recursively visits `op` and every op nested in its regions/blocks.
    check_op(op)
    for region in op.operation.regions:
      for block in region:
        for op in block:
          walk_operations(op)
  walk_operations(mod)
  if disallowed_custom_call_ops:
    disallowed_custom_call_ops_str = "\n".join(disallowed_custom_call_ops)
    msg = ("Cannot serialize code with custom calls whose targets have no "
           "compatibility guarantees. "
           "See https://docs.jax.dev/en/latest/export/export.html#compatibility-guarantees-for-custom-calls. "
           "Examples are:\n"
           f"{disallowed_custom_call_ops_str}.\n")
    raise ValueError(msg)
  return module_uses_non_replicated_sharding
def expand_in_shardings(in_shardings: Sequence[LoweringSharding],
                        module_kept_var_idx: Sequence[int],
                        nr_inputs: int) -> Sequence[LoweringSharding]:
  """Expands in_shardings with unspecified shardings for inputs not kept.

  Assumes in_shardings corresponds to module_kept_var_idx.
  """
  assert len(in_shardings) == len(module_kept_var_idx)
  assert nr_inputs >= len(module_kept_var_idx)
  # Map each kept input index to its sharding; every other input is
  # unspecified.
  kept = dict(zip(sorted(module_kept_var_idx), in_shardings))
  return tuple(kept.get(i, sharding_impls.UNSPECIFIED)
               for i in range(nr_inputs))
def _hlo_sharding_to_gspmd_sharding(
    hlo_sharding: HloSharding | None,
    device_assignment: Sequence[_jax.Device]
) -> sharding_impls.GSPMDSharding | None:
  """Wraps `hlo_sharding` as a GSPMDSharding over `device_assignment`.

  A None sharding passes through unchanged.
  """
  return (None if hlo_sharding is None
          else sharding_impls.GSPMDSharding(device_assignment, hlo_sharding))
def _hlo_sharding_to_named_sharding(
    hlo_sharding: HloSharding | None,
    mesh: mesh_lib.Mesh | mesh_lib.AbstractMesh):
  """Converts `hlo_sharding` into a named sharding on `mesh`.

  A None sharding passes through unchanged.
  """
  if hlo_sharding is None:
    return None
  parsed = sharding_impls.parse_flatten_op_sharding(hlo_sharding, mesh)[0]
  return sharding_impls.cached_named_sharding(mesh, parsed)
def _get_vjp_fun(
    primal_fun: Callable,
    *,
    in_tree: tree_util.PyTreeDef,
    in_avals: Sequence[core.AbstractValue],
    out_avals: Sequence[core.AbstractValue],
    in_shardings_hlo: tuple[HloSharding | None, ...],
    out_shardings_hlo: tuple[HloSharding | None, ...],
    device_assignment: Sequence[sharding_impls.Device] | None,
    apply_jit: bool,
    flat_primal_fun: bool = False,
    mesh: mesh_lib.AbstractMesh | None = None,
) -> tuple[Callable, Sequence[core.AbstractValue]]:
  """Builds the VJP function of `primal_fun` along with its input avals.

  Returns a pair of: (a) a function taking the flat primal arguments followed
  by the flat output cotangents, and (b) the avals of that function's
  arguments (the primal in_avals followed by the tangent avals of out_avals).
  """
  # Since jax.vjp does not handle kwargs, it is easier to do all the work
  # here with flattened functions.
  # apply_jit=False is only used for backwards compatibility with the graph
  # serialization. When apply_jit=True, we must pass a device assignment.
  # flat_primal_fun=False is used only from jax2tf, and it means that the
  # `primal_fun` takes PyTree `*args` and `**kwargs`.
  def fun_vjp_jax(*args_and_out_cts_flat_jax):
    # Takes a flat list of primals and output cotangents
    def flattened_primal_fun_jax(*args_flat):
      args, kwargs = in_tree.unflatten(args_flat)
      res = primal_fun(*args, **kwargs)
      res_flat, _ = tree_util.tree_flatten(res)
      return res_flat
    args_flat_jax, out_cts_flat_jax = util.split_list(args_and_out_cts_flat_jax,
                                                      [len(in_avals)])
    _, pullback_jax = api.vjp(primal_fun if flat_primal_fun else flattened_primal_fun_jax,
                              *args_flat_jax)
    return pullback_jax(out_cts_flat_jax)
  # The VJP takes the primal inputs plus one cotangent per primal output.
  vjp_in_avals = list(
      itertools.chain(in_avals,
                      map(lambda a: a.to_tangent_aval(), out_avals)))
  if apply_jit:
    if mesh:
      # Shardy path: express the shardings as named shardings on the mesh.
      vjp_in_shardings = tuple(
          _hlo_sharding_to_named_sharding(s, mesh)
          for s in itertools.chain(in_shardings_hlo, out_shardings_hlo))
      vjp_out_shardings = tuple(_hlo_sharding_to_named_sharding(s, mesh)
                                for s in in_shardings_hlo)
    else:
      # GSPMD path: express the shardings relative to the device assignment.
      assert device_assignment is not None
      vjp_in_shardings = tuple(
          _hlo_sharding_to_gspmd_sharding(s, device_assignment)
          for s in itertools.chain(in_shardings_hlo, out_shardings_hlo))
      vjp_out_shardings = tuple(
          _hlo_sharding_to_gspmd_sharding(s, device_assignment)
          for s in in_shardings_hlo)
    return pjit.pjit(fun_vjp_jax,
                     in_shardings=vjp_in_shardings,
                     out_shardings=vjp_out_shardings), vjp_in_avals
  else:
    return fun_vjp_jax, vjp_in_avals
### Calling the exported function
def call(exported: Exported) -> Callable[..., typing.Array]:
  """Returns a Python callable that invokes the `exported` function.

  The callable takes the same pytree of args and kwargs as the function had
  at export time, and is differentiable through the exported VJP.

  Raises:
    ValueError: if `exported` is not an `Exported`, or (at call time) if the
      invocation args/kwargs do not match the exported pytree structure.
  """
  if not isinstance(exported, Exported):
    raise ValueError(
        "The exported argument must be an export.Exported. "
        f"Found {exported}.")
  @custom_derivatives.custom_vjp
  def f_flat(*args_flat):
    return call_exported_p.bind(*args_flat, exported=exported)
  def f_flat_vjp_fwd(*args_flat):
    # Return the primal arguments as the residual
    # TODO: keep as residuals only the arguments that are needed
    return f_flat(*args_flat), args_flat
  def f_flat_vjp_bwd(residual, ct_res_flat):
    args_flat = residual  # residual is the primal argument flat tuple
    exp_vjp = exported.vjp()
    # ct_res_flat may contain arrays of zeros where exp_vjp expect float0.
    # We make the proper arrays of float0 to invoke exp_vjp.
    def fix_float0_ct(ct_res, expected_aval):
      if expected_aval.dtype != dtypes.float0:
        return ct_res
      return ad_util.zeros_like_jaxval(ct_res)
    ct_res_fixed = map(fix_float0_ct,
                       ct_res_flat, exp_vjp.in_avals[len(args_flat):])
    # The exported VJP takes the primals followed by the output cotangents.
    in_ct_flat = call_exported(exp_vjp)(*args_flat, *ct_res_fixed)
    return in_ct_flat
  f_flat.defvjp(f_flat_vjp_fwd, f_flat_vjp_bwd)
  def f_imported(*args, **kwargs):
    # since custom_vjp does not support kwargs, flatten the function first.
    args_flat, in_tree = tree_util.tree_flatten((args, kwargs))
    if in_tree != exported.in_tree:
      # Give errors with the precise tree difference; use fake leaves so we can
      # use tree_util.equality_errors.
      in_args = in_tree.unflatten([0] * in_tree.num_leaves)
      exp_in_args = exported.in_tree.unflatten([0] * exported.in_tree.num_leaves)
      msg = (
          "The invocation args and kwargs must have the same pytree structure "
          f"as when the function '{exported.fun_name}' was exported, but they "
          "have the following structural differences:\n" +
          ("\n".join(
             f"  - {shape_poly.args_kwargs_path_to_str(path)} is a {thing1} in the invocation and a "
             f"{thing2} when exported, so {explanation}.\n"
             for path, thing1, thing2, explanation
             in tree_util.equality_errors(in_args, exp_in_args))))
      raise ValueError(msg)
    res_flat = f_flat(*args_flat)
    return exported.out_tree.unflatten(res_flat)
  return f_imported
call_exported = call  # Backwards-compatible alias for `call`.
# A JAX primitive for invoking a serialized JAX function.
call_exported_p = core.Primitive("call_exported")
call_exported_p.multiple_results = True  # the callee returns a flat tuple of results
@util.cache()
def _call_exported_abstract_eval(
    *in_avals: core.AbstractValue,
    exported: Exported
) -> tuple[tuple[core.AbstractValue, ...], set[effects.Effect]]:
  """Abstract evaluation rule for the `call_exported` primitive.

  Checks that the actual argument avals match the exported ones (rank, dtype,
  and any constant dimensions), discharges the exported shape constraints
  statically, and computes the output avals by substituting the dimension
  variable values derived from the actual argument shapes.
  """
  exported_dim_vars = shape_poly.all_dim_vars(exported.in_avals)
  assert len(in_avals) == len(exported.in_avals)  # since the pytrees have the same structure
  # Check that the expected shapes match the actual ones
  for arg_idx, (exp_aval, actual_aval) in enumerate(zip(exported.in_avals, in_avals)):
    if not isinstance(actual_aval, core.ShapedArray):
      raise ValueError(f"Expected ShapedArray but got: {actual_aval}")
    def pp_arg_dim(dim_idx: int | None) -> str:
      # Pretty-prints a descriptor like "args[0].shape[1]" for error messages.
      return shape_poly.pretty_print_dimension_descriptor(exported.in_tree,
                                                          arg_idx, dim_idx)
    if len(exp_aval.shape) != len(actual_aval.shape):
      raise ValueError(
          f"Rank mismatch for {pp_arg_dim(None)}: expected {exp_aval.shape} "
          f"and called with {actual_aval.shape}")
    if exp_aval.dtype != actual_aval.dtype:
      raise ValueError(
          f"Dtype mismatch for {pp_arg_dim(None)}: expected {exp_aval.dtype} "
          f"and called with {actual_aval.dtype}")
    for dim_idx, aval_d in enumerate(exp_aval.shape):
      # If the exp_aval has a constant dimension then the actual argument must have
      # a matching constant dimension.
      if core.is_constant_dim(aval_d):
        if (not core.is_constant_dim(actual_aval.shape[dim_idx]) or
            aval_d != actual_aval.shape[dim_idx]):
          raise ValueError(
              f"Shape mismatch for {pp_arg_dim(dim_idx)} "
              "(expected same constant): "
              f"expected {exp_aval.shape} and called with {actual_aval.shape}")
  # Must express the exported_dim_vars in terms of the shapes in in_avals.
  solution, shape_constraints, synth_dim_vars = shape_poly.solve_dim_vars(
      exported.in_avals, args_kwargs_tree=exported.in_tree)
  synthetic_env: shape_poly.DimVarEnv = {
      vname: in_avals[arg_idx].shape[dim_idx]
      for (vname, arg_idx, dim_idx) in synth_dim_vars}
  synthetic_eval = shape_poly.ShapeEvaluator(synthetic_env)
  # We discharge all the constraints statically. This results in much simpler
  # composability (because we do not have to worry about the constraints of the
  # Exported called recursively; we only need to worry about entry-point
  # constraints). This also makes sense from a composability point of view,
  # because we get the same errors if we invoke the exported module, or if we
  # trace the exported function. Consider for example, an exported module with
  # signature `f32[a, a] -> f32[a]`. If we invoke the module with an argument
  # `f32[c, d]` it is better to fail because `c == d` is inconclusive, than
  # succeed and add a compile-time check that `c == d`. In the latter case,
  # it would be ambiguous whether we should continue tracing with a result
  # of type `f32[c]` or `f32[d]`.
  shape_constraints.check_statically(synthetic_eval)
  exported_dim_values = [synthetic_eval.evaluate(solution[var])  # type: ignore[arg-type]
                         for var in exported_dim_vars]
  out_avals = tuple(
      core.ShapedArray(core.evaluate_shape(out_aval.shape, exported_dim_vars,
                                           *exported_dim_values),
                       dtype=out_aval.dtype, weak_type=out_aval.weak_type,
                       memory_space=out_aval.memory_space)
      for out_aval in exported.out_avals)
  return out_avals, set(exported.ordered_effects + exported.unordered_effects)
# Register the effectful abstract evaluation rule for the primitive.
call_exported_p.def_effectful_abstract_eval(_call_exported_abstract_eval)
def _call_exported_impl(*args, exported: Exported):
  # Eager evaluation goes through the standard primitive dispatch path.
  return dispatch.apply_primitive(call_exported_p, *args, exported=exported)
call_exported_p.def_impl(_call_exported_impl)
def get_mesh_from_symbol(symtab: ir.SymbolTable) -> mesh_lib.AbstractMesh:
  """Reconstructs an abstract mesh from the module's "mesh" symbol.

  Returns the empty abstract mesh when the symbol is absent or has no axes.
  """
  if "mesh" not in symtab:
    return mesh_lib.empty_abstract_mesh
  mesh_attr = sdy.MeshAttr(symtab["mesh"].mesh)
  axis_attrs = [sdy.MeshAxisAttr(axis) for axis in mesh_attr.axes]
  if not axis_attrs:
    return mesh_lib.empty_abstract_mesh
  return mesh_lib.AbstractMesh(tuple(axis.size for axis in axis_attrs),
                               tuple(axis.name for axis in axis_attrs))
def has_sdy_meshes_in_frontend_attributes(submodule: ir.Module) -> bool:
  """Returns True if the module's frontend attributes embed Shardy meshes."""
  module_attrs = submodule.operation.attributes
  if "mhlo.frontend_attributes" not in module_attrs:
    return False
  return "xla.sdy.meshes" in module_attrs["mhlo.frontend_attributes"]
def has_sdy_mesh(symtab: ir.SymbolTable, submodule: ir.Module) -> bool:
  """Returns True if the module carries a Shardy mesh.

  First looks for one of the known mesh symbol names; falls back to checking
  the module's frontend attributes.
  """
  for candidate in ("mesh", "empty_mesh", "maximal_mesh_0"):
    if candidate in symtab:
      return isinstance(symtab[candidate], sdy.MeshOp)
  return has_sdy_meshes_in_frontend_attributes(submodule)
def _call_exported_lowering(ctx: mlir.LoweringRuleContext, *args,
exported: Exported):
if exported.uses_global_constants:
ctx.module_context.shape_poly_state.uses_dim_vars = True
submodule = ir.Module.parse(exported.mlir_module())
symtab = ir.SymbolTable(submodule.operation)
shardy_enabled = has_sdy_mesh(symtab, submodule)
if shardy_enabled:
if not config.use_shardy_partitioner.value:
raise ValueError(
"The function was exported with shardy enabled but you are calling "
"it with Shardy disabled. Please enable Shardy using "
"`--jax_use_shardy_partitioner=True`.")
# TODO(b/422690222): remove this pass once we don't need to support 6m
# old exported modules.
if has_sdy_meshes_in_frontend_attributes(submodule):
with submodule.context:
pipeline = passmanager.PassManager.parse(
'builtin.module(xla-sdy-round-trip-import-shardy-attrs)')
pipeline.run(submodule.operation)
with submodule.context:
pipeline = passmanager.PassManager.parse(
'builtin.module(sdy-lift-inlined-meshes)')
pipeline.run(submodule.operation)
mesh = None
if shardy_enabled:
mesh = get_mesh_from_symbol(symtab)
axis_context = ctx.module_context.axis_context
if isinstance(axis_context, sharding_impls.ShardingContext):
num_devices = axis_context.num_devices
elif isinstance(axis_context, sharding_impls.SPMDAxisContext):
num_devices = axis_context.mesh.size
elif isinstance(axis_context, sharding_impls.ReplicaAxisContext):
num_devices = axis_context.axis_env.nreps
else:
raise NotImplementedError(type(axis_context))
if num_devices != exported.nr_devices and exported.nr_devices != 1:
raise ValueError(
f"Function {exported.fun_name} was exported for "
f"{exported.nr_devices} devices and is called in a context with "
f"{num_devices} devices, which is not allowed."
)
# Apply in_shardings
if mesh:
# A mesh only exists if Shardy is enabled.
args = tuple(
wrap_with_sharding(
ctx, x, x_aval,
_hlo_sharding_to_named_sharding(x_sharding, mesh), use_shardy=True) # type: ignore[arg-type]
for x, x_aval, x_sharding in zip(args, ctx.avals_in, exported.in_shardings_hlo))
else:
# Since there is no mesh - either due to shardy being disabled or the loaded
# function being lowered for GSPMD (so no shardy mesh) - need to create a
# GSPMD sharding from the HLO sharding (can't use shardy lowering).
args = tuple(
wrap_with_sharding(ctx, x, x_aval, x_sharding, use_shardy=False)
for x, x_aval, x_sharding in zip(args, ctx.avals_in, exported.in_shardings_hlo))
# The called function may have been exported with polymorphic shapes and called
# now with more refined shapes. We insert hlo.ConvertOp to ensure the module
# is valid.
def convert_shape(x: ir.Value, x_aval: core.AbstractValue, new_aval: core.AbstractValue) -> ir.Value:
new_ir_type = mlir.aval_to_ir_type(new_aval)
if x.type != new_ir_type:
return hlo.convert(mlir.aval_to_ir_type(new_aval), x)
else:
return x
callee_type = symtab["main"].type
# TODO: maybe cache multiple calls
fn = mlir.merge_mlir_modules(ctx.module_context.module,
f"call_exported_{exported.fun_name}",
submodule,
dst_symtab=ctx.module_context.symbol_table)
submodule_args: list[ir.Value] = []
# All the platforms for the current lowering must be among the platforms
# for which the callee was lowered.
lowering_platforms = ctx.module_context.platforms
callee_lowering_platform_index: list[int] = []
for platform in lowering_platforms:
if platform in exported.platforms:
callee_lowering_platform_index.append(
exported.platforms.index(platform))
elif DisabledSafetyCheck.platform() in exported.disabled_safety_checks:
callee_lowering_platform_index.append(0)
else:
raise ValueError(
f"Function '{exported.fun_name}' was exported for "
f"platforms '{exported.platforms}' but it is used "
f"on '{lowering_platforms}'.")
if len(exported.platforms) > 1:
# The exported module takes a platform index argument
if len(lowering_platforms) > 1:
current_platform_idx = ctx.dim_var_values[0]
else:
current_platform_idx = cast(ir.Value, mlir.ir_constant(np.int32(0)))
# Compute the rule index based on the current platform
i32_type = mlir.aval_to_ir_type(core.ShapedArray((), dtype=np.int32))
if current_platform_idx.type != i32_type:
current_platform_idx = hlo.ConvertOp(i32_type, current_platform_idx)
callee_platform_idx = hlo.CaseOp([i32_type],
index=current_platform_idx,
num_branches=len(lowering_platforms))
for i in range(len(lowering_platforms)):
branch = callee_platform_idx.regions[i].blocks.append()
with ir.InsertionPoint(branch):
hlo.return_([mlir.ir_constant(
np.int32(callee_lowering_platform_index[i]))])
if callee_platform_idx.result.type != callee_type.inputs[0]:
callee_platform_idx = hlo.ConvertOp(callee_type.inputs[0],
callee_platform_idx)
submodule_args.append(callee_platform_idx)
else:
assert len(lowering_platforms) == 1
ordered_effects = exported.ordered_effects
for eff in ordered_effects:
token_in = ctx.tokens_in.get(eff)
submodule_args.append(token_in)
kept_args = [
convert_shape(a, a_aval, exported_in_aval)
for i, (a, a_aval, exported_in_aval) in enumerate(zip(args, ctx.avals_in, exported.in_avals))
if i in exported.module_kept_var_idx]
submodule_args = submodule_args + kept_args
call = func_dialect.CallOp(callee_type.results,
ir.FlatSymbolRefAttr.get(fn),
submodule_args)
if ordered_effects:
tokens_out = {eff: (call.results[effect_idx],)
for effect_idx, eff in enumerate(ordered_effects)}
ctx.set_tokens_out(mlir.TokenSet(tokens_out))
# The ctx.avals_out already contain the abstract values refined by
# _call_exported_abstract_eval.
results = tuple(
convert_shape(out, out_aval, refined_out_aval)
for out, out_aval, refined_out_aval in zip(call.results[len(ordered_effects):],
exported.out_avals, ctx.avals_out))
# Apply out_shardings
if mesh:
# A mesh only exists if Shardy is enabled.
results = tuple(
wrap_with_sharding(
ctx, x, x_aval, _hlo_sharding_to_named_sharding(x_sharding, mesh),
use_shardy=True) # type: ignore[arg-type]
for x, x_aval, x_sharding in zip(results, ctx.avals_out, exported.out_shardings_hlo))
else:
# Since there is no mesh - either due to shardy being disabled or the loaded
# function being lowered for GSPMD (so no shardy mesh) - need to create a
# GSPMD sharding from the HLO sharding (can't use shardy lowering).
results = tuple(
wrap_with_sharding(ctx, x, x_aval, x_sharding, use_shardy=False)
for x, x_aval, x_sharding in zip(results, ctx.avals_out, exported.out_shardings_hlo))
return results
mlir.register_lowering(call_exported_p, _call_exported_lowering)
def wrap_with_sharding(
ctx: mlir.LoweringRuleContext,
x: ir.Value,
x_aval: core.AbstractValue,
x_sharding: sharding_impls.NamedSharding | sharding_impls.GSPMDSharding | HloSharding | None,
use_shardy: bool,
) -> ir.Value:
if x_sharding is None:
return x
if use_shardy:
x_sharding = x_sharding._to_sdy_sharding(x_aval.ndim) # type: ignore
else:
x_sharding = x_sharding.to_proto() # type: ignore
return mlir.wrap_with_sharding_op(ctx, x, x_aval, x_sharding, # type: ignore[arg-type]
allow_shardy_lowering=use_shardy)
| _BuildFromChildren |
python | kamyu104__LeetCode-Solutions | Python/check-if-binary-string-has-at-most-one-segment-of-ones.py | {
"start": 29,
"end": 183
} | class ____(object):
def checkOnesSegment(self, s):
"""
:type s: str
:rtype: bool
"""
return "01" not in s
| Solution |
python | mlflow__mlflow | mlflow/exceptions.py | {
"start": 6671,
"end": 7320
} | class ____(MlflowTracingException):
"""Exception thrown for trace data related error"""
def __init__(
self, error_code: str, request_id: str | None = None, artifact_path: str | None = None
):
if request_id:
self.ctx = f"request_id={request_id}"
elif artifact_path:
self.ctx = f"path={artifact_path}"
if error_code == NOT_FOUND:
super().__init__(f"Trace data not found for {self.ctx}", error_code=error_code)
elif error_code == INVALID_STATE:
super().__init__(f"Trace data is corrupted for {self.ctx}", error_code=error_code)
| MlflowTraceDataException |
python | huggingface__transformers | tests/models/instructblip/test_modeling_instructblip.py | {
"start": 13685,
"end": 17542
} | class ____:
def __init__(
self,
parent,
vision_kwargs=None,
qformer_kwargs=None,
text_kwargs=None,
is_training=True,
num_query_tokens=10,
image_token_index=4,
):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {}
if text_kwargs is None:
text_kwargs = {}
self.parent = parent
self.vision_model_tester = InstructBlipVisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = InstructBlipQFormerModelTester(parent, **qformer_kwargs)
self.text_model_tester = InstructBlipTextModelDecoderOnlyTester(parent, **text_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.seq_length = self.text_model_tester.seq_length + num_query_tokens # need seq_length for common tests
self.is_training = is_training
self.num_query_tokens = num_query_tokens
self.image_token_index = image_token_index
def prepare_config_and_inputs(self):
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
_, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs()
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
config = self.get_config()
vision_tokens = (
torch.ones((input_ids.shape[0], self.num_query_tokens), device=torch_device, dtype=input_ids.dtype)
* self.image_token_index
)
input_ids[input_ids == self.image_token_index] = self.text_model_tester.pad_token_id
input_ids = torch.cat([vision_tokens, input_ids], dim=-1)
vision_attention_mask = torch.ones_like(vision_tokens)
attention_mask = torch.cat([vision_attention_mask, attention_mask], dim=-1)
return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values
def get_config(self):
return InstructBlipConfig(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
text_config=self.text_model_tester.get_config(),
num_query_tokens=self.num_query_tokens,
image_token_index=self.image_token_index,
)
def create_and_check_for_conditional_generation(
self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values
):
model = InstructBlipForConditionalGeneration(config).to(torch_device).eval()
with torch.no_grad():
result = model(
pixel_values,
input_ids=input_ids,
attention_mask=attention_mask,
qformer_input_ids=qformer_input_ids,
qformer_attention_mask=qformer_attention_mask,
)
expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length
self.parent.assertEqual(
result.logits.shape,
(self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"qformer_input_ids": qformer_input_ids,
"qformer_attention_mask": qformer_attention_mask,
}
return config, inputs_dict
@require_torch
| InstructBlipForConditionalGenerationDecoderOnlyModelTester |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py | {
"start": 739,
"end": 4172
} | class ____(RpcAgentTestFixture):
@dist_init
def test_get_gradients(self):
@torch.jit.script
def dist_get_gradients(context_id: int) -> dict[Tensor, Tensor]:
return dist_autograd.get_gradients(context_id)
FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_dist_backward(self):
if self.rank != 0:
return
@torch.jit.script
def dist_backward_script(context_id: int, loss: torch.Tensor):
dist_autograd.backward(context_id, [loss])
FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
dist_backward_script(context_id, loss)
@dist_init
def test_jit_fork_within_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
res = fork_add(t1, t2, dst_worker_name)
loss = res.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
@dist_init
def test_restore_context_after_swtich_to_jit_thread(self):
if self.rank != 0:
return
@torch.jit.script
def forward_script(
context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
) -> tuple[Tensor, Tensor]:
res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
res1 = res1_fut.wait() # After this, the script runs in a new JIT thread.
loss1 = res1.sum()
# SendRpcBackward is not attached, since DistAutogradContext is lost here.
res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
res2 = res2_fut.wait()
loss2 = res2.sum()
return loss1, loss2
with dist_autograd.context() as context_id:
t1 = torch.ones((2, 3), requires_grad=True)
t2 = torch.ones((2, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
dist_autograd.backward(context_id, [loss0, loss1])
grad0, grad1 = dist_autograd.get_gradients(context_id)
self.assertEqual(grad0, grad1)
| JitDistAutogradTest |
python | matplotlib__matplotlib | lib/matplotlib/axis.py | {
"start": 12960,
"end": 15037
} | class ____(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in data coords, y in axes coords
ax = self.axes
self.tick1line.set(
data=([0], [0]), transform=ax.get_xaxis_transform("tick1"))
self.tick2line.set(
data=([0], [1]), transform=ax.get_xaxis_transform("tick2"))
self.gridline.set(
data=([0, 0], [0, 1]), transform=ax.get_xaxis_transform("grid"))
# the y loc is 3 points below the min of y axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=0, y=1,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def _apply_tickdir(self, tickdir):
# docstring inherited
super()._apply_tickdir(tickdir)
mark1, mark2 = {
'out': (mlines.TICKDOWN, mlines.TICKUP),
'in': (mlines.TICKUP, mlines.TICKDOWN),
'inout': ('|', '|'),
}[self._tickdir]
self.tick1line.set_marker(mark1)
self.tick2line.set_marker(mark2)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_xdata((loc,))
self.tick2line.set_xdata((loc,))
self.gridline.set_xdata((loc,))
self.label1.set_x(loc)
self.label2.set_x(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervalx
| XTick |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 932478,
"end": 932755
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
OrganizationAuditEntryData,
RepositoryAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
| RepoConfigDisableAnonymousGitAccessAuditEntry |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 122792,
"end": 123075
} | class ____(sgqlc.types.Enum):
"""Properties by which workflow run connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order workflow runs by most recently created
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| WorkflowRunOrderField |
python | nedbat__coveragepy | lab/parser.py | {
"start": 431,
"end": 7481
} | class ____:
"""A main for code parsing experiments."""
def main(self, args):
"""A main function for trying the code from the command line."""
parser = optparse.OptionParser()
parser.add_option("-d", action="store_true", dest="dis", help="Disassemble")
parser.add_option(
"-R", action="store_true", dest="recursive", help="Recurse to find source files"
)
parser.add_option("-q", action="store_true", dest="quiet", help="Suppress output")
parser.add_option("-s", action="store_true", dest="source", help="Show analyzed source")
parser.add_option("-t", action="store_true", dest="tokens", help="Show tokens")
options, args = parser.parse_args()
if options.recursive:
if args:
root = args[0]
else:
root = "."
for root, _, _ in os.walk(root):
for f in glob.glob(root + "/*.py"):
if not options.quiet:
print(f"Parsing {f}")
self.one_file(options, f)
elif not args:
parser.print_help()
else:
self.one_file(options, args[0])
def one_file(self, options, filename):
"""Process just one file."""
# `filename` can have a line number suffix. In that case, extract those
# lines, dedent them, and use that. This is for trying test cases
# embedded in the test files.
if match := re.search(r"^(.*):(\d+)-(\d+)$", filename):
filename, start, end = match.groups()
start, end = int(start), int(end)
else:
start = end = None
try:
text = get_python_source(filename)
if start is not None:
lines = text.splitlines(True)
text = textwrap.dedent("".join(lines[start - 1 : end]).replace("\\\\", "\\"))
pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover")
pyparser.parse_source()
except Exception as err:
print(f"{err}")
return
if options.dis:
print("Main code:")
disassemble(pyparser.text)
arcs = pyparser.arcs()
if options.source or options.tokens:
pyparser.show_tokens = options.tokens
pyparser.parse_source()
if options.source:
arc_chars = self.arc_ascii_art(arcs)
if arc_chars:
arc_width = max(len(a) for a in arc_chars.values())
exit_counts = pyparser.exit_counts()
for lineno, ltext in enumerate(pyparser.text.splitlines(), start=1):
marks = [" "] * 6
a = " "
if lineno in pyparser.raw_statements:
marks[0] = "-"
if lineno in pyparser.statements:
marks[1] = "="
exits = exit_counts.get(lineno, 0)
if exits > 1:
marks[2] = str(exits)
if lineno in pyparser.raw_docstrings:
marks[3] = '"'
if lineno in pyparser.raw_excluded:
marks[4] = "X"
elif lineno in pyparser.excluded:
marks[4] = "×"
if lineno in pyparser.multiline_map.values():
marks[5] = "o"
elif lineno in pyparser.multiline_map.keys():
marks[5] = "."
if arc_chars:
a = arc_chars[lineno].ljust(arc_width)
else:
a = ""
if not options.quiet:
print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext))
def arc_ascii_art(self, arcs):
"""Draw arcs as ascii art.
Returns a dictionary mapping line numbers to ascii strings to draw for
that line.
"""
plus_ones = set()
arc_chars = collections.defaultdict(str)
for lfrom, lto in sorted(arcs):
if lfrom < 0:
arc_chars[lto] += "v"
elif lto < 0:
arc_chars[lfrom] += "^"
else:
if lfrom == lto - 1:
plus_ones.add(lfrom)
arc_chars[lfrom] += "" # ensure this line is in arc_chars
continue
if lfrom < lto:
l1, l2 = lfrom, lto
else:
l1, l2 = lto, lfrom
w = first_all_blanks(arc_chars[l] for l in range(l1, l2 + 1))
for l in range(l1, l2 + 1):
if l == lfrom:
ch = "<"
elif l == lto:
ch = ">"
else:
ch = "|"
arc_chars[l] = set_char(arc_chars[l], w, ch)
# Add the plusses as the first character
for lineno, arcs in arc_chars.items():
arc_chars[lineno] = ("+" if lineno in plus_ones else " ") + arcs
return arc_chars
def all_code_objects(code):
"""Iterate over all the code objects in `code`."""
stack = [code]
while stack:
# We're going to return the code object on the stack, but first
# push its children for later returning.
code = stack.pop()
stack.extend(c for c in code.co_consts if isinstance(c, types.CodeType))
yield code
def disassemble(text):
"""Disassemble code, for ad-hoc experimenting."""
code = compile(text, "", "exec", dont_inherit=True)
for code_obj in all_code_objects(code):
if text:
srclines = text.splitlines()
else:
srclines = None
print("\n%s: " % code_obj)
upto = None
for inst in dis.get_instructions(code_obj):
if inst.starts_line is not None:
if srclines:
upto = upto or inst.starts_line - 1
while upto <= inst.starts_line - 1:
print("{:>100}{}".format("", srclines[upto]))
upto += 1
elif inst.offset > 0:
print("")
line = inst._disassemble()
print(f"{line:<70}")
print("")
def set_char(s, n, c):
"""Set the nth char of s to be c, extending s if needed."""
s = s.ljust(n)
return s[:n] + c + s[n + 1 :]
def blanks(s):
"""Return the set of positions where s is blank."""
return {i for i, c in enumerate(s) if c == " "}
def first_all_blanks(ss):
"""Find the first position that is all blank in the strings ss."""
ss = list(ss)
blankss = blanks(ss[0])
for s in ss[1:]:
blankss &= blanks(s)
if blankss:
return min(blankss)
else:
return max(len(s) for s in ss)
if __name__ == "__main__":
ParserMain().main(sys.argv[1:])
| ParserMain |
python | scrapy__scrapy | tests/test_downloader_handler_twisted_http11.py | {
"start": 855,
"end": 930
} | class ____(HTTP11DownloadHandlerMixin, TestHttps11Base):
pass
| TestHttps11 |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 49548,
"end": 49648
} | class ____:
def __str__(self) -> str:
return "REMOVED"
REMOVED = RemovedArg()
| RemovedArg |
python | encode__starlette | starlette/convertors.py | {
"start": 131,
"end": 388
} | class ____(Generic[T]):
regex: ClassVar[str] = ""
def convert(self, value: str) -> T:
raise NotImplementedError() # pragma: no cover
def to_string(self, value: T) -> str:
raise NotImplementedError() # pragma: no cover
| Convertor |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/unique_operation_names.py | {
"start": 69,
"end": 1114
} | class ____(ValidationRule):
__slots__ = 'known_operation_names',
def __init__(self, context):
super(UniqueOperationNames, self).__init__(context)
self.known_operation_names = {}
def enter_OperationDefinition(self, node, key, parent, path, ancestors):
operation_name = node.name
if not operation_name:
return
if operation_name.value in self.known_operation_names:
self.context.report_error(GraphQLError(
self.duplicate_operation_name_message(operation_name.value),
[self.known_operation_names[operation_name.value], operation_name]
))
else:
self.known_operation_names[operation_name.value] = operation_name
return False
def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
return False
@staticmethod
def duplicate_operation_name_message(operation_name):
return 'There can only be one operation named "{}".'.format(operation_name)
| UniqueOperationNames |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-file/source_file/client.py | {
"start": 24911,
"end": 25392
} | class ____(URLFile):
"""Updating of default logic:
This connector shouldn't work with local files.
"""
def __init__(self, url: str, provider: dict, binary=None, encoding=None):
storage_name = provider["storage"].lower()
if url.startswith("file://") or storage_name == LOCAL_STORAGE_NAME:
raise RuntimeError("the local file storage is not supported by this connector.")
super().__init__(url, provider, binary, encoding)
| URLFileSecure |
python | walkccc__LeetCode | solutions/1458. Max Dot Product of Two Subsequences/1458.py | {
"start": 0,
"end": 454
} | class ____:
def maxDotProduct(self, A: list[int], B: list[int]) -> int:
m = len(A)
n = len(B)
# dp[i][j] := the maximum dot product of the two subsequences nums[0..i)
# and nums2[0..j)
dp = [[-math.inf] * (n + 1) for _ in range(m + 1)]
for i in range(m):
for j in range(n):
dp[i + 1][j + 1] = max(dp[i][j + 1], dp[i + 1][j],
max(0, dp[i][j]) + A[i] * B[j])
return dp[m][n]
| Solution |
python | google__jax | tests/lax_numpy_operators_test.py | {
"start": 23575,
"end": 35993
} | class ____(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy operators."""
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if np_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a
for a in out]
return f
@parameterized.named_parameters(_create_named_parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op_name=rec.name, rng_factory=rec.rng_factory,
check_dtypes=rec.check_dtypes, tolerance=rec.tolerance,
inexact=rec.inexact, kwargs=rec.kwargs or {}, alias=rec.alias)],
[dict(shapes=shapes, dtypes=dtypes)
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes))],
)
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testOp(self, op_name, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact, kwargs, alias):
np_op = partial(getattr(np, op_name) if hasattr(np, op_name) else getattr(np, alias), **kwargs)
jnp_op = partial(getattr(jnp, op_name), **kwargs)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(np_op)
rng = rng_factory(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
if jtu.test_device_matches(["tpu"]) and op_name in (
"arccosh", "arcsinh", "sinh", "cosh", "tanh", "sin", "cos", "tan",
"log", "log1p", "log2", "log10", "exp", "expm1", "exp2", "pow",
"power", "logaddexp", "logaddexp2", "i0", "acosh", "asinh"):
tol = jtu.join_tolerance(tol, 1e-4)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op, inexact), jnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(_create_named_parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(name=rec.name, rng_factory=rec.rng_factory, tol=rec.tolerance)],
[dict(shapes=shapes, dtypes=dtypes)
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes))],
)
for rec in JAX_OPERATOR_OVERLOADS)))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory(self.rng())
# np and jnp arrays have different type promotion rules; force the use of
# jnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(_create_named_parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(name=rec.name, rng_factory=rec.rng_factory,
op_tolerance=rec.tolerance)],
[dict(shapes=shapes, dtypes=dtypes)
for shapes in filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes))],
)
for rec in JAX_RIGHT_OPERATOR_OVERLOADS)))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest("scalars not implemented") # TODO(mattjj): clean up
rng = rng_factory(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol)
@jtu.sample_product(
name=[rec.name for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2],
othertype=[dict, list, tuple, set],
)
def testOperatorOverloadErrors(self, name, othertype):
# Test that binary operators with builtin collections raise a TypeError
# and report the types in the correct order.
data = [(1, 2), (2, 3)]
arr = jnp.array(data)
other = othertype(data)
msg = f"unsupported operand type.* 'ArrayImpl' and '{othertype.__name__}'"
with self.assertRaisesRegex(TypeError, msg):
getattr(arr, name)(other)
@jtu.sample_product(
name=[rec.name for rec in JAX_RIGHT_OPERATOR_OVERLOADS if rec.nargs == 2],
othertype=[dict, list, tuple, set],
)
def testRightOperatorOverloadErrors(self, name, othertype):
# Test that binary operators with builtin collections raise a TypeError
# and report the types in the correct order.
data = [(1, 2), (2, 3)]
arr = jnp.array(data)
other = othertype(data)
msg = f"unsupported operand type.* '{othertype.__name__}' and 'ArrayImpl'"
with self.assertRaisesRegex(TypeError, msg):
getattr(arr, name)(other)
@jtu.sample_product(
[dict(op_name=rec.name, rng_factory=rec.rng_factory, dtype=dtype)
for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
for dtype in rec.dtypes],
)
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
rng = rng_factory(self.rng())
arg = jax.device_put(rng((), dtype))
op = getattr(operator, op_name)
other = _OverrideEverything()
assert op(other, arg) is other
assert op(arg, other) is other
other = _OverrideNothing()
if op_name == "__eq__":
assert op(other, arg) is False
assert op(arg, other) is False
elif op_name == "__ne__":
assert op(other, arg) is True
assert op(arg, other) is True
else:
with self.assertRaises(TypeError):
op(other, arg)
with self.assertRaises(TypeError):
op(arg, other)
@parameterized.named_parameters(_create_named_parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(name=rec.name, rng_factory=rec.rng_factory, alias=rec.alias)],
shapes=filter(
_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(rec.shapes, rec.nargs)),
dtypes=filter(
_dtypes_are_compatible_for_bitwise_ops,
itertools.combinations_with_replacement(rec.dtypes, rec.nargs)),
)
for rec in JAX_BITWISE_OP_RECORDS)))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testBitwiseOp(self, name, rng_factory, shapes, dtypes, alias):
np_op = getattr(np, name) if hasattr(np, name) else getattr(np, alias)
jnp_op = getattr(jnp, name)
rng = rng_factory(self.rng())
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op), jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@jtu.sample_product(
shape=array_shapes,
dtype=int_dtypes + unsigned_dtypes,
)
def testBitwiseCount(self, shape, dtype):
rng = jtu.rand_fullrange(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np.bitwise_count, jnp.bitwise_count, args_maker)
self._CompileAndCheck(jnp.bitwise_count, args_maker)
@jtu.sample_product(
[dict(dtypes=dtypes, shapes=shapes)
for shapes in filter(
_shapes_are_broadcast_compatible,
# TODO numpy always promotes to shift dtype for zero-dim shapes:
itertools.combinations_with_replacement(nonzerodim_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))
],
op=[jnp.left_shift, jnp.bitwise_left_shift, jnp.right_shift, jnp.bitwise_right_shift],
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
dtype, shift_dtype = dtypes
signed_mix = np.issubdtype(dtype, np.signedinteger) != \
np.issubdtype(shift_dtype, np.signedinteger)
has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
promoting_to_64 = has_32 and signed_mix
if promoting_to_64 and not config.enable_x64.value:
self.skipTest("np.right_shift/left_shift promoting to int64"
"differs from jnp in 32 bit mode.")
info, shift_info = map(np.iinfo, dtypes)
x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
# NumPy requires shifts to be non-negative and below the bit width:
shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))
np_op = getattr(np, op.__name__)
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CompileAndCheck(op, args_maker)
self._CheckAgainstNumpy(np_op, op, args_maker)
# This test can be deleted once we test against NumPy 2.0.
@jtu.sample_product(
shape=all_shapes,
dtype=complex_dtypes
)
def testSignComplex(self, shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = np.sign
jnp_fun = jnp.sign
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testDeferToNamedTuple(self):
class MyArray(NamedTuple):
arr: jax.Array
def __mul__(self, other):
return MyArray(self.arr * other)
def __rmul__(self, other):
return MyArray(other * self.arr)
a = MyArray(jnp.ones(4))
b = jnp.ones(4)
self.assertIsInstance(a * b, MyArray)
self.assertIsInstance(jax.jit(operator.mul)(a, b), MyArray)
self.assertIsInstance(b * a, MyArray)
self.assertIsInstance(jax.jit(operator.mul)(b, a), MyArray)
def testI0Grad(self):
# Regression test for https://github.com/jax-ml/jax/issues/11479
dx = jax.grad(jax.numpy.i0)(0.0)
self.assertArraysEqual(dx, 0.0)
@jtu.sample_product(
shape=all_shapes,
dtype=default_dtypes,
)
def testSpacingIntegerInputs(self, shape, dtype):
rng = jtu.rand_int(self.rng(), low=-64, high=64)
args_maker = lambda: [rng(shape, dtype)]
computation_dtype = jnp.spacing(rng(shape, dtype)).dtype
np_func = lambda x: np.spacing(np.array(x).astype(computation_dtype))
self._CheckAgainstNumpy(np_func, jnp.spacing, args_maker, check_dtypes=True, tol=0)
self._CompileAndCheck(jnp.spacing, args_maker, tol=0)
@jtu.sample_product(dtype = float_dtypes)
@jtu.skip_on_devices("tpu")
def testSpacingSubnormals(self, dtype):
zero = np.array(0, dtype=dtype)
inf = np.array(np.inf, dtype=dtype)
x = [zero]
for i in range(5):
x.append(np.nextafter(x[-1], -inf)) # negative denormals
x = x[::-1]
for i in range(5):
x.append(np.nextafter(x[-1], inf)) # positive denormals
x = np.array(x, dtype=dtype)
args_maker = lambda: [x]
self._CheckAgainstNumpy(np.spacing, jnp.spacing, args_maker, check_dtypes=True, tol=0)
self._CompileAndCheck(jnp.spacing, args_maker, tol=0)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| JaxNumpyOperatorTests |
python | google__jax | jax/_src/pallas/core.py | {
"start": 1736,
"end": 2563
} | class ____:
def __repr__(self):
return "DynamicGridDim"
dynamic_grid_dim = DynamicGridDim()
partial = functools.partial
GridElement = int | jax_typing.Array
GridName = Hashable
GridNames = tuple[Hashable, ...] | None
NamedGrid = tuple[tuple[GridName, int], ...]
TupleGrid = tuple[GridElement, ...]
Grid = Union[NamedGrid, TupleGrid]
StaticGrid = tuple[int, ...]
GridMappingGrid = tuple[int | DynamicGridDim, ...]
OriginStr = str # The origin of a block spec, e.g. input[2]["field"]
# Datatype for semaphore values in interpret mode.
# For now, we choose a relatively uncommon datatype (i16) so it is more easily
# identifiable in kernels.
# TODO(justinfu): Handle semaphores with a custom extended dtype.
SEMAPHORE_INTERPRET_DTYPE = jnp.int16
SEMAPHORE_MAX_VALUE = jnp.iinfo(SEMAPHORE_INTERPRET_DTYPE).max
| DynamicGridDim |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players.py | {
"start": 1702,
"end": 1874
} | class ____(ComputerPlayer):
def get_computer_move(self, game_state: GameState) -> Move | None:
return find_best_move_optimized(game_state)
| MinimaxComputerPlayerV2 |
python | huggingface__transformers | src/transformers/models/plbart/modeling_plbart.py | {
"start": 43634,
"end": 50805
} | class ____(PLBartPreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = ["final_logits_bias"]
_tied_weights_keys = {
"lm_head.weight": "model.shared.weight",
}
def __init__(self, config: PLBartConfig):
super().__init__(config)
self.model = PLBartModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.post_init()
def resize_token_embeddings(
self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (:
obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior:
generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Mask-filling:
```python
>>> from transformers import AutoTokenizer, PLBartForConditionalGeneration
>>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
>>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
>>> # en_XX is the language symbol id <LID> for English
>>> TXT = "<s> Is 0 the <mask> Fibonacci number ? </s> en_XX"
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['first', 'same', 'highest', 'result', 'number']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0])
lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id)
| PLBartForConditionalGeneration |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 125070,
"end": 127891
} | class ____(TypedDict, total=False):
type: Required[Literal['arguments']]
arguments_schema: Required[list[ArgumentsParameter]]
validate_by_name: bool
validate_by_alias: bool
var_args_schema: CoreSchema
var_kwargs_mode: VarKwargsMode
var_kwargs_schema: CoreSchema
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def arguments_schema(
arguments: list[ArgumentsParameter],
*,
validate_by_name: bool | None = None,
validate_by_alias: bool | None = None,
var_args_schema: CoreSchema | None = None,
var_kwargs_mode: VarKwargsMode | None = None,
var_kwargs_schema: CoreSchema | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> ArgumentsSchema:
"""
Returns a schema that matches an arguments schema, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
param_a = core_schema.arguments_parameter(
name='a', schema=core_schema.str_schema(), mode='positional_only'
)
param_b = core_schema.arguments_parameter(
name='b', schema=core_schema.bool_schema(), mode='positional_only'
)
schema = core_schema.arguments_schema([param_a, param_b])
v = SchemaValidator(schema)
assert v.validate_python(('hello', True)) == (('hello', True), {})
```
Args:
arguments: The arguments to use for the arguments schema
validate_by_name: Whether to populate by the parameter names, defaults to `False`.
validate_by_alias: Whether to populate by the parameter aliases, defaults to `True`.
var_args_schema: The variable args schema to use for the arguments schema
var_kwargs_mode: The validation mode to use for variadic keyword arguments. If `'uniform'`, every value of the
keyword arguments will be validated against the `var_kwargs_schema` schema. If `'unpacked-typed-dict'`,
the `var_kwargs_schema` argument must be a [`typed_dict_schema`][pydantic_core.core_schema.typed_dict_schema]
var_kwargs_schema: The variable kwargs schema to use for the arguments schema
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='arguments',
arguments_schema=arguments,
validate_by_name=validate_by_name,
validate_by_alias=validate_by_alias,
var_args_schema=var_args_schema,
var_kwargs_mode=var_kwargs_mode,
var_kwargs_schema=var_kwargs_schema,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| ArgumentsSchema |
python | PyCQA__pylint | tests/functional/m/mapping_context_py3.py | {
"start": 139,
"end": 264
} | class ____(type):
def __getitem__(cls, key):
return ord(key)
def keys(cls):
return ['a', 'b', 'c']
| Meta |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 47171,
"end": 47794
} | class ____(TypedDict, total=False):
type: Required[Literal['missing-sentinel']]
metadata: dict[str, Any]
serialization: SerSchema
def missing_sentinel_schema(
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> MissingSentinelSchema:
"""Returns a schema for the `MISSING` sentinel."""
return _dict_not_none(
type='missing-sentinel',
metadata=metadata,
serialization=serialization,
)
# must match input/parse_json.rs::JsonType::try_from
JsonType = Literal['null', 'bool', 'int', 'float', 'str', 'list', 'dict']
| MissingSentinelSchema |
python | kamyu104__LeetCode-Solutions | Python/valid-parenthesis-string.py | {
"start": 29,
"end": 462
} | class ____(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lower, upper = 0, 0 # keep lower bound and upper bound of '(' counts
for c in s:
lower += 1 if c == '(' else -1
upper -= 1 if c == ')' else -1
if upper < 0: break
lower = max(lower, 0)
return lower == 0 # range of '(' count is valid
| Solution |
python | google__pytype | pytype/tests/test_cmp2.py | {
"start": 71,
"end": 383
} | class ____(test_base.BaseTest):
def test_is(self):
"""SomeType is not be the same as AnotherType."""
self.Check("""
from typing import Optional
def f(x: Optional[str]) -> NoneType:
if x is None:
return x
else:
return None
""")
| InstanceUnequalityTest |
python | pytorch__pytorch | test/dynamo/test_recompiles.py | {
"start": 178,
"end": 20239
} | class ____(torch._dynamo.test_case.TestCase):
def test_inline_inbuilt_nn_modules_candidate(self):
def hook_flag_on(guard_manager, f_locals, builder):
self.assertTrue(
"[inline-inbuilt-nn-modules-candidate]" not in str(guard_manager)
)
def hook_flag_off(guard_manager, f_locals, builder):
self.assertTrue(
"[inline-inbuilt-nn-modules-candidate]" in str(guard_manager)
)
class SubMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(2, 2)
@torch.compile(backend="eager")
def forward(self, x):
return self.linear(x)
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.sm1 = SubMod()
self.sm2 = SubMod()
def forward(self, x):
return self.sm1(x) + self.sm2(x)
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
with (
install_guard_manager_testing_hook(hook_flag_on),
dc.patch(inline_inbuilt_nn_modules=True),
):
mod = Mod()
mod(torch.randn(2, 2))
with (
install_guard_manager_testing_hook(hook_flag_off),
dc.patch(inline_inbuilt_nn_modules=False),
):
mod = Mod()
mod(torch.randn(2, 2))
def test_automatic_dynamic_reduce_recompiles(self):
# Test the counterfactual, lots of recompiles without this config
def foo(x, y):
return x * y
def run_foo_6_times_and_count_recompiles(dynamic=None):
cnt = torch._dynamo.testing.CompileCounter()
x = torch.randn([2])
y = torch.randn([2])
opt = torch.compile(foo, backend=cnt, dynamic=dynamic)
opt(x, y)
x = torch.randn([3])
y = torch.randn([3])
opt(x, y)
x = torch.randn([4])
y = torch.randn([4])
opt(x, y)
opt(x, y)
x = torch.randn([5])
y = torch.randn([5])
opt(x, y)
opt(x, y)
x = torch.randn([6])
y = torch.randn([6])
opt(x, y)
return cnt
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", False)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_without_automatic():
return run_foo_6_times_and_count_recompiles()
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", True)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_with_automatic():
return run_foo_6_times_and_count_recompiles()
without = run_without_automatic()
self.assertEqual(without.frame_count, 5)
self.assertEqual(without.op_count, 5)
torch._dynamo.reset()
without = run_foo_6_times_and_count_recompiles(dynamic=False)
self.assertEqual(without.frame_count, 5)
self.assertEqual(without.op_count, 5)
torch._dynamo.reset()
with_automatic = run_with_automatic()
self.assertEqual(with_automatic.frame_count, 2)
self.assertEqual(with_automatic.op_count, 2)
torch._dynamo.reset()
with_automatic = run_foo_6_times_and_count_recompiles(dynamic=None)
self.assertEqual(with_automatic.frame_count, 2)
self.assertEqual(with_automatic.op_count, 2)
torch._dynamo.reset()
with_dynamic = run_foo_6_times_and_count_recompiles(dynamic=True)
self.assertEqual(with_dynamic.frame_count, 1)
self.assertEqual(with_dynamic.op_count, 1)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def test_recompiles_true_false_flop(self):
# Test the counterfactual, lots of recompiles without this config
def foo(x, y):
if x:
return y * 2
else:
return y * y
def run_foo_6_times_and_count_recompiles():
cnt = torch._dynamo.testing.CompileCounter()
opt = torch.compile(foo, backend=cnt, fullgraph=True)
x = True
y = torch.randn([2])
opt(x, y)
x = False
y = torch.randn([2])
opt(x, y)
x = True
y = torch.randn([3])
opt(x, y)
x = True
y = torch.randn([4])
opt(x, y)
x = True
y = torch.randn([5])
opt(x, y)
return cnt
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", False)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_without_automatic():
return run_foo_6_times_and_count_recompiles()
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", True)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_with_automatic():
return run_foo_6_times_and_count_recompiles()
without = run_without_automatic()
self.assertEqual(without.frame_count, 5)
self.assertEqual(without.op_count, 5)
torch._dynamo.reset()
with_automatic = run_with_automatic()
self.assertEqual(with_automatic.frame_count, 3)
self.assertEqual(with_automatic.op_count, 3)
def test_automatic_dynamic_tensor_scalar_change(self):
# Test the counterfactual, lots of recompiles without this config
def foo(x, y):
return x * y
def run_foo_6_times_and_count_recompiles_swap_types():
cnt = torch._dynamo.testing.CompileCounter()
x = torch.randn([2])
y = torch.randn([2])
opt = torch.compile(foo, backend=cnt)
opt(x, y)
x = torch.randn([3])
y = 3
opt(x, y)
x = torch.randn([4])
y = torch.randn([4])
opt(x, y)
opt(x, y)
x = torch.randn([5])
y = 4
opt(x, y)
opt(x, y)
x = torch.randn([6])
y = torch.randn([6])
opt(x, y)
return cnt
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", False)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_without_automatic():
return run_foo_6_times_and_count_recompiles_swap_types()
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", True)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_with_automatic():
return run_foo_6_times_and_count_recompiles_swap_types()
without = run_without_automatic()
self.assertEqual(without.frame_count, 5)
self.assertEqual(without.op_count, 5)
torch._dynamo.reset()
with_automatic = run_with_automatic()
self.assertEqual(with_automatic.frame_count, 3)
self.assertEqual(with_automatic.op_count, 3)
def test_aliasing_guard_failures(self):
def foo(a, b, c):
a.add_(b)
return c + 1
cnt = torch._dynamo.testing.CompileCounter()
compiled_foo = torch.compile(foo, backend=cnt, fullgraph=True)
x = torch.randn([3])
y = torch.randn([3])
z = torch.randn([3])
cmp_result = compiled_foo(
x.detach().clone(), y.detach().clone(), z.detach().clone()
)
eager_result = foo(x.detach().clone(), y.detach().clone(), z.detach().clone())
self.assertEqual(cmp_result, eager_result)
self.assertEqual(cnt.frame_count, 1)
cmp_result = compiled_foo(
z.detach().clone(), y.detach().clone(), x.detach().clone()
)
eager_result = foo(z.detach().clone(), y.detach().clone(), x.detach().clone())
self.assertEqual(cmp_result, eager_result)
# No recompile, alias preserved
self.assertEqual(cnt.frame_count, 1)
x_clone = x.detach().clone()
cmp_result = compiled_foo(x_clone, y.detach().clone(), x_clone)
x_clone = x.detach().clone()
eager_result = compiled_foo(x_clone, y.detach().clone(), x_clone)
self.assertEqual(cmp_result, eager_result)
# Recompile, alias changed
self.assertEqual(cnt.frame_count, 2)
def test_aliasing_guard_failures_with_globals(self):
g1 = torch.randn([3])
g2 = torch.randn([3])
def foo(a):
a.add_(g1)
return g2 + 1
cnt = torch._dynamo.testing.CompileCounter()
compiled_foo = torch.compile(foo, backend=cnt, fullgraph=True)
z = torch.randn([3])
cmp_result = compiled_foo(z.detach().clone())
eager_result = foo(z.detach().clone())
self.assertEqual(cmp_result, eager_result)
self.assertEqual(cnt.frame_count, 1)
g1 = g1.detach().clone()
cmp_result = compiled_foo(g1)
g1 = g1.detach().clone()
eager_result = compiled_foo(g1)
self.assertEqual(cmp_result, eager_result)
# Recompile, alias changed
self.assertEqual(cnt.frame_count, 2)
def test_dynamic_shape_parameter_recompile(self):
# Test the matrix multiplication with Parameters.
# Without the config assume_parameters_shapes_static_by_default,
# the torch.nn.Parameter shapes are assumed to be static which leads to recompilation
w = torch.nn.Parameter(torch.randn(3, 2))
def foo(x):
return x @ w
def run_foo_6_times_and_count_recompiles():
cnt = torch._dynamo.testing.CompileCounter()
opt = torch.compile(foo, backend=cnt, fullgraph=True)
x = torch.nn.Parameter(torch.randn(1, 3))
opt(x)
x = torch.nn.Parameter(torch.randn(10, 3))
opt(x)
x = torch.nn.Parameter(torch.randn(11, 3))
opt(x)
x = torch.nn.Parameter(torch.randn(15, 3))
opt(x)
x = torch.nn.Parameter(torch.randn(15, 3))
opt(x)
return cnt
@patch.object(torch._dynamo.config, "force_parameter_static_shapes", True)
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", False)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_static_comp_default_param():
return run_foo_6_times_and_count_recompiles()
@patch.object(torch._dynamo.config, "force_parameter_static_shapes", True)
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", True)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_dynamic_comp_default_param():
return run_foo_6_times_and_count_recompiles()
@patch.object(torch._dynamo.config, "force_parameter_static_shapes", False)
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", False)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_static_comp_dynamic_param():
return run_foo_6_times_and_count_recompiles()
@patch.object(torch._dynamo.config, "force_parameter_static_shapes", False)
@patch.object(torch._dynamo.config, "automatic_dynamic_shapes", True)
@patch.object(torch._dynamo.config, "assume_static_by_default", True)
def run_dynamic_comp_dynamic_param():
return run_foo_6_times_and_count_recompiles()
torch._dynamo.reset()
static_comp_default_param = run_static_comp_default_param()
self.assertEqual(static_comp_default_param.frame_count, 4)
self.assertEqual(static_comp_default_param.op_count, 4)
torch._dynamo.reset()
dynamic_comp_default_param = run_dynamic_comp_default_param()
self.assertEqual(dynamic_comp_default_param.frame_count, 4)
self.assertEqual(dynamic_comp_default_param.op_count, 4)
torch._dynamo.reset()
static_comp_dynamic_param = run_static_comp_dynamic_param()
self.assertEqual(static_comp_dynamic_param.frame_count, 4)
self.assertEqual(static_comp_dynamic_param.op_count, 4)
torch._dynamo.reset()
dynamic_comp_dynamic_param = run_dynamic_comp_dynamic_param()
self.assertEqual(dynamic_comp_dynamic_param.frame_count, 2)
self.assertEqual(dynamic_comp_dynamic_param.op_count, 2)
def test_simple_module_recompile(self):
class SimpleDropout(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dropout = torch.nn.Dropout(0.5)
self.linear = torch.nn.Linear(10, 1)
def forward(self, x):
return self.dropout(self.linear(x))
model = SimpleDropout()
x = torch.randn(10)
counter = torch._dynamo.testing.CompileCounter()
model = torch.compile(model, backend=counter, fullgraph=True)
for _ in range(20):
model.eval()
model(x)
model.train()
model(x)
self.assertEqual(counter.frame_count, 2)
@patch.object(torch._dynamo.config, "recompile_limit", 2)
def test_no_recursive_compile_after_cache_limit_hit(self):
def f(x, n):
x = x + n
return g(x, n)
def g(x, n):
x = x + n
return h(x, n)
def h(x, n):
return x + n
counter = torch._dynamo.testing.CompileCounter()
opt_f = torch.compile(f, backend=counter, dynamic=False)
for i in range(10):
opt_f(torch.ones(3), i)
self.assertEqual(counter.frame_count, 2)
def test_automatic_dynamic_on_closed_ints(self):
def f(x):
def g(y):
return y + x
return g
counter = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=counter)
def h(x, g):
return g(x)
for i in range(10):
h(torch.randn(5), f(i))
self.assertEqual(counter.frame_count, 2)
@patch.object(torch._dynamo.config, "recompile_limit", 2)
def test_run_mode_after_cache_limit_hit(self):
def f(x, n):
x = x + n
if torch._dynamo.is_compiling():
x = x + 1
return g(x, n)
def g(x, n):
x = x + n
if torch._dynamo.is_compiling():
x = x + 2
return x
counter = torch._dynamo.testing.CompileCounter()
opt_f = torch.compile(f, backend=counter, dynamic=False)
# compiles
self.assertEqual(opt_f(torch.ones(3), 0), torch.ones(3) + 3)
self.assertEqual(opt_f(torch.ones(3), 1), torch.ones(3) + 5)
# cache limit hit
self.assertEqual(opt_f(torch.ones(3), 2), torch.ones(3) + 4)
self.assertEqual(opt_f(torch.ones(3), 3), torch.ones(3) + 6)
# run mode
self.assertEqual(opt_f(torch.ones(3), 0), torch.ones(3) + 3)
self.assertEqual(opt_f(torch.ones(3), 1), torch.ones(3) + 5)
self.assertEqual(counter.frame_count, 2)
@torch._dynamo.config.patch(automatic_dynamic_shapes_mark_as="unbacked")
def test_automatic_dynamic_shapes_mark_as_unbacked(self):
counter = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=counter)
def f(x):
return x * x
f(torch.randn(3))
f(torch.randn(2))
f(torch.randn(1))
f(torch.randn(0))
self.assertEqual(counter.frame_count, 2) # not three or four!
@torch._dynamo.config.patch(automatic_dynamic_shapes_mark_as="oblivious")
def test_automatic_dynamic_shapes_mark_as_oblivious(self):
counter = torch._dynamo.testing.CompileCounter()
def f(x):
if x.size(0) < 10:
return x * 1
else:
return x + 10
opt_f = torch.compile(backend=counter, fullgraph=True)(f)
for i in [3, 2, 1, 0]:
self.assertEqual(f(torch.zeros(i)), opt_f(torch.zeros(i)))
self.assertEqual(counter.frame_count, 2) # not three or four!
@torch._dynamo.config.patch(automatic_dynamic_shapes_mark_as="oblivious")
def test_automatic_dynamic_shapes_mark_as_oblivious_fail_counterfactual(self):
counter = torch._dynamo.testing.CompileCounter()
def f(x):
if x.size(0) < 2:
return x * 1
else:
return x + 10
opt_f = torch.compile(backend=counter, fullgraph=True)(f)
opt_f(torch.randn(1))
with self.assertRaises(torch._dynamo.exc.UserError):
opt_f(torch.randn(0))
def test_ambient_autocast_recompile(self):
weights = torch.randn(10, 10)
counter = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
@torch.compile(backend=counter, fullgraph=True)
def fn(x):
return torch.mm(x, weights)
x = torch.randn(1, 10)
self.assertEqual(fn(x).dtype, torch.float32)
with torch.autocast("cpu", torch.float16):
self.assertEqual(fn(x).dtype, torch.float16)
with torch.autocast("cpu", torch.bfloat16):
self.assertEqual(fn(x).dtype, torch.bfloat16)
# should recompile each time
self.assertEqual(counter.frame_count, 3)
def test_autocast_constant_fold(self):
# test that constant-folded autocast functions
# work properly - it should work if the global autocast
# state is guarded.
weights = torch.randn(10, 10)
counter = torch._dynamo.testing.CompileCounterWithBackend("eager")
def fn(x):
if torch.get_autocast_dtype("cpu") == torch.float16:
x = x + 1
else:
x = x - 1
return torch.mm(x, weights)
opt_fn = torch.compile(fn, backend=counter, fullgraph=True)
x = torch.randn(1, 10)
with torch.autocast("cpu", torch.float16):
self.assertEqual(fn(x), opt_fn(x))
with torch.autocast("cpu", torch.bfloat16):
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(counter.frame_count, 2)
def test_dunder_call_recompile(self):
class Foo:
def __call__(self, x):
return x + 1
counter = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=counter)
def f(x, foo):
return foo(x)
x = torch.ones(2)
foo1 = Foo()
foo2 = Foo()
# no recompilation
f(x, foo1)
f(x, foo2)
self.assertEqual(counter.frame_count, 1)
# one recompilation
Foo.__call__ = lambda self, x: x + 2
f(x, foo1)
self.assertEqual(counter.frame_count, 2)
def test_no_recompile_over_unused_objects(self):
# This is a regression test case that imitates
# https://github.com/city96/ComfyUI-GGUF/blob/47bec6147569a138dd30ad3e14f190a36a3be456/ops.py#L169-L182
counter = torch._dynamo.testing.CompileCounter()
def f(x, key, patches):
return x * x + 1
@torch.compile(backend=counter, fullgraph=True)
def apply_patches(f, x, keys):
patches = []
for key, patch in keys: # noqa: F402
patches.append(patch)
x = f(x, key, patches)
return x
# no recompilation
x = torch.rand(10)
apply_patches(f, x, [("a", 1), ("b", 2)])
self.assertEqual(counter.frame_count, 1)
apply_patches(f, x, [("c", 3), ("d", 4)])
self.assertEqual(counter.frame_count, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| RecompileTests |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 6064,
"end": 8230
} | class ____(BaseSimpleType):
@classmethod
def convert_from_xml(cls, str_value: str) -> dt.datetime:
"""Convert an xsd:dateTime string to a datetime object."""
def parse_xsd_datetime(dt_str: str) -> dt.datetime:
# -- handle trailing 'Z' (Zulu/UTC), common in Word files --
if dt_str.endswith("Z"):
try:
# -- optional fractional seconds case --
return dt.datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%S.%fZ").replace(
tzinfo=dt.timezone.utc
)
except ValueError:
return dt.datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=dt.timezone.utc
)
# -- handles explicit offsets like +00:00, -05:00, or naive datetimes --
try:
return dt.datetime.fromisoformat(dt_str)
except ValueError:
# -- fall-back to parsing as naive datetime (with or without fractional seconds) --
try:
return dt.datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
return dt.datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%S")
try:
# -- parse anything reasonable, but never raise, just use default epoch time --
return parse_xsd_datetime(str_value)
except Exception:
return dt.datetime(1970, 1, 1, tzinfo=dt.timezone.utc)
@classmethod
def convert_to_xml(cls, value: dt.datetime) -> str:
# -- convert naive datetime to timezon-aware assuming local timezone --
if value.tzinfo is None:
value = value.astimezone()
# -- convert to UTC if not already --
value = value.astimezone(dt.timezone.utc)
# -- format with 'Z' suffix for UTC --
return value.strftime("%Y-%m-%dT%H:%M:%SZ")
@classmethod
def validate(cls, value: Any) -> None:
if not isinstance(value, dt.datetime):
raise TypeError("only a datetime.datetime object may be assigned, got '%s'" % value)
| ST_DateTime |
python | joerick__pyinstrument | examples/falcon_hello.py | {
"start": 262,
"end": 633
} | class ____:
def __init__(self, interval=0.01):
self.profiler = Profiler(interval=interval)
def process_request(self, req, resp):
self.profiler.start()
def process_response(self, req, resp, resource, req_succeeded):
self.profiler.stop()
self.profiler.open_in_browser() # Autoloads the file in default browser
| ProfilerMiddleware |
python | kamyu104__LeetCode-Solutions | Python/continuous-subarray-sum.py | {
"start": 29,
"end": 523
} | class ____(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
count = 0
lookup = {0: -1}
for i, num in enumerate(nums):
count += num
if k:
count %= k
if count in lookup:
if i - lookup[count] > 1:
return True
else:
lookup[count] = i
return False
| Solution |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/organization_alertrule_detector_index.py | {
"start": 1238,
"end": 3745
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
permission_classes = (OrganizationDetectorPermission,)
@extend_schema(
operation_id="Fetch Dual-Written Rule/Alert Rules and Detectors",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
],
responses={
200: AlertRuleDetectorSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Returns a dual-written rule/alert rule and its associated detector.
"""
validator = AlertRuleDetectorValidator(data=request.query_params)
validator.is_valid(raise_exception=True)
rule_id = validator.validated_data.get("rule_id")
alert_rule_id = validator.validated_data.get("alert_rule_id")
detector_id = validator.validated_data.get("detector_id")
queryset = AlertRuleDetector.objects.filter(detector__project__organization=organization)
if detector_id:
queryset = queryset.filter(detector_id=detector_id)
if alert_rule_id:
queryset = queryset.filter(alert_rule_id=alert_rule_id)
if rule_id:
queryset = queryset.filter(rule_id=rule_id)
alert_rule_detector = queryset.first()
if alert_rule_detector:
return Response(serialize(alert_rule_detector, request.user))
# Fallback: if alert_rule_id was provided but no AlertRuleDetector was found,
# try looking up Detector directly using calculated detector_id
if alert_rule_id:
try:
calculated_detector_id = get_object_id_from_fake_id(int(alert_rule_id))
detector = Detector.objects.get(
id=calculated_detector_id, project__organization=organization
)
if detector:
return Response(
{
"detectorId": str(detector.id),
"alertRuleId": str(alert_rule_id),
"ruleId": None,
}
)
except (ValueError, Detector.DoesNotExist):
pass
raise ResourceDoesNotExist
| OrganizationAlertRuleDetectorIndexEndpoint |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 28019,
"end": 28223
} | class ____(BaseModel):
"""Payload for setting rendered_map_index for a task instance."""
rendered_map_index: str
type: Literal["SetRenderedMapIndex"] = "SetRenderedMapIndex"
| SetRenderedMapIndex |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 91604,
"end": 91680
} | class ____(BinOpFrame):
operation = M.eq
_operator_repr = "=="
| EQFrame |
python | sobolevn__django-split-settings | tests/conftest.py | {
"start": 83,
"end": 1303
} | class ____(dict): # noqa: WPS600
"""This class emulates `globals()`, but does not share state in tests."""
def __init__(self, *args, **kwargs):
"""Adding `__file__` to make things work in `tools.py`."""
super().__init__(*args, **kwargs)
self['__file__'] = __file__
# Different util functions:
@pytest.fixture
def scope():
"""This fixture just returns the new instance of the test Scope class."""
return Scope()
@pytest.fixture
def fixture_file():
"""This fixture return a path to the test fixture file."""
return os.path.join(
'settings',
'basic',
'fixture_to_include.py',
)
# Settings files:
@pytest.fixture
def merged():
"""This fixture returns basic merged settings example."""
from tests.settings import merged as _merged # noqa: WPS433
return _merged
@pytest.fixture
def stacked():
"""This fixture returns stacked settings example."""
from tests.settings import stacked as _stacked # noqa: WPS433
return _stacked
@pytest.fixture
def recursion():
"""This fixture returns recursion settings example."""
from tests.settings import recursion as _recursion # noqa: WPS433
return _recursion
| Scope |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/not_previously_skipped_dep.py | {
"start": 1308,
"end": 4154
} | class ____(BaseTIDep):
"""
Determine if this task should be skipped.
Based on any of the task's direct upstream relatives have decided this task should
be skipped.
"""
NAME = "Not Previously Skipped"
IGNORABLE = True
IS_TASK_DEP = True
def _get_dep_statuses(self, ti, session, dep_context):
from airflow.utils.state import TaskInstanceState
upstream = ti.task.get_direct_relatives(upstream=True)
finished_tis = dep_context.ensure_finished_tis(ti.get_dagrun(session), session)
finished_task_ids = {t.task_id for t in finished_tis}
for parent in upstream:
if parent.inherits_from_skipmixin:
if parent.task_id not in finished_task_ids:
# This can happen if the parent task has not yet run.
continue
prev_result = ti.xcom_pull(
task_ids=parent.task_id, key=XCOM_SKIPMIXIN_KEY, session=session, map_indexes=ti.map_index
)
if prev_result is None:
# This can happen if the parent task has not yet run.
continue
should_skip = False
if (
XCOM_SKIPMIXIN_FOLLOWED in prev_result
and ti.task_id not in prev_result[XCOM_SKIPMIXIN_FOLLOWED]
):
# Skip any tasks that are not in "followed"
should_skip = True
elif (
XCOM_SKIPMIXIN_SKIPPED in prev_result
and ti.task_id in prev_result[XCOM_SKIPMIXIN_SKIPPED]
):
# Skip any tasks that are in "skipped"
should_skip = True
if should_skip:
# If the parent SkipMixin has run, and the XCom result stored indicates this
# ti should be skipped, set ti.state to SKIPPED and fail the rule so that the
# ti does not execute.
if dep_context.wait_for_past_depends_before_skipping:
past_depends_met = ti.xcom_pull(
task_ids=ti.task_id, key=PAST_DEPENDS_MET, session=session, default=False
)
if not past_depends_met:
yield self._failing_status(
reason="Task should be skipped but the past depends are not met"
)
return
ti.set_state(TaskInstanceState.SKIPPED, session)
yield self._failing_status(
reason=f"Skipping because of previous XCom result from parent task {parent.task_id}"
)
return
| NotPreviouslySkippedDep |
python | pytorch__pytorch | torch/_higher_order_ops/print.py | {
"start": 239,
"end": 3062
} | class ____(HigherOrderOperator):
"""
print(format_str, **kwargs) -> None
This Higher Order Operator (HOP) provides a functional version of print for use in PyTorch graphs.
It enables format printing with named arguments, e.g., torch._higher_order_ops.print("moo {x} {y}", x=1, y=2).
This HOP enables printing without causing graph break.
"""
def __init__(self) -> None:
super().__init__("print")
def __call__(self, format_str: str, **kwargs: object) -> None:
assert isinstance(format_str, str)
return super().__call__(format_str, **kwargs)
# pyrefly: ignore [bad-override]
def gen_schema(self, format_str: str, **kwargs: object) -> torch.FunctionSchema:
from torch._higher_order_ops.schema import HopSchemaGenerator
schema_gen = HopSchemaGenerator(self)
schema_gen.add_arg("format_str", format_str[0])
# Add each kwarg as a keyword-only argument
for key, value in kwargs.items():
schema_gen.add_arg(key, value, kw_only=True)
schema_gen.add_schema_tree_spec(format_str, **kwargs)
return schema_gen.gen_schema()
print = Print()
@print.py_impl(ProxyTorchDispatchMode)
# pyre-ignore
def print_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode, format_str: str, **kwargs: object
) -> None:
proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) # type: ignore[union-attr] # noqa: F841
mode.tracer.create_proxy("call_function", print, (format_str,), proxy_kwargs)
@print.py_impl(FakeTensorMode)
# pyre-ignore
def print_fake_tensor_mode(mode, format_str: str, **kwargs: object):
return None
@print.py_impl(torch._C.DispatchKey.CompositeExplicitAutograd)
# pyre-ignore
def print_impl(format_str: str, **kwargs: object) -> None:
# Ensure all immutable_dict/list in kwargs are converted to regular dict/list
map_types: dict[type, type] = {
torch.fx.immutable_collections.immutable_dict: dict,
torch.fx.immutable_collections.immutable_list: list,
}
new_kwargs = pytree.tree_map_only(
tuple(map_types.keys()),
lambda a: map_types[type(a)](a),
kwargs,
lambda a: isinstance(a, tuple(map_types.keys())),
)
# Use built-in print to avoid recursion with the HOP print
builtins.print(format_str.format(**new_kwargs))
print.fallthrough(torch._C.DispatchKey.AutogradCPU)
print.fallthrough(torch._C.DispatchKey.AutogradCUDA)
@print.py_functionalize_impl
def print_func(ctx, format_str: str, **kwargs: object):
from torch._higher_order_ops.effects import handle_effects
return handle_effects(
ctx.mode._allow_token_discovery,
ctx.mode._tokens,
print, # type: ignore[arg-type]
(format_str,),
kwargs, # type: ignore[arg-type]
)
| Print |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 7691,
"end": 12951
} | class ____(_MutableDictTestFixture):
run_define_tables = "each"
def setup_mappers(cls):
foo = cls.tables.foo
cls.mapper_registry.map_imperatively(Foo, foo)
def test_coerce_none(self):
sess = fixture_session()
f1 = Foo(data=None)
sess.add(f1)
sess.commit()
eq_(f1.data, None)
def test_coerce_raise(self):
assert_raises_message(
ValueError,
"Attribute 'data' does not accept objects of type",
Foo,
data={1, 2, 3},
)
def test_in_place_mutation(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.commit()
f1.data["a"] = "c"
sess.commit()
eq_(f1.data, {"a": "c"})
def test_modified_event(self):
canary = mock.Mock()
event.listen(Foo.data, "modified", canary)
f1 = Foo(data={"a": "b"})
f1.data["a"] = "c"
eq_(
canary.mock_calls,
[
mock.call(
f1,
attributes.AttributeEventToken(
Foo.data.impl, attributes.OP_MODIFIED
),
)
],
)
def test_clear(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.commit()
f1.data.clear()
sess.commit()
eq_(f1.data, {})
def test_update(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.commit()
f1.data.update({"a": "z"})
sess.commit()
eq_(f1.data, {"a": "z"})
def test_pop(self):
sess = fixture_session()
f1 = Foo(data={"a": "b", "c": "d"})
sess.add(f1)
sess.commit()
eq_(f1.data.pop("a"), "b")
sess.commit()
assert_raises(KeyError, f1.data.pop, "g")
eq_(f1.data, {"c": "d"})
def test_pop_default(self):
sess = fixture_session()
f1 = Foo(data={"a": "b", "c": "d"})
sess.add(f1)
sess.commit()
eq_(f1.data.pop("a", "q"), "b")
eq_(f1.data.pop("a", "q"), "q")
sess.commit()
eq_(f1.data, {"c": "d"})
def test_pop_default_none(self):
sess = fixture_session()
f1 = Foo(data={"a": "b", "c": "d"})
sess.add(f1)
sess.commit()
eq_(f1.data.pop("a", None), "b")
eq_(f1.data.pop("a", None), None)
sess.commit()
eq_(f1.data, {"c": "d"})
def test_popitem(self):
sess = fixture_session()
orig = {"a": "b", "c": "d"}
# the orig dict remains unchanged when we assign,
# but just making this future-proof
data = dict(orig)
f1 = Foo(data=data)
sess.add(f1)
sess.commit()
k, v = f1.data.popitem()
assert k in ("a", "c")
orig.pop(k)
sess.commit()
eq_(f1.data, orig)
def test_setdefault(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.commit()
eq_(f1.data.setdefault("c", "d"), "d")
sess.commit()
eq_(f1.data, {"a": "b", "c": "d"})
eq_(f1.data.setdefault("c", "q"), "d")
sess.commit()
eq_(f1.data, {"a": "b", "c": "d"})
eq_(f1.data.setdefault("w", None), None)
sess.commit()
eq_(f1.data, {"a": "b", "c": "d", "w": None})
def test_replace(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.flush()
f1.data = {"b": "c"}
sess.commit()
eq_(f1.data, {"b": "c"})
def test_replace_itself_still_ok(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.flush()
f1.data = f1.data
f1.data["b"] = "c"
sess.commit()
eq_(f1.data, {"a": "b", "b": "c"})
def test_pickle_parent(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"})
sess.add(f1)
sess.commit()
f1.data
sess.close()
for loads, dumps in picklers():
sess = fixture_session()
f2 = loads(dumps(f1))
sess.add(f2)
f2.data["a"] = "c"
assert f2 in sess.dirty
def test_unrelated_flush(self):
sess = fixture_session()
f1 = Foo(data={"a": "b"}, unrelated_data="unrelated")
sess.add(f1)
sess.flush()
f1.unrelated_data = "unrelated 2"
sess.flush()
f1.data["a"] = "c"
sess.commit()
eq_(f1.data["a"], "c")
def _test_non_mutable(self):
sess = fixture_session()
f1 = Foo(non_mutable_data={"a": "b"})
sess.add(f1)
sess.commit()
f1.non_mutable_data["a"] = "c"
sess.commit()
eq_(f1.non_mutable_data, {"a": "b"})
def test_copy(self):
f1 = Foo(data={"a": "b"})
f1.data = copy.copy(f1.data)
eq_(f1.data, {"a": "b"})
def test_deepcopy(self):
f1 = Foo(data={"a": "b"})
f1.data = copy.deepcopy(f1.data)
eq_(f1.data, {"a": "b"})
| _MutableDictTestBase |
python | nedbat__coveragepy | tests/test_concurrency.py | {
"start": 14746,
"end": 23871
} | class ____(CoverageTest):
"""Test support of the multiprocessing module."""
def try_multiprocessing_code(
self,
code: str,
expected_out: str | None,
the_module: ModuleType,
nprocs: int,
start_method: str,
concurrency: str = "multiprocessing",
args: str = "",
) -> None:
"""Run code using multiprocessing, it should produce `expected_out`."""
self.make_file("multi.py", code)
self.make_file(
".coveragerc",
f"""\
[run]
concurrency = {concurrency}
source = .
""",
)
cmd = f"coverage run {args} multi.py {start_method}"
_, out = self.run_command_status(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
assert expected_cant_trace in out
pytest.skip(f"Can't test: {expected_cant_trace}")
else:
assert out.rstrip() == expected_out
assert len(glob.glob(".coverage.*")) == nprocs + 1
out = self.run_command("coverage combine")
out_lines = out.splitlines()
assert len(out_lines) == nprocs + 1
assert all(
re.fullmatch(
r"(Combined data file|Skipping duplicate data) \.coverage\..*\.\d+\.X\w{6}x",
line,
)
for line in out_lines
)
assert len(glob.glob(".coverage.*")) == 0
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 100%", last_line)
def test_multiprocessing_simple(self, start_method: str) -> None:
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x * x if x % 2 else x * x * x for x in range(upto))
expected_out = f"{nprocs} pids, {total = }"
self.try_multiprocessing_code(
code,
expected_out,
threading,
nprocs,
start_method=start_method,
)
def test_multiprocessing_append(self, start_method: str) -> None:
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x * x if x % 2 else x * x * x for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.try_multiprocessing_code(
code,
expected_out,
threading,
nprocs,
args="--append",
start_method=start_method,
)
def test_multiprocessing_and_gevent(self, start_method: str) -> None:
nprocs = 3
upto = 30
code = (SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE).format(
NPROCS=nprocs, UPTO=upto
)
total = sum(sum(range((x + 1) * 100)) for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.try_multiprocessing_code(
code,
expected_out,
eventlet,
nprocs,
concurrency="multiprocessing,eventlet",
start_method=start_method,
)
@pytest.mark.skipif(
not testenv.CAN_MEASURE_BRANCHES, reason="Can't measure branches with this core"
)
def test_multiprocessing_with_branching(self, start_method: str) -> None:
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x * x if x % 2 else x * x * x for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.make_file("multi.py", code)
self.make_file(
"multi.rc",
"""\
[run]
concurrency = multiprocessing
branch = True
omit = */site-packages/*
""",
)
out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}")
assert out.rstrip() == expected_out
out = self.run_command("coverage combine -q") # sneak in a test of -q
assert out == ""
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line)
def test_multiprocessing_bootstrap_error_handling(self) -> None:
# An exception during bootstrapping will be reported.
self.make_file(
"multi.py",
"""\
import multiprocessing
if __name__ == "__main__":
with multiprocessing.Manager():
pass
""",
)
self.make_file(
".coveragerc",
"""\
[run]
concurrency = multiprocessing
_crash = _bootstrap
""",
)
out = self.run_command("coverage run multi.py", status=1)
assert "Exception during multiprocessing bootstrap init" in out
assert "RuntimeError: Crashing because called by _bootstrap" in out
def test_bug_890(self) -> None:
# chdir in multiprocessing shouldn't keep us from finding the
# .coveragerc file.
self.make_file(
"multi.py",
"""\
import multiprocessing, os, os.path
if __name__ == "__main__":
if not os.path.exists("./tmp"): os.mkdir("./tmp")
os.chdir("./tmp")
with multiprocessing.Manager():
pass
print("ok")
""",
)
self.make_file(
".coveragerc",
"""\
[run]
concurrency = multiprocessing
""",
)
out = self.run_command("coverage run multi.py")
assert out.splitlines()[-1] == "ok"
@pytest.mark.skipif(not testenv.SETTRACE_CORE, reason="gettrace is not supported with this core.")
def test_coverage_stop_in_threads() -> None:
has_started_coverage = []
has_stopped_coverage = []
def run_thread() -> None: # pragma: nested
"""Check that coverage is stopping properly in threads."""
deadline = time.time() + 5
ident = threading.current_thread().ident
if sys.gettrace() is not None:
has_started_coverage.append(ident)
while sys.gettrace() is not None:
# Wait for coverage to stop
time.sleep(0.01)
if time.time() > deadline:
return
has_stopped_coverage.append(ident)
cov = coverage.Coverage()
with cov.collect():
t = threading.Thread(target=run_thread)
t.start()
time.sleep(0.1)
t.join()
assert has_started_coverage == [t.ident]
assert has_stopped_coverage == [t.ident]
def test_thread_safe_save_data(tmp_path: pathlib.Path) -> None:
# Non-regression test for: https://github.com/coveragepy/coveragepy/issues/581
# Create some Python modules and put them in the path
modules_dir = tmp_path / "test_modules"
modules_dir.mkdir()
module_names = [f"m{i:03d}" for i in range(1000)]
for module_name in module_names:
(modules_dir / (module_name + ".py")).write_text("def f(): pass\n", encoding="utf-8")
# Shared variables for threads
should_run = [True]
imported = []
old_dir = os.getcwd()
os.chdir(modules_dir)
try:
# Make sure that all dummy modules can be imported.
for module_name in module_names:
import_local_file(module_name)
def random_load() -> None: # pragma: nested
"""Import modules randomly to stress coverage."""
while should_run[0]:
module_name = random.choice(module_names)
mod = import_local_file(module_name)
mod.f()
imported.append(mod)
# Spawn some threads with coverage enabled and attempt to read the
# results right after stopping coverage collection with the threads
# still running.
duration = 0.01
for _ in range(3):
cov = coverage.Coverage()
with cov.collect():
threads = [threading.Thread(target=random_load) for _ in range(10)]
should_run[0] = True
for t in threads:
t.start()
time.sleep(duration)
# The following call used to crash with running background threads.
cov.get_data()
# Stop the threads
should_run[0] = False
for t in threads:
t.join()
if (not imported) and duration < 10: # pragma: only failure
duration *= 2
finally:
os.chdir(old_dir)
should_run[0] = False
@pytest.mark.skipif(env.WINDOWS, reason="SIGTERM doesn't work the same on Windows")
@pytest.mark.flaky(max_runs=3) # Sometimes a test fails due to inherent randomness. Try more times.
| MultiprocessingTest |
python | getsentry__sentry | src/sentry/notifications/utils/__init__.py | {
"start": 14153,
"end": 17089
} | class ____:
problem: PerformanceProblem
spans: list[Span] | None
event: Event | None
def __post_init__(self) -> None:
parent_span, repeating_spans = get_parent_and_repeating_spans(self.spans, self.problem)
self.parent_span = parent_span
self.repeating_spans = repeating_spans
def to_dict(self) -> dict[str, str | float | list[str]]:
return {
"transaction_name": self.transaction,
"parent_span": get_span_evidence_value(self.parent_span),
"repeating_spans": get_span_evidence_value(self.repeating_spans),
"num_repeating_spans": (
str(len(self.problem.offender_span_ids)) if self.problem.offender_span_ids else ""
),
}
@property
def transaction(self) -> str:
if self.event and self.event.transaction:
return str(self.event.transaction)
return ""
@property
def transaction_duration(self) -> float:
if not self.event:
return 0
return self.duration(self.event.data)
def duration(self, item: Mapping[str, Any] | None) -> float:
if not item:
return 0
start = float(item.get("start_timestamp", 0) or 0)
end = float(item.get("timestamp", 0) or 0)
return (end - start) * 1000
def _find_span_by_id(self, id: str) -> Span | None:
if not self.spans:
return None
for span in self.spans:
span_id = span.get("span_id", "") or ""
if span_id == id:
return span
return None
def get_span_duration(self, span: Span | None) -> timedelta:
if span:
return timedelta(seconds=span.get("timestamp", 0) - span.get("start_timestamp", 0))
return timedelta(0)
def _sum_span_duration(self, spans: list[Span | None]) -> float:
"Given non-overlapping spans, find the sum of the span durations in milliseconds"
sum = 0.0
for span in spans:
if span:
sum += self.get_span_duration(span).total_seconds() * 1000
return sum
@classmethod
def from_problem_and_spans(
cls,
problem: PerformanceProblem,
spans: list[Span] | None,
event: Event | None = None,
) -> PerformanceProblemContext:
if problem.type in (
PerformanceNPlusOneAPICallsGroupType,
PerformanceNPlusOneAPICallsExperimentalGroupType,
):
return NPlusOneAPICallProblemContext(problem, spans, event)
if problem.type == PerformanceConsecutiveDBQueriesGroupType:
return ConsecutiveDBQueriesProblemContext(problem, spans, event)
if problem.type == PerformanceRenderBlockingAssetSpanGroupType:
return RenderBlockingAssetProblemContext(problem, spans, event)
else:
return cls(problem, spans, event)
| PerformanceProblemContext |
python | lxml__lxml | src/lxml/tests/test_xslt.py | {
"start": 67131,
"end": 69538
} | class ____(HelperTestCase):
"""XSLT tests for etree under Python 3"""
def test_xslt_result_bytes(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(b'''\
<?xml version="1.0"?>
<foo>B</foo>
''',
bytes(res))
def test_xslt_result_bytearray(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(b'''\
<?xml version="1.0"?>
<foo>B</foo>
''',
bytearray(res))
def test_xslt_result_memoryview(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(b'''\
<?xml version="1.0"?>
<foo>B</foo>
''',
bytes(memoryview(res)))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeXSLTTestCase)])
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeEXSLTTestCase)])
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeXSLTExtFuncTestCase)])
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeXSLTExtElementTestCase)])
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(Py3XSLTTestCase)])
suite.addTests(
[make_doctest('extensions.txt')])
suite.addTests(
[make_doctest('xpathxslt.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| Py3XSLTTestCase |
python | apache__airflow | providers/snowflake/tests/unit/snowflake/decorators/test_snowpark.py | {
"start": 1452,
"end": 8422
} | class ____:
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_decorator_no_param(self, mock_snowflake_hook, dag_maker):
number = 11
@task.snowpark(
task_id=f"{TASK_ID}_1",
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func1(session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
@task.snowpark(
task_id=f"{TASK_ID}_2",
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func2():
return number
with dag_maker(dag_id=TEST_DAG_ID):
_ = [func1(), func2()]
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() == number
assert mock_snowflake_hook.call_count == 2
assert mock_snowflake_hook.return_value.get_snowpark_session.call_count == 2
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_decorator_with_param(self, mock_snowflake_hook, dag_maker):
number = 11
@task.snowpark(
task_id=f"{TASK_ID}_1",
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func1(session: Session, number: int):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
@task.snowpark(
task_id=f"{TASK_ID}_2",
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func2(number: int, session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
@task.snowpark(
task_id=f"{TASK_ID}_3",
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func3(number: int):
return number
with dag_maker(dag_id=TEST_DAG_ID):
_ = [func1(number=number), func2(number=number), func3(number=number)]
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() == number
assert mock_snowflake_hook.call_count == 3
assert mock_snowflake_hook.return_value.get_snowpark_session.call_count == 3
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_decorator_no_return(self, mock_snowflake_hook, dag_maker):
@task.snowpark(
task_id=TASK_ID,
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func(session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
with dag_maker(dag_id=TEST_DAG_ID):
func()
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() is None
mock_snowflake_hook.assert_called_once()
mock_snowflake_hook.return_value.get_snowpark_session.assert_called_once()
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_decorator_multiple_output(self, mock_snowflake_hook, dag_maker, request):
@task.snowpark(
task_id=TASK_ID,
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
multiple_outputs=True,
)
def func(session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return {"a": 1, "b": "2"}
if AIRFLOW_V_3_0_PLUS:
run_task = request.getfixturevalue("run_task")
op = func().operator
run_task(task=op)
assert run_task.xcom.get(key="a") == 1
assert run_task.xcom.get(key="b") == "2"
assert run_task.xcom.get(key="return_value") == {"a": 1, "b": "2"}
else:
with dag_maker(dag_id=TEST_DAG_ID):
func()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
assert ti.xcom_pull(key="a") == 1
assert ti.xcom_pull(key="b") == "2"
assert ti.xcom_pull() == {"a": 1, "b": "2"}
mock_snowflake_hook.assert_called_once()
mock_snowflake_hook.return_value.get_snowpark_session.assert_called_once()
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_decorator_session_tag(self, mock_snowflake_hook, dag_maker):
mock_session = mock_snowflake_hook.return_value.get_snowpark_session.return_value
mock_session.query_tag = {}
# Mock the update_query_tag function to combine with another dict
def update_query_tag(new_tags):
mock_session.query_tag.update(new_tags)
mock_session.update_query_tag = mock.Mock(side_effect=update_query_tag)
@task.snowpark(
task_id=TASK_ID,
snowflake_conn_id=CONN_ID,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
)
def func(session: Session):
return session.query_tag
with dag_maker(dag_id=TEST_DAG_ID):
func()
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
query_tag = ti.xcom_pull()
assert query_tag == {
"dag_id": TEST_DAG_ID,
"dag_run_id": dr.run_id,
"task_id": TASK_ID,
"operator": "_SnowparkDecoratedOperator",
}
| TestSnowparkDecorator |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 1319,
"end": 1466
} | class ____(SentryAPIException):
status_code = status.HTTP_400_BAD_REQUEST
code = "invalid-request"
message = "Invalid request"
| BadRequest |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 243568,
"end": 246694
} | class ____(Operation):
def __init__(self, bins=10, range=None, *, name=None):
super().__init__(name=name)
if not isinstance(bins, int):
raise TypeError("bins must be of type `int`")
if bins < 0:
raise ValueError("`bins` should be a non-negative integer")
if range:
if len(range) < 2 or not isinstance(range, tuple):
raise ValueError("range must be a tuple of two elements")
if range[1] < range[0]:
raise ValueError(
"The second element of range must be greater than the first"
)
self.bins = bins
self.range = range
def call(self, x):
x = backend.convert_to_tensor(x)
if len(x.shape) > 1:
raise ValueError("Input tensor must be 1-dimensional")
return backend.math.histogram(x, bins=self.bins, range=self.range)
def compute_output_spec(self, x):
return (
KerasTensor(shape=(self.bins,), dtype=x.dtype),
KerasTensor(shape=(self.bins + 1,), dtype=x.dtype),
)
@keras_export(["keras.ops.histogram", "keras.ops.numpy.histogram"])
def histogram(x, bins=10, range=None):
"""Computes a histogram of the data tensor `x`.
Args:
x: Input tensor.
bins: An integer representing the number of histogram bins.
Defaults to 10.
range: A tuple representing the lower and upper range of the bins.
If not specified, it will use the min and max of `x`.
Returns:
A tuple containing:
- A tensor representing the counts of elements in each bin.
- A tensor representing the bin edges.
Example:
>>> input_tensor = np.random.rand(8)
>>> keras.ops.histogram(input_tensor)
(array([1, 1, 1, 0, 0, 1, 2, 1, 0, 1], dtype=int32),
array([0.0189519 , 0.10294958, 0.18694726, 0.27094494, 0.35494262,
0.43894029, 0.52293797, 0.60693565, 0.69093333, 0.77493101,
0.85892869]))
"""
if not isinstance(bins, int):
raise TypeError(
f"Argument `bins` must be of type `int`. Received: bins={bins}"
)
if bins < 0:
raise ValueError(
"Argument `bins` should be a non-negative integer. "
f"Received: bins={bins}"
)
if range:
if len(range) < 2 or not isinstance(range, tuple):
raise ValueError(
"Argument `range` must be a tuple of two elements. "
f"Received: range={range}"
)
if range[1] < range[0]:
raise ValueError(
"The second element of `range` must be greater than the first. "
f"Received: range={range}"
)
if any_symbolic_tensors((x,)):
return Histogram(bins=bins, range=range).symbolic_call(x)
x = backend.convert_to_tensor(x)
if len(x.shape) > 1:
raise ValueError(
"Input tensor must be 1-dimensional. "
f"Received: input.shape={x.shape}"
)
return backend.numpy.histogram(x, bins=bins, range=range)
| Histogram |
python | tornadoweb__tornado | tornado/http1connection.py | {
"start": 2085,
"end": 3438
} | class ____:
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`."""
def __init__(
self,
no_keep_alive: bool = False,
chunk_size: Optional[int] = None,
max_header_size: Optional[int] = None,
header_timeout: Optional[float] = None,
max_body_size: Optional[int] = None,
body_timeout: Optional[float] = None,
decompress: bool = False,
) -> None:
"""
:arg bool no_keep_alive: If true, always close the connection after
one request.
:arg int chunk_size: how much data to read into memory at once
:arg int max_header_size: maximum amount of data for HTTP headers
:arg float header_timeout: how long to wait for all headers (seconds)
:arg int max_body_size: maximum amount of data for body
:arg float body_timeout: how long to wait while reading body (seconds)
:arg bool decompress: if true, decode incoming
``Content-Encoding: gzip``
"""
self.no_keep_alive = no_keep_alive
self.chunk_size = chunk_size or 65536
self.max_header_size = max_header_size or 65536
self.header_timeout = header_timeout
self.max_body_size = max_body_size
self.body_timeout = body_timeout
self.decompress = decompress
| HTTP1ConnectionParameters |
python | walkccc__LeetCode | solutions/3277. Maximum XOR Score Subarray Queries/3277.py | {
"start": 0,
"end": 635
} | class ____:
def maximumSubarrayXor(
self,
nums: list[int],
queries: list[list[int]]
) -> list[int]:
n = len(nums)
# xors[i][j] := the XOR score of nums[i..j]
xors = [[0] * n for _ in range(n)]
# dp[i][j] := the maximum XOR score of nums[i..j]
dp = [[0] * n for _ in range(n)]
for i, num in enumerate(nums):
xors[i][i] = num
dp[i][i] = num
for d in range(1, n):
for i in range(n - d):
j = i + d
xors[i][j] = xors[i][j - 1] ^ xors[i + 1][j]
dp[i][j] = max(xors[i][j], dp[i][j - 1], dp[i + 1][j])
return [dp[l][r] for l, r in queries]
| Solution |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 3670,
"end": 5708
} | class ____(Module):
r"""Applies the randomized leaky rectified linear unit function, element-wise.
Method described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network <https://arxiv.org/abs/1505.00853>`_.
The function is defined as:
.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})` during training while during
evaluation :math:`a` is fixed with :math:`a = \frac{\text{lower} + \text{upper}}{2}`.
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/RReLU.png
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lower", "upper", "inplace"]
lower: float
upper: float
inplace: bool
def __init__(
self, lower: float = 1.0 / 8, upper: float = 1.0 / 3, inplace: bool = False
) -> None:
super().__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
inplace_str = ", inplace=True" if self.inplace else ""
return f"lower={self.lower}, upper={self.upper}{inplace_str}"
| RReLU |
python | ray-project__ray | python/ray/serve/handle.py | {
"start": 8827,
"end": 13536
} | class ____:
def __init__(
self,
replica_result_future: Union[
concurrent.futures.Future[ReplicaResult], asyncio.Future[ReplicaResult]
],
request_metadata: RequestMetadata,
_is_router_running_in_separate_loop: bool = True,
):
self._cancelled = False
self._replica_result_future = replica_result_future
self._replica_result: Optional[ReplicaResult] = None
self._request_metadata: RequestMetadata = request_metadata
self._is_router_running_in_separate_loop = _is_router_running_in_separate_loop
@property
def request_id(self) -> str:
return self._request_metadata.request_id
@property
def by_reference(self) -> bool:
return self._request_metadata._by_reference
def _fetch_future_result_sync(
self, _timeout_s: Optional[float] = None
) -> ReplicaResult:
"""Synchronously fetch the replica result.
The result is cached in `self._replica_result`.
"""
if self._replica_result is None:
if not self._is_router_running_in_separate_loop:
raise RuntimeError(
"Sync methods should not be called from within an `asyncio` event "
"loop. Use `await response` instead."
)
try:
self._replica_result = self._replica_result_future.result(
timeout=_timeout_s
)
except concurrent.futures.TimeoutError:
raise TimeoutError("Timed out resolving to ObjectRef.") from None
except concurrent.futures.CancelledError:
raise RequestCancelledError(self.request_id) from None
return self._replica_result
async def _fetch_future_result_async(self) -> ReplicaResult:
"""Asynchronously fetch replica result.
The result is cached in `self._replica_result`..
"""
if self._replica_result is None:
if self._is_router_running_in_separate_loop:
# Use `asyncio.wrap_future` so `self._replica_result_future` can be awaited
# safely from any asyncio loop.
# self._replica_result_future is a object of type concurrent.futures.Future
self._replica_result = await asyncio.wrap_future(
self._replica_result_future
)
else:
# self._replica_result_future is a object of type asyncio.Future
self._replica_result = await self._replica_result_future
return self._replica_result
def cancel(self):
"""Attempt to cancel the `DeploymentHandle` call.
This is best effort.
- If the request hasn't been assigned to a replica, the assignment will be
cancelled.
- If the request has been assigned to a replica, `ray.cancel` will be
called on the object ref, attempting to cancel the request and any downstream
requests it makes.
If the request is successfully cancelled, subsequent operations on the ref will
raise an exception:
- If the request was cancelled before assignment, they'll raise
`asyncio.CancelledError` (or a `concurrent.futures.CancelledError` for
synchronous methods like `.result()`.).
- If the request was cancelled after assignment, they'll raise
`ray.exceptions.TaskCancelledError`.
"""
if self._cancelled:
return
self._cancelled = True
self._replica_result_future.cancel()
if not self._is_router_running_in_separate_loop:
# Given that there is a event loop running, we can't call sync methods.
# Hence optimistically cancel the replica result future and replica result.
if self._replica_result:
self._replica_result.cancel()
return
try:
# try to fetch the results synchronously. if it succeeds,
# we will explicitly cancel the replica result. if it fails,
# the request is already cancelled and we can return early.
self._fetch_future_result_sync()
except RequestCancelledError:
# request is already cancelled nothing to do here
return
self._replica_result.cancel()
@DeveloperAPI
def cancelled(self) -> bool:
"""Whether or not the request has been cancelled.
This is `True` if `.cancel()` is called, but the request may actually have run
to completion.
"""
return self._cancelled
@PublicAPI(stability="stable")
| _DeploymentResponseBase |
python | pytorch__pytorch | torch/_inductor/template_heuristics/aten.py | {
"start": 2396,
"end": 2879
} | class ____(
ATenAddMMConfigHeuristics, GemmMaxAutotuneTemplateConfigHeuristics
):
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
nodes = kernel_inputs.nodes()
# for addmm, bias is the first input
bias = nodes[0]
if bias.get_stride()[0] == 0 and inductor_config.triton.autotune_cublasLt:
yield dict()
| ATenBiasAddMMConfigHeuristics |
python | facebook__pyre-check | scripts/run_server_unsaved_changes_test.py | {
"start": 1014,
"end": 3429
} | class ____(Exception):
pass
@contextlib.contextmanager
def connect_in_text_mode(
socket_path: pathlib.Path,
) -> Generator[Tuple[TextIO, TextIO], None, None]:
"""
This is a line-oriented higher-level API than `connect`. It can be used
when the caller does not want to deal with the complexity of binary I/O.
The behavior is the same as `connect`, except the streams that are created
operates in text mode. Read/write APIs of the streams uses UTF-8 encoded
`str` instead of `bytes`. Those operations are also line-buffered, meaning
that the streams will automatically be flushed once the newline character
is encountered.
"""
with connect(socket_path) as (input_channel, output_channel):
yield (
io.TextIOWrapper(
input_channel,
line_buffering=True,
errors="replace",
),
io.TextIOWrapper(
output_channel,
line_buffering=True,
errors="replace",
),
)
@contextlib.contextmanager
def connect(
socket_path: pathlib.Path,
) -> Generator[Tuple[BinaryIO, BinaryIO], None, None]:
"""
Connect to the socket at given path. Once connected, create an input and
an output stream from the socket. Both the input stream and the output
stream are in raw binary mode: read/write APIs of the streams need to use
`bytes` rather than `str`. The API is intended to be used like this:
```
with connect(socket_path) as (input_stream, output_stream):
# Read from input_stream and write into output_stream here
...
```
Socket creation, connection, and closure will be automatically handled
inside this context manager. If any of the socket operations fail, raise
`ConnectionFailure`.
"""
try:
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client_socket:
client_socket.connect(str(socket_path))
with client_socket.makefile(
mode="rb"
) as input_channel, client_socket.makefile(mode="wb") as output_channel:
yield (input_channel, output_channel)
except OSError as error:
raise ConnectionFailure() from error
def _fetch_commit_paths(repository_path: str) -> Iterable[pathlib.Path]:
return sorted(pathlib.Path(repository_path).iterdir())
| ConnectionFailure |
python | ray-project__ray | rllib/examples/algorithms/classes/appo_w_shared_data_actor.py | {
"start": 839,
"end": 3076
} | class ____(APPO):
def setup(self, config: AlgorithmConfig):
# Call to parent `setup`.
super().setup(config)
# Create shared data actor.
self.shared_data_actor = SharedDataActor.remote()
# Share the actor with all other relevant actors.
def _share(actor, shared_act=self.shared_data_actor):
actor._shared_data_actor = shared_act
# Also add shared actor reference to all the learner connector pieces,
# if applicable.
if hasattr(actor, "_learner_connector") and actor._learner_connector:
for conn in actor._learner_connector:
conn._shared_data_actor = shared_act
self.env_runner_group.foreach_env_runner(func=_share)
if self.eval_env_runner_group:
self.eval_env_runner_group.foreach_env_runner(func=_share)
self.learner_group.foreach_learner(func=_share)
if self._aggregator_actor_manager:
self._aggregator_actor_manager.foreach_actor(func=_share)
def get_state(self, *args, **kwargs):
state = super().get_state(*args, **kwargs)
# Add shared actor's state.
state["shared_data_actor"] = ray.get(self.shared_data_actor.get_state.remote())
return state
def set_state(self, state, *args, **kwargs):
super().set_state(state, *args, **kwargs)
# Set shared actor's state.
if "shared_data_actor" in state:
self.shared_data_actor.set_state.remote(state["shared_data_actor"])
def restore_env_runners(self, env_runner_group: EnvRunnerGroup) -> List[int]:
restored = super().restore_env_runners(env_runner_group)
# For the restored EnvRunners, send them the latest shared, global state
# from the `SharedDataActor`.
for restored_idx in restored:
state_ref = self.shared_data_actor.get.remote(
key=f"EnvRunner_{restored_idx}"
)
env_runner_group.foreach_env_runner(
lambda env_runner, state=state_ref: env_runner._global_state,
remote_worker_ids=[restored_idx],
timeout_seconds=0.0,
)
return restored
| APPOWithSharedDataActor |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/api/test_operations.py | {
"start": 3248,
"end": 3347
} | class ____(BaseModel):
hellos: list[HelloResponse]
total_entries: int
| HelloCollectionResponse |
python | getsentry__sentry | tests/sentry/feedback/lib/test_label_query.py | {
"start": 842,
"end": 6512
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.organization = self.project.organization
def _create_feedback(self, message: str, labels: list[str], dt: datetime | None = None) -> None:
tags = {f"{AI_LABEL_TAG_PREFIX}.label.{i}": labels[i] for i in range(len(labels))}
event = mock_feedback_event(
self.project.id,
message=message,
tags=tags,
dt=dt,
)
create_feedback_issue(event, self.project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE)
def test_get_ai_labels_from_tags_retrieves_labels_correctly(self) -> None:
self._create_feedback(
"a",
["Authentication"],
dt=before_now(days=2),
)
self._create_feedback(
"b",
["Authentication", "Security"],
dt=before_now(days=1),
)
query = Query(
match=Entity(Dataset.IssuePlatform.value),
select=[
_get_ai_labels_from_tags(alias="labels"),
],
where=[
Condition(Column("project_id"), Op.EQ, self.project.id),
Condition(Column("timestamp"), Op.GTE, before_now(days=30)),
Condition(Column("timestamp"), Op.LT, before_now(days=0)),
Condition(Column("occurrence_type_id"), Op.EQ, FeedbackGroup.type_id),
],
orderby=[OrderBy(Column("timestamp"), Direction.ASC)],
)
result = raw_snql_query(
Request(
dataset=Dataset.IssuePlatform.value,
app_id="feedback-backend-web",
query=query,
tenant_ids={"organization_id": self.organization.id},
),
referrer="feedbacks.label_query",
)
assert len(result["data"]) == 2
assert {label for label in result["data"][0]["labels"]} == {"Authentication"}
assert {label for label in result["data"][1]["labels"]} == {"Authentication", "Security"}
def test_query_top_ai_labels_by_feedback_count(self) -> None:
self._create_feedback(
"UI issue 1",
["User Interface", "Performance"],
)
self._create_feedback(
"UI issue 2",
["Checkout", "User Interface"],
)
self._create_feedback(
"UI issue 3",
["Performance", "User Interface", "Colors"],
)
result = query_top_ai_labels_by_feedback_count(
organization_id=self.organization.id,
project_ids=[self.project.id],
start=before_now(days=1),
end=before_now(days=0),
limit=3,
)
assert len(result) == 3
assert result[0]["label"] == "User Interface"
assert result[0]["count"] == 3
assert result[1]["label"] == "Performance"
assert result[1]["count"] == 2
assert result[2]["label"] == "Checkout" or result[2]["label"] == "Colors"
assert result[2]["count"] == 1
def test_query_recent_feedbacks_with_ai_labels(self) -> None:
self._create_feedback(
"The UI is too slow and confusing",
["User Interface"],
dt=before_now(days=3),
)
self._create_feedback(
"The app crashes frequently when loading data",
["Performance"],
dt=before_now(days=2),
)
self._create_feedback(
"Hello",
[],
dt=before_now(days=1),
)
result = query_recent_feedbacks_with_ai_labels(
organization_id=self.organization.id,
project_ids=[self.project.id],
start=before_now(days=30),
end=before_now(days=0),
limit=1,
)
assert result[0] == {
"feedback": "The app crashes frequently when loading data",
"labels": ["Performance"],
}
def test_query_label_group_counts(self) -> None:
self._create_feedback("a", ["User Interface", "Performance"])
self._create_feedback("b", ["Performance", "Authentication"])
self._create_feedback("c", ["Authentication", "Security"])
label_groups_to_expected_result = {
("User Interface",): 1,
("Performance",): 2,
("Security",): 1,
("User Interface", "Performance"): 2,
("Performance", "Security"): 3,
("Authentication", "Performance", "User Interface"): 3,
("Performance", "Authentication", "Security"): 3,
("hello",): 0,
("Performance", "hello"): 2,
}
# Query for feedback counts by label groups
result = query_label_group_counts(
organization_id=self.organization.id,
project_ids=[self.project.id],
start=before_now(days=1),
end=before_now(days=0),
labels_groups=[list(g) for g in label_groups_to_expected_result],
)
assert len(result) == len(label_groups_to_expected_result)
for i, group in enumerate(label_groups_to_expected_result.keys()):
assert result[i] == label_groups_to_expected_result[group]
# Empty label groups should throw a ValueError
with pytest.raises(ValueError):
query_label_group_counts(
organization_id=self.organization.id,
project_ids=[self.project.id],
start=before_now(days=1),
end=before_now(days=0),
labels_groups=[],
)
| TestLabelQuery |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 22955,
"end": 23718
} | class ____(PreTrainedModel):
config_class = BertConfig
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": BertLayer,
"attentions": BertSelfAttention,
"cross_attentions": BertCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, BertLMPredictionHead):
init.zeros_(module.bias)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`BertForPreTraining`].
"""
)
| BertPreTrainedModel |
python | apache__airflow | airflow-core/src/airflow/models/backfill.py | {
"start": 2846,
"end": 3138
} | class ____(AirflowException):
"""
Raised when the quantity of active backfills cannot be determined.
:meta private:
"""
def __init__(self, dag_id: str):
super().__init__(f"Unable to determine the number of active backfills for DAG {dag_id}")
| UnknownActiveBackfills |
python | pallets__itsdangerous | tests/test_itsdangerous/test_serializer.py | {
"start": 686,
"end": 6826
} | class ____:
@pytest.fixture(params=(Serializer, partial(Serializer, serializer=pickle)))
def serializer_factory(self, request):
return partial(request.param, secret_key="secret_key")
@pytest.fixture()
def serializer(self, serializer_factory):
return serializer_factory()
@pytest.fixture()
def value(self):
return {"id": 42}
@pytest.mark.parametrize(
"value", (None, True, "str", "text", [1, 2, 3], {"id": 42})
)
def test_serializer(self, serializer: Serializer, value: Any):
assert serializer.loads(serializer.dumps(value)) == value
@pytest.mark.parametrize(
"transform",
(
lambda s: s.upper(),
lambda s: s + coerce_str(s, "a"),
lambda s: coerce_str(s, "a") + s[1:],
lambda s: s.replace(coerce_str(s, "."), coerce_str(s, "")),
),
)
def test_changed_value(self, serializer: Serializer, value: Any, transform):
signed = serializer.dumps(value)
assert serializer.loads(signed) == value
changed = transform(signed)
with pytest.raises(BadSignature):
serializer.loads(changed)
def test_bad_signature_exception(self, serializer: Serializer, value: Any):
bad_signed = serializer.dumps(value)[:-1]
with pytest.raises(BadSignature) as exc_info:
serializer.loads(bad_signed)
payload = cast(bytes, exc_info.value.payload)
assert serializer.load_payload(payload) == value
def test_bad_payload_exception(self, serializer: Serializer, value: Any):
original = serializer.dumps(value)
payload = original.rsplit(coerce_str(original, "."), 1)[0] # type: ignore
bad = serializer.make_signer().sign(payload[:-1])
with pytest.raises(BadPayload) as exc_info:
serializer.loads(bad)
assert exc_info.value.original_error is not None
def test_loads_unsafe(self, serializer: Serializer, value: Any):
signed = serializer.dumps(value)
assert serializer.loads_unsafe(signed) == (True, value)
bad_signed = signed[:-1]
assert serializer.loads_unsafe(bad_signed) == (False, value)
payload = signed.rsplit(coerce_str(signed, "."), 1)[0] # type: ignore
bad_payload = serializer.make_signer().sign(payload[:-1])[:-1]
assert serializer.loads_unsafe(bad_payload) == (False, None)
class BadUnsign(serializer.signer): # type: ignore
def unsign(self, signed_value, *args, **kwargs):
try:
return super().unsign(signed_value, *args, **kwargs)
except BadSignature as e:
e.payload = None
raise
serializer.signer = BadUnsign
assert serializer.loads_unsafe(bad_signed) == (False, None)
def test_file(self, serializer: Serializer, value: Any):
f = cast(
IO, BytesIO() if isinstance(serializer.dumps(value), bytes) else StringIO()
)
serializer.dump(value, f)
f.seek(0)
assert serializer.load(f) == value
f.seek(0)
assert serializer.load_unsafe(f) == (True, value)
def test_alt_salt(self, serializer: Serializer, value: Any):
signed = serializer.dumps(value, salt="other")
with pytest.raises(BadSignature):
serializer.loads(signed)
assert serializer.loads(signed, salt="other") == value
def test_signer_cls(self, serializer_factory, serializer: Serializer, value: Any):
class Other(serializer.signer): # type: ignore
default_key_derivation = "hmac"
other = serializer_factory(signer=Other)
assert other.loads(other.dumps(value)) == value
assert other.dumps(value) != serializer.dumps(value)
def test_signer_kwargs(
self, serializer_factory, serializer: Serializer, value: Any
):
other = serializer_factory(signer_kwargs={"key_derivation": "hmac"})
assert other.loads(other.dumps(value)) == value
assert other.dumps("value") != serializer.dumps("value")
def test_serializer_kwargs(self, serializer_factory):
serializer = serializer_factory(serializer_kwargs={"skipkeys": True})
try:
serializer.serializer.dumps(None, skipkeys=True)
except TypeError:
return
assert serializer.loads(serializer.dumps({(): 1})) == {}
def test_fallback_signers(self, serializer_factory, value: Any):
serializer = serializer_factory(signer_kwargs={"digest_method": hashlib.sha256})
signed = serializer.dumps(value)
fallback_serializer = serializer_factory(
signer_kwargs={"digest_method": hashlib.sha1},
fallback_signers=[{"digest_method": hashlib.sha256}],
)
assert fallback_serializer.loads(signed) == value
def test_iter_unsigners(self, serializer: Serializer, serializer_factory):
class Signer256(serializer.signer): # type: ignore
default_digest_method = hashlib.sha256
serializer = serializer_factory(
secret_key="secret_key",
fallback_signers=[
{"digest_method": hashlib.sha256},
(Signer, {"digest_method": hashlib.sha256}),
Signer256,
],
)
unsigners = serializer.iter_unsigners()
assert next(unsigners).digest_method == _lazy_sha1
for signer in unsigners:
assert signer.digest_method == hashlib.sha256
def test_digests():
factory = partial(Serializer, secret_key="dev key", salt="dev salt")
default_value = factory(signer_kwargs={}).dumps([42])
sha1_value = factory(signer_kwargs={"digest_method": hashlib.sha1}).dumps([42])
sha512_value = factory(signer_kwargs={"digest_method": hashlib.sha512}).dumps([42])
assert default_value == sha1_value
assert sha1_value == "[42].-9cNi0CxsSB3hZPNCe9a2eEs1ZM"
assert sha512_value == (
"[42].MKCz_0nXQqv7wKpfHZcRtJRmpT2T5uvs9YQsJEhJimqxc"
"9bCLxG31QzS5uC8OVBI1i6jyOLAFNoKaF5ckO9L5Q"
)
| TestSerializer |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 2261,
"end": 2411
} | class ____(Value):
combinable = True
def __init__(self, v):
self.v = str(v)
def to_string(self):
return self.v
| ScalarValue |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing_engines.py | {
"start": 1597,
"end": 3515
} | class ____:
params = [
_get_numeric_engines(),
["monotonic_incr", "monotonic_decr", "non_monotonic"],
[True, False],
[10**5, 2 * 10**6], # 2e6 is above SIZE_CUTOFF
]
param_names = ["engine_and_dtype", "index_type", "unique", "N"]
def setup(self, engine_and_dtype, index_type, unique, N):
engine, dtype = engine_and_dtype
if (
index_type == "non_monotonic"
and dtype in [np.int16, np.int8, np.uint8]
and unique
):
# Values overflow
raise NotImplementedError
if index_type == "monotonic_incr":
if unique:
arr = np.arange(N * 3, dtype=dtype)
else:
arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
elif index_type == "monotonic_decr":
if unique:
arr = np.arange(N * 3, dtype=dtype)[::-1]
else:
arr = np.array([3, 2, 1], dtype=dtype).repeat(N)
else:
assert index_type == "non_monotonic"
if unique:
arr = np.empty(N * 3, dtype=dtype)
arr[:N] = np.arange(N * 2, N * 3, dtype=dtype)
arr[N:] = np.arange(N * 2, dtype=dtype)
else:
arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
self.data = engine(arr)
# code below avoids populating the mapping etc. while timing.
self.data.get_loc(2)
self.key_middle = arr[len(arr) // 2]
self.key_early = arr[2]
def time_get_loc(self, engine_and_dtype, index_type, unique, N):
self.data.get_loc(self.key_early)
def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
# searchsorted performance may be different near the middle of a range
# vs near an endpoint
self.data.get_loc(self.key_middle)
| NumericEngineIndexing |
python | getsentry__sentry | src/sentry/hybridcloud/services/control_organization_provisioning/model.py | {
"start": 46,
"end": 216
} | class ____(RpcModel):
id: int
organization_id: int
user_id: int | None
slug: str
region_name: str
reservation_type: int
| RpcOrganizationSlugReservation |
python | walkccc__LeetCode | solutions/1199. Minimum Time to Build Blocks/1199.py | {
"start": 0,
"end": 321
} | class ____:
def minBuildTime(self, blocks: list[int], split: int) -> int:
minHeap = blocks.copy()
heapify(minHeap)
while len(minHeap) > 1:
heapq.heappop(minHeap) # the minimum
x = heapq.heappop(minHeap) # the second minimum
heapq.heappush(minHeap, x + split)
return minHeap[0]
| Solution |
python | getsentry__sentry | tests/sentry/tasks/test_collect_project_platforms.py | {
"start": 216,
"end": 1153
} | class ____(TestCase):
def test_simple(self) -> None:
now = timezone.now()
organization = self.create_organization(name="foo")
project1 = self.create_project(organization=organization, name="foo", slug="foo")
project2 = self.create_project(organization=organization, name="bar", slug="bar")
self.create_group(project=project1, last_seen=now, platform="php")
self.create_group(project=project1, last_seen=now, platform="perl")
self.create_group(project=project2, last_seen=now, platform="python")
with self.tasks():
collect_project_platforms(1)
assert ProjectPlatform.objects.filter(project_id=project1.id, platform="php").exists()
assert ProjectPlatform.objects.filter(project_id=project1.id, platform="perl").exists()
assert ProjectPlatform.objects.filter(project_id=project2.id, platform="python").exists()
| CollectProjectPlatformsTest |
python | ray-project__ray | python/ray/_private/accelerators/rbln.py | {
"start": 315,
"end": 2533
} | class ____(AcceleratorManager):
"""Rebellions RBLN accelerators."""
@staticmethod
def get_resource_name() -> str:
return "RBLN"
@staticmethod
def get_visible_accelerator_ids_env_var() -> str:
return RBLN_RT_VISIBLE_DEVICES_ENV_VAR
@staticmethod
def get_current_process_visible_accelerator_ids() -> Optional[List[str]]:
visible_devices = os.environ.get(
RBLNAcceleratorManager.get_visible_accelerator_ids_env_var()
)
if visible_devices is None:
return None
if visible_devices == "":
return []
return visible_devices.split(",")
@staticmethod
def get_current_node_num_accelerators() -> int:
"""Detects the number of RBLN devices on the current machine."""
try:
from rebel import device_count
return device_count()
except Exception as e:
logger.debug("Could not detect RBLN devices: %s", e)
return 0
@staticmethod
def get_current_node_accelerator_type() -> Optional[str]:
"""Gets the type of RBLN NPU on the current node."""
try:
from rebel import get_npu_name
return get_npu_name()
except Exception as e:
logger.exception("Failed to detect RBLN NPU type: %s", e)
return None
@staticmethod
def validate_resource_request_quantity(
quantity: float,
) -> Tuple[bool, Optional[str]]:
if isinstance(quantity, float) and not quantity.is_integer():
return (
False,
f"{RBLNAcceleratorManager.get_resource_name()} resource quantity"
" must be whole numbers. "
f"The specified quantity {quantity} is invalid.",
)
else:
return (True, None)
@staticmethod
def set_current_process_visible_accelerator_ids(
visible_rbln_devices: List[str],
) -> None:
if not os.getenv(NOSET_RBLN_RT_VISIBLE_DEVICES_ENV_VAR):
os.environ[
RBLNAcceleratorManager.get_visible_accelerator_ids_env_var()
] = ",".join(map(str, visible_rbln_devices))
| RBLNAcceleratorManager |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 77985,
"end": 78404
} | class ____(BaseTest):
def test_target_data_from_tm(self):
tm = self.target_machine(jit=False)
td = tm.target_data
mod = self.module()
gv_i32 = mod.get_global_variable("glob")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
self.assertEqual(td.get_abi_size(gv_i32.type), pointer_size)
| TestTargetMachine |
python | facelessuser__soupsieve | tests/test_level4/test_user_invalid.py | {
"start": 52,
"end": 604
} | class ____(util.TestCase):
"""Test invalid selectors."""
def test_user_invalid(self):
"""Test user invalid (matches nothing)."""
markup = """
<form id="form">
<input id="1" type="text">
</form>
"""
self.assert_selector(
markup,
"input:user-invalid",
[],
flags=util.HTML
)
self.assert_selector(
markup,
"input:not(:user-invalid)",
["1"],
flags=util.HTML
)
| TestInvalid |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gitlab/unit_tests/test_config_migrations.py | {
"start": 424,
"end": 2034
} | class ____:
@pytest.mark.parametrize(
"key_to_migrate, migrated_key",
[
("groups", "groups_list"),
("projects", "projects_list"),
],
ids=["groups-migration", "projects-migration"],
)
def test_migrations(self, key_to_migrate, migrated_key):
try:
with open(TEST_CONFIG_PATH, "r") as f:
config: Mapping[str, Any] = json.load(f)
assert config[key_to_migrate] == "a b c"
assert not config.get(migrated_key)
source = get_source(config=config, config_path=TEST_CONFIG_PATH)
migrated_config = source.configure(config=config, temp_dir="/not/a/real/path")
assert migrated_config[migrated_key] == ["a", "b", "c"]
finally:
with open(TEST_CONFIG_PATH, "w") as f:
json.dump(config, f)
def test_given_no_key_to_migrate_then_no_migration_is_performed(self):
try:
with open(TEST_CONFIG_PATH, "r") as f:
config: Mapping[str, Any] = json.load(f)
copy_config = dict(config)
copy_config["groups"] = None
source = get_source(config=copy_config, config_path=TEST_CONFIG_PATH)
migrated_config = source.configure(config=copy_config, temp_dir="/not/a/real/path")
assert not migrated_config.get("groups_list")
assert migrated_config["groups"] == None
assert migrated_config.get("projects_list")
finally:
with open(TEST_CONFIG_PATH, "w") as f:
json.dump(config, f)
| TestMigrations |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_uninstalled.py | {
"start": 442,
"end": 2750
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration = self.create_integration(
organization=self.organization,
external_id="connection:123",
provider="bitbucket",
metadata={
"public_key": "public-key",
"base_url": "https://api.bitbucket.org",
"shared_secret": "a-big-secret",
"domain_name": "bitbucket.org/test-org",
"icon": "https://bitbucket.org/account/test-org/avatar/",
"scopes": ["issue:write", "pullrequest", "webhook", "repository"],
"uuid": "u-u-i-d",
"type": "team",
},
)
self.install = self.integration.get_installation(self.organization.id)
self.path = reverse("sentry-extensions-bitbucket-uninstalled")
self.repository = self.create_repo(
project=self.project,
provider="integrations:bitbucket",
integration_id=self.integration.id,
)
def test_uninstall_missing_auth_header(self) -> None:
response = self.client.post(self.path)
assert response.status_code == 400
self.repository.refresh_from_db()
assert self.repository.id
@patch("sentry.integrations.bitbucket.uninstalled.get_integration_from_jwt")
def test_uninstall_missing_integration(self, mock_jwt: MagicMock) -> None:
mock_jwt.side_effect = AtlassianConnectValidationError("missing integration")
response = self.client.post(self.path, HTTP_AUTHORIZATION="JWT fake-jwt")
assert response.status_code == 400
self.repository.refresh_from_db()
assert self.repository.id
assert self.repository.status == ObjectStatus.ACTIVE
@patch("sentry.integrations.bitbucket.uninstalled.get_integration_from_jwt")
def test_uninstall_success(self, mock_jwt: MagicMock) -> None:
mock_jwt.return_value = serialize_integration(self.integration)
response = self.client.post(self.path, HTTP_AUTHORIZATION="JWT fake-jwt")
assert response.status_code == 200
self.repository.refresh_from_db()
assert self.repository.id
assert self.repository.status == ObjectStatus.DISABLED
| BitbucketUnistalledEndpointTest |
python | etianen__django-reversion | reversion/migrations/0001_squashed_0004_auto_20160611_1202.py | {
"start": 157,
"end": 2783
} | class ____(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(db_index=True, help_text='The date and time this revision was created.', verbose_name='date created')),
('comment', models.TextField(blank=True, help_text='A text comment on this revision.', verbose_name='comment')),
('user', models.ForeignKey(blank=True, help_text='The user who created this revision.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
"ordering": ("-pk",),
'verbose_name': 'revision',
'verbose_name_plural': 'revisions',
},
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.CharField(help_text='Primary key of the model under version control.', max_length=191)),
('format', models.CharField(help_text='The serialization format used by this model.', max_length=255)),
('serialized_data', models.TextField(help_text='The serialized form of this version of the model.')),
('object_repr', models.TextField(help_text='A string representation of the object.')),
('content_type', models.ForeignKey(help_text='Content type of the model under version control.', on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('revision', models.ForeignKey(help_text='The revision that contains this version.', on_delete=django.db.models.deletion.CASCADE, to='reversion.Revision')),
('db', models.CharField(help_text='The database the model under version control is stored in.', max_length=191)),
],
options={
"ordering": ("-pk",),
'verbose_name': 'version',
'verbose_name_plural': 'versions',
},
),
migrations.AlterUniqueTogether(
name='version',
unique_together={('db', 'content_type', 'object_id', 'revision')},
),
]
| Migration |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/contrib/regular_languages/completion.py | {
"start": 315,
"end": 3468
} | class ____(Completer):
"""
Completer which can be used for autocompletion according to variables in
the grammar. Each variable can have a different autocompleter.
:param compiled_grammar: `GrammarCompleter` instance.
:param completers: `dict` mapping variable names of the grammar to the
`Completer` instances to be used for each variable.
"""
def __init__(
self, compiled_grammar: _CompiledGrammar, completers: dict[str, Completer]
) -> None:
self.compiled_grammar = compiled_grammar
self.completers = completers
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
m = self.compiled_grammar.match_prefix(document.text_before_cursor)
if m:
yield from self._remove_duplicates(
self._get_completions_for_match(m, complete_event)
)
def _get_completions_for_match(
self, match: Match, complete_event: CompleteEvent
) -> Iterable[Completion]:
"""
Yield all the possible completions for this input string.
(The completer assumes that the cursor position was at the end of the
input string.)
"""
for match_variable in match.end_nodes():
varname = match_variable.varname
start = match_variable.start
completer = self.completers.get(varname)
if completer:
text = match_variable.value
# Unwrap text.
unwrapped_text = self.compiled_grammar.unescape(varname, text)
# Create a document, for the completions API (text/cursor_position)
document = Document(unwrapped_text, len(unwrapped_text))
# Call completer
for completion in completer.get_completions(document, complete_event):
new_text = (
unwrapped_text[: len(text) + completion.start_position]
+ completion.text
)
# Wrap again.
yield Completion(
text=self.compiled_grammar.escape(varname, new_text),
start_position=start - len(match.string),
display=completion.display,
display_meta=completion.display_meta,
)
def _remove_duplicates(self, items: Iterable[Completion]) -> Iterable[Completion]:
"""
Remove duplicates, while keeping the order.
(Sometimes we have duplicates, because the there several matches of the
same grammar, each yielding similar completions.)
"""
def hash_completion(completion: Completion) -> tuple[str, int]:
return completion.text, completion.start_position
yielded_so_far: set[tuple[str, int]] = set()
for completion in items:
hash_value = hash_completion(completion)
if hash_value not in yielded_so_far:
yielded_so_far.add(hash_value)
yield completion
| GrammarCompleter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.