language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-core/src/airflow/utils/thread_safe_dict.py | {
"start": 840,
"end": 1635
} | class ____:
"""Dictionary that uses a lock during operations, to ensure thread safety."""
def __init__(self):
self.sync_dict = {}
self.thread_lock = threading.Lock()
def set(self, key, value):
with self.thread_lock:
self.sync_dict[key] = value
def get(self, key):
with self.thread_lock:
return self.sync_dict.get(key)
def delete(self, key):
with self.thread_lock:
if key in self.sync_dict:
del self.sync_dict[key]
def clear(self):
with self.thread_lock:
self.sync_dict.clear()
def get_all(self):
with self.thread_lock:
# Return a copy to avoid exposing the internal dictionary.
return self.sync_dict.copy()
| ThreadSafeDict |
python | kamyu104__LeetCode-Solutions | Python/count-of-integers.py | {
"start": 70,
"end": 991
} | class ____(object):
def count(self, num1, num2, min_sum, max_sum):
"""
:type num1: str
:type num2: str
:type min_sum: int
:type max_sum: int
:rtype: int
"""
MOD = 10**9+7
def f(x):
dp = [[0]*(max_sum+1) for _ in xrange(2)]
dp[0][0] = dp[1][0] = 1
for i in reversed(xrange(len(x))):
new_dp = [[0]*(max_sum+1) for _ in xrange(2)]
for t in xrange(2):
for total in xrange(max_sum+1):
for d in xrange(min((int(x[i]) if t else 9), total)+1):
new_dp[t][total] = (new_dp[t][total]+dp[int(t and d == int(x[i]))][total-d])%MOD
dp = new_dp
return reduce(lambda x, y: (x+y)%MOD, (dp[1][total] for total in xrange(min_sum, max_sum+1)))
return (f(num2)-f(str(int(num1)-1)))%MOD
| Solution |
python | python-excel__xlwt | tests/test_biff_records.py | {
"start": 119,
"end": 506
} | class ____(unittest.TestCase):
def test_shared_string_table(self):
expected_result = b'\xfc\x00\x11\x00\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x01\x1e\x04;\x04O\x04'
string_record = xlwt.BIFFRecords.SharedStringTable(encoding='cp1251')
string_record.add_str('Оля')
self.assertEqual(expected_result, string_record.get_biff_record())
| TestSharedStringTable |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/exception.py | {
"start": 872,
"end": 991
} | class ____(UnityException):
"""
Related to errors with side channels.
"""
pass
| UnitySideChannelException |
python | numba__numba | numba/cuda/tests/cudapy/test_vectorize_complex.py | {
"start": 185,
"end": 548
} | class ____(CUDATestCase):
def test_vectorize_complex(self):
@vectorize(['complex128(complex128)'], target='cuda')
def vcomp(a):
return a * a + 1.
A = np.arange(5, dtype=np.complex128)
B = vcomp(A)
self.assertTrue(np.allclose(A * A + 1., B))
if __name__ == '__main__':
unittest.main()
| TestVectorizeComplex |
python | spyder-ide__spyder | spyder/api/plugins/new_api.py | {
"start": 32327,
"end": 47862
} | class ____(SpyderPluginV2):
"""
A Spyder plugin to enhance functionality with a dockable widget.
"""
# ---- API: Mandatory attributes
# -------------------------------------------------------------------------
# This is the main widget of the dockable plugin.
# It needs to be a subclass of PluginMainWidget.
WIDGET_CLASS = None
# ---- API: Optional attributes
# -------------------------------------------------------------------------
# Define a list of plugins next to which we want to to tabify this plugin.
# Example: ['Plugins.Editor']
TABIFY = []
# Disable actions in Spyder main menus when the plugin is not visible
DISABLE_ACTIONS_WHEN_HIDDEN = True
# Raise and focus on switch to plugin calls.
# If False, the widget will be raised but focus will not be given until
# the action to switch is called a second time.
RAISE_AND_FOCUS = False
# Whether the plugin can handle file actions.
# If set to true, then the `create_new_file`, `open_last_closed_file`,
# `save_file`, `save_file_as`, `save_copy_as`, `save_all`, `revert_file`,
# `close_file` and `close_all` functions will be called to handle the
# corresponding actions. Individual actions can be disabled with
# `enable_file_action` in the Applications plugin.
CAN_HANDLE_FILE_ACTIONS = False
# List of file extensions which the plugin can open.
# If the user opens a file with one of these extensions, then the file
# will open in this plugin using the `open_file` function.
# Example: ['.ipynb'] for spyder-notebook
FILE_EXTENSIONS = []
# Whether the plugin can handle edit actions.
# If set to True, then the `undo`, `redo`, `cut`, `copy`, `paste` and
# `select_all` functions will be called to handle the corresponding
# actions. Individual actions can be disabled with `enable_edit_action` in
# the Applications plugin.
CAN_HANDLE_EDIT_ACTIONS = False
# Whether the plugin can handle search actions.
# If set to True, then the `find`, `find_next`, `find_previous` and
# `replace` functions will be called to handle the corresponding
# actions. Individual actions can be disabled with `enable_search_action`
# in the Applications plugin.
CAN_HANDLE_SEARCH_ACTIONS = False
# ---- API: Available signals
# -------------------------------------------------------------------------
sig_focus_changed = Signal()
"""
This signal is emitted to inform the focus of this plugin has changed.
"""
sig_toggle_view_changed = Signal(bool)
"""
This action is emitted to inform the visibility of a dockable plugin
has changed.
This is triggered by checking/unchecking the entry for a pane in the
`Window > Panes` menu.
Parameters
----------
visible: bool
New visibility of the dockwidget.
"""
sig_switch_to_plugin_requested = Signal(object, bool)
"""
This signal can be emitted to inform the main window that this plugin
requested to be displayed.
Notes
-----
This is automatically connected to main container/widget at plugin's
registration.
"""
sig_update_ancestor_requested = Signal()
"""
This signal is emitted to inform the main window that a child widget
needs its ancestor to be updated.
"""
# ---- Private methods
# -------------------------------------------------------------------------
def __init__(self, parent, configuration):
if not issubclass(self.WIDGET_CLASS, PluginMainWidget):
raise SpyderAPIError(
'A SpyderDockablePlugin must define a valid WIDGET_CLASS '
'attribute!')
self.CONTAINER_CLASS = self.WIDGET_CLASS
super().__init__(parent, configuration=configuration)
# Shortcut to switch to the plugin. It's defined on the main window
self._switch_to_shortcut = None
# Widget setup
# --------------------------------------------------------------------
self._widget = self._container
widget = self._widget
if widget is None:
raise SpyderAPIError(
'A dockable plugin must define a WIDGET_CLASS!')
if not isinstance(widget, PluginMainWidget):
raise SpyderAPIError(
'The WIDGET_CLASS of a dockable plugin must be a subclass of '
'PluginMainWidget!')
widget.DISABLE_ACTIONS_WHEN_HIDDEN = self.DISABLE_ACTIONS_WHEN_HIDDEN
widget.RAISE_AND_FOCUS = self.RAISE_AND_FOCUS
widget.set_icon(self.get_icon())
widget.set_name(self.NAME)
# Render all toolbars as a final separate step on the main window
# in case some plugins want to extend a toolbar. Since the rendering
# can only be done once!
widget.render_toolbars()
# Default Signals
# --------------------------------------------------------------------
widget.sig_toggle_view_changed.connect(self.sig_toggle_view_changed)
widget.sig_update_ancestor_requested.connect(
self.sig_update_ancestor_requested)
# ---- API: Optional methods to override
# -------------------------------------------------------------------------
def create_new_file(self) -> None:
"""
Create a new file inside the plugin.
This function will be called if the user creates a new file using
the `File > New` menu item or the "New file" button in the main
toolbar, and `CAN_HANDLE_FILE_ACTIONS` is set to `True`.
"""
raise NotImplementedError
def open_file(self, filename: str):
"""
Open file inside plugin.
This method will be called if the user wants to open a file with one
of the file name extensions listed in `FILE_EXTENSIONS`, so you need
to define that variable too.
Parameters
----------
filename: str
The name of the file to be opened.
"""
raise NotImplementedError
def get_current_filename(self) -> Optional[str]:
"""
Return file name of the file that is currently displayed.
This is meant for plugins like the Editor or Notebook plugin which
can edit or display files. Return `None` if no file is displayed or if
this does not display files.
This function is used in the `Open file` action to initialize the
"Open file" dialog.
"""
return None
def current_file_is_temporary(self) -> bool:
"""
Return whether the currently displayed file is a temporary file.
This function should only be called if a file is displayed, that is,
if `self.get_current_filename()` does not return `None`.
"""
return False
def open_last_closed_file(self) -> None:
"""
Open the last closed file again.
This function will be called if the `File > Open last closed` menu item
is selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS`
is set to `True`.
"""
raise NotImplementedError
def save_file(self) -> None:
"""
Save the current file.
This function will be called if the user saves the current file using
the `File > Save` menu item or the "Save file" button in the main
toolbar, the plugin has focus, and `CAN_HANDLE_FILE_ACTIONS` is set to
`True`.
"""
raise NotImplementedError
def save_file_as(self) -> None:
"""
Save the current file under a different name.
This function will be called if the `File > Save as` menu item is
selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def save_copy_as(self) -> None:
"""
Save a copy of the current file under a different name.
This function will be called if the `File > Save copy as` menu item is
selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def save_all(self) -> None:
"""
Save all files that are opened in the plugin.
This function will be called if the user saves all files using the
`File > Save all` menu item or the "Save all" button in the main
toolbar, the plugin has focus, and `CAN_HANDLE_FILE_ACTIONS` is set to
`True`.
"""
raise NotImplementedError
def revert_file(self) -> None:
"""
Revert the current file to the version stored on disk.
This function will be called if the `File > Revert` menu item is
selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def close_file(self) -> None:
"""
Close the current file.
This function will be called if the `File > Close` menu item is
selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def close_all(self) -> None:
"""
Close all opened files.
This function will be called if the `File > Close all` menu item is
selected while the plugin has focus and `CAN_HANDLE_FILE_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def undo(self) -> None:
"""
Undo last edition.
This function will be called if the `Edit > Undo` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def redo(self) -> None:
"""
Redo last edition.
This function will be called if the `Edit > Redo` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def cut(self) -> None:
"""
Cut selection.
This function will be called if the `Edit > Cut` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def copy(self) -> None:
"""
Copy selection.
This function will be called if the `Edit > Copy` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def paste(self) -> None:
"""
Paste clipboard.
This function will be called if the `Edit > Paste` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def select_all(self) -> None:
"""
Select all text in the plugin.
This function will be called if the `Edit > Select All` menu item is
selected while the plugin has focus and `CAN_HANDLE_EDIT_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def find(self) -> None:
"""
Find text in the plugin.
This function will be called if the `Search > Find text` menu item is
selected while the plugin has focus and `CAN_HANDLE_SEARCH_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def find_next(self) -> None:
"""
Move to next find text occurrence in the plugin.
This function will be called if the `Search > Find next` menu item is
selected while the plugin has focus and `CAN_HANDLE_SEARCH_ACTIONS` is
set to `True`.
"""
raise NotImplementedError
def find_previous(self) -> None:
"""
Move to previous find text occurrence in the plugin.
This function will be called if the `Search > Find previous` menu item
is selected while the plugin has focus and `CAN_HANDLE_SEARCH_ACTIONS`
is set to `True`.
"""
raise NotImplementedError
def replace(self) -> None:
"""
Replace text occurrence in the plugin.
This function will be called if the `Search > Replace text` menu item
is selected while the plugin has focus and `CAN_HANDLE_SEARCH_ACTIONS`
is set to `True`.
"""
raise NotImplementedError
# ---- API: available methods
# -------------------------------------------------------------------------
def before_long_process(self, message):
"""
Show a message in main window's status bar, change the mouse pointer
to Qt.WaitCursor and start spinner when starting a long process.
Parameters
----------
message: str
Message to show in the status bar when the long process starts.
"""
self.get_widget().start_spinner()
super().before_long_process(message)
def after_long_process(self, message=""):
"""
Clear main window's status bar after a long process, restore mouse
pointer to the OS deault and stop spinner.
Parameters
----------
message: str
Message to show in the status bar when the long process finishes.
"""
super().after_long_process(message)
self.get_widget().stop_spinner()
def get_widget(self) -> PluginMainWidget:
"""
Return the plugin main widget.
"""
if self._widget is None:
raise SpyderAPIError('Dockable Plugin must have a WIDGET_CLASS!')
return self._widget
def update_title(self):
"""
Update plugin title, i.e. dockwidget or window title.
"""
self.get_widget().update_title()
def update_margins(self, margin):
"""
Update margins of main widget inside dockable plugin.
"""
self.get_widget().update_margins(margin)
@Slot()
def switch_to_plugin(self, force_focus=False):
"""
Switch to plugin and define if focus should be given or not.
"""
if self.get_widget().windowwidget is None:
self.sig_switch_to_plugin_requested.emit(self, force_focus)
def set_ancestor(self, ancestor_widget):
"""
Update the ancestor/parent of child widgets when undocking.
"""
self.get_widget().set_ancestor(ancestor_widget)
# ---- Convenience methods from the widget exposed on the plugin
# -------------------------------------------------------------------------
@property
def dockwidget(self):
return self.get_widget().dockwidget
@property
def options_menu(self):
return self.get_widget().get_options_menu()
@property
def toggle_view_action(self):
return self.get_widget().toggle_view_action
def create_dockwidget(self, mainwindow):
return self.get_widget().create_dockwidget(mainwindow)
def create_window(self):
self.get_widget().create_window()
def close_window(self, save_undocked=False):
self.get_widget()._close_window(save_undocked=save_undocked)
def change_visibility(self, state, force_focus=False):
self.get_widget().change_visibility(state, force_focus)
def toggle_view(self, value):
self.get_widget().toggle_view(value)
| SpyderDockablePlugin |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 176446,
"end": 179106
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3, 3]"):
l_x_ = L_x_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
diff_args: "f32[3, 3, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
_set_tensor_requires_grad: "f32[3, 3, 3]" = torch._functorch.eager_transforms._set_tensor_requires_grad(diff_args); _set_tensor_requires_grad = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
sin: "f32[3, 3, 3]" = diff_args.sin()
add: "f32[3, 3, 3]" = sin + 3.14; sin = None
output: "f32[]" = add.sum(); add = None
aux: "f32[3, 3, 3]" = diff_args.cos()
_autograd_grad = torch._functorch.eager_transforms._autograd_grad((output,), [diff_args], create_graph = True); diff_args = None
grad_input: "f32[3, 3, 3]" = _autograd_grad[0]; _autograd_grad = None
grad_input_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(grad_input, 1); grad_input = None
output_1: "f32[]" = torch._C._functorch._unwrap_for_grad(output, 1); output = output_1 = None
aux_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(aux, 1); aux = None
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (grad_input_1, aux_1)
""",
)
def test_grad_two_tensor_has_aux(self):
counters.clear()
def fn(x, y):
return ((x.sin() + y).sum(), x.cos())
def wrapper_fn(x, y):
return torch.func.grad(fn, has_aux=True)(x, y)
y = torch.randn(3, 3, 3)
x = torch.randn(3, 3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x, y))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | ray-project__ray | python/ray/util/metrics.py | {
"start": 8304,
"end": 10869
} | class ____(Metric):
"""Tracks the size and number of events in buckets.
Histograms allow you to calculate aggregate quantiles
such as 25, 50, 95, 99 percentile latency for an RPC.
This corresponds to Prometheus' histogram metric:
https://prometheus.io/docs/concepts/metric_types/#histogram
Args:
name: Name of the metric.
description: Description of the metric.
boundaries: Boundaries of histogram buckets.
tag_keys: Tag keys of the metric.
"""
def __init__(
self,
name: str,
description: str = "",
boundaries: List[float] = None,
tag_keys: Optional[Tuple[str, ...]] = None,
):
super().__init__(name, description, tag_keys)
if boundaries is None or len(boundaries) == 0:
raise ValueError(
"boundaries argument should be provided when using "
"the Histogram class. e.g., "
'Histogram("name", boundaries=[1.0, 2.0])'
)
for i, boundary in enumerate(boundaries):
if boundary <= 0:
raise ValueError(
"Invalid `boundaries` argument at index "
f"{i}, {boundaries}. Use positive values for the arguments."
)
self.boundaries = boundaries
if self._discard_metric:
self._metric = None
else:
self._metric = CythonHistogram(
self._name, self._description, self.boundaries, self._tag_keys
)
def observe(self, value: Union[int, float], tags: Dict[str, str] = None):
"""Observe a given `value` and add it to the appropriate bucket.
Tags passed in will take precedence over the metric's default tags.
Args:
value(int, float): Value to set the gauge to.
tags(Dict[str, str]): Tags to set or override for this gauge.
"""
if not isinstance(value, (int, float)):
raise TypeError(f"value must be int or float, got {type(value)}.")
self._record(value, tags)
def __reduce__(self):
deserializer = Histogram
serialized_data = (
self._name,
self._description,
self.boundaries,
self._tag_keys,
)
return deserializer, serialized_data
@property
def info(self):
"""Return information about histogram metric."""
info = super().info
info.update({"boundaries": self.boundaries})
return info
@DeveloperAPI
| Histogram |
python | doocs__leetcode | solution/0400-0499/0435.Non-overlapping Intervals/Solution.py | {
"start": 0,
"end": 303
} | class ____:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort(key=lambda x: x[1])
ans = len(intervals)
pre = -inf
for l, r in intervals:
if pre <= l:
ans -= 1
pre = r
return ans
| Solution |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 40359,
"end": 41497
} | class ____(JsProxy, Exception):
"""A JavaScript Error.
These are pickleable unlike other JsProxies.
"""
# Note: Unlike many of these classes, this one is never actually seen by the
# user IN_BROWSER (it's replaced by a different JsException in
# pyodide._core). We use it to unpickle errors so we need it to be
# instantiable.
def __new__(cls, *args, **kwargs):
if args[0] == _instantiate_token:
return super().__new__(cls, *args, **kwargs)
return cls._new_exc(*args, **kwargs)
@classmethod
def _new_exc(cls, name: str, message: str = "", stack: str = "") -> "JsException":
result = super().__new__(JsException, _instantiate_token)
result.name = name
result.message = message
result.stack = stack
return result
@classmethod
def new(cls, *args: Any) -> "JsException":
return cls()
def __str__(self):
return f"{self.name}: {self.message}"
name: str
"""The name of the error type"""
message: str
"""The error message"""
stack: str
"""The JavaScript stack trace"""
| JsException |
python | sympy__sympy | sympy/parsing/mathematica.py | {
"start": 6134,
"end": 44146
} | class ____:
"""
An instance of this class converts a string of a Wolfram Mathematica
expression to a SymPy expression.
The main parser acts internally in three stages:
1. tokenizer: tokenizes the Mathematica expression and adds the missing *
operators. Handled by ``_from_mathematica_to_tokens(...)``
2. full form list: sort the list of strings output by the tokenizer into a
syntax tree of nested lists and strings, equivalent to Mathematica's
``FullForm`` expression output. This is handled by the function
``_from_tokens_to_fullformlist(...)``.
3. SymPy expression: the syntax tree expressed as full form list is visited
and the nodes with equivalent classes in SymPy are replaced. Unknown
syntax tree nodes are cast to SymPy ``Function`` objects. This is
handled by ``_from_fullformlist_to_sympy(...)``.
"""
# left: Mathematica, right: SymPy
CORRESPONDENCES = {
'Sqrt[x]': 'sqrt(x)',
'Rational[x,y]': 'Rational(x,y)',
'Exp[x]': 'exp(x)',
'Log[x]': 'log(x)',
'Log[x,y]': 'log(y,x)',
'Log2[x]': 'log(x,2)',
'Log10[x]': 'log(x,10)',
'Mod[x,y]': 'Mod(x,y)',
'Max[*x]': 'Max(*x)',
'Min[*x]': 'Min(*x)',
'Pochhammer[x,y]':'rf(x,y)',
'ArcTan[x,y]':'atan2(y,x)',
'ExpIntegralEi[x]': 'Ei(x)',
'SinIntegral[x]': 'Si(x)',
'CosIntegral[x]': 'Ci(x)',
'AiryAi[x]': 'airyai(x)',
'AiryAiPrime[x]': 'airyaiprime(x)',
'AiryBi[x]' :'airybi(x)',
'AiryBiPrime[x]' :'airybiprime(x)',
'LogIntegral[x]':' li(x)',
'PrimePi[x]': 'primepi(x)',
'Prime[x]': 'prime(x)',
'PrimeQ[x]': 'isprime(x)'
}
# trigonometric, e.t.c.
for arc, tri, h in product(('', 'Arc'), (
'Sin', 'Cos', 'Tan', 'Cot', 'Sec', 'Csc'), ('', 'h')):
fm = arc + tri + h + '[x]'
if arc: # arc func
fs = 'a' + tri.lower() + h + '(x)'
else: # non-arc func
fs = tri.lower() + h + '(x)'
CORRESPONDENCES.update({fm: fs})
REPLACEMENTS = {
' ': '',
'^': '**',
'{': '[',
'}': ']',
}
RULES = {
# a single whitespace to '*'
'whitespace': (
re.compile(r'''
(?:(?<=[a-zA-Z\d])|(?<=\d\.)) # a letter or a number
\s+ # any number of whitespaces
(?:(?=[a-zA-Z\d])|(?=\.\d)) # a letter or a number
''', re.VERBOSE),
'*'),
# add omitted '*' character
'add*_1': (
re.compile(r'''
(?:(?<=[])\d])|(?<=\d\.)) # ], ) or a number
# ''
(?=[(a-zA-Z]) # ( or a single letter
''', re.VERBOSE),
'*'),
# add omitted '*' character (variable letter preceding)
'add*_2': (
re.compile(r'''
(?<=[a-zA-Z]) # a letter
\( # ( as a character
(?=.) # any characters
''', re.VERBOSE),
'*('),
# convert 'Pi' to 'pi'
'Pi': (
re.compile(r'''
(?:
\A|(?<=[^a-zA-Z])
)
Pi # 'Pi' is 3.14159... in Mathematica
(?=[^a-zA-Z])
''', re.VERBOSE),
'pi'),
}
# Mathematica function name pattern
FM_PATTERN = re.compile(r'''
(?:
\A|(?<=[^a-zA-Z]) # at the top or a non-letter
)
[A-Z][a-zA-Z\d]* # Function
(?=\[) # [ as a character
''', re.VERBOSE)
# list or matrix pattern (for future usage)
ARG_MTRX_PATTERN = re.compile(r'''
\{.*\}
''', re.VERBOSE)
# regex string for function argument pattern
ARGS_PATTERN_TEMPLATE = r'''
(?:
\A|(?<=[^a-zA-Z])
)
{arguments} # model argument like x, y,...
(?=[^a-zA-Z])
'''
# will contain transformed CORRESPONDENCES dictionary
TRANSLATIONS: dict[tuple[str, int], dict[str, Any]] = {}
# cache for a raw users' translation dictionary
cache_original: dict[tuple[str, int], dict[str, Any]] = {}
# cache for a compiled users' translation dictionary
cache_compiled: dict[tuple[str, int], dict[str, Any]] = {}
@classmethod
def _initialize_class(cls):
# get a transformed CORRESPONDENCES dictionary
d = cls._compile_dictionary(cls.CORRESPONDENCES)
cls.TRANSLATIONS.update(d)
def __init__(self, additional_translations=None):
self.translations = {}
# update with TRANSLATIONS (class constant)
self.translations.update(self.TRANSLATIONS)
if additional_translations is None:
additional_translations = {}
# check the latest added translations
if self.__class__.cache_original != additional_translations:
if not isinstance(additional_translations, dict):
raise ValueError('The argument must be dict type')
# get a transformed additional_translations dictionary
d = self._compile_dictionary(additional_translations)
# update cache
self.__class__.cache_original = additional_translations
self.__class__.cache_compiled = d
# merge user's own translations
self.translations.update(self.__class__.cache_compiled)
@classmethod
def _compile_dictionary(cls, dic):
# for return
d = {}
for fm, fs in dic.items():
# check function form
cls._check_input(fm)
cls._check_input(fs)
# uncover '*' hiding behind a whitespace
fm = cls._apply_rules(fm, 'whitespace')
fs = cls._apply_rules(fs, 'whitespace')
# remove whitespace(s)
fm = cls._replace(fm, ' ')
fs = cls._replace(fs, ' ')
# search Mathematica function name
m = cls.FM_PATTERN.search(fm)
# if no-hit
if m is None:
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# get Mathematica function name like 'Log'
fm_name = m.group()
# get arguments of Mathematica function
args, end = cls._get_args(m)
# function side check. (e.g.) '2*Func[x]' is invalid.
if m.start() != 0 or end != len(fm):
err = "'{f}' function form is invalid.".format(f=fm)
raise ValueError(err)
# check the last argument's 1st character
if args[-1][0] == '*':
key_arg = '*'
else:
key_arg = len(args)
key = (fm_name, key_arg)
# convert '*x' to '\\*x' for regex
re_args = [x if x[0] != '*' else '\\' + x for x in args]
# for regex. Example: (?:(x|y|z))
xyz = '(?:(' + '|'.join(re_args) + '))'
# string for regex compile
patStr = cls.ARGS_PATTERN_TEMPLATE.format(arguments=xyz)
pat = re.compile(patStr, re.VERBOSE)
# update dictionary
d[key] = {}
d[key]['fs'] = fs # SymPy function template
d[key]['args'] = args # args are ['x', 'y'] for example
d[key]['pat'] = pat
return d
def _convert_function(self, s):
'''Parse Mathematica function to SymPy one'''
# compiled regex object
pat = self.FM_PATTERN
scanned = '' # converted string
cur = 0 # position cursor
while True:
m = pat.search(s)
if m is None:
# append the rest of string
scanned += s
break
# get Mathematica function name
fm = m.group()
# get arguments, and the end position of fm function
args, end = self._get_args(m)
# the start position of fm function
bgn = m.start()
# convert Mathematica function to SymPy one
s = self._convert_one_function(s, fm, args, bgn, end)
# update cursor
cur = bgn
# append converted part
scanned += s[:cur]
# shrink s
s = s[cur:]
return scanned
def _convert_one_function(self, s, fm, args, bgn, end):
# no variable-length argument
if (fm, len(args)) in self.translations:
key = (fm, len(args))
# x, y,... model arguments
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = dict(zip(x_args, args))
# with variable-length argument
elif (fm, '*') in self.translations:
key = (fm, '*')
# x, y,..*args (model arguments)
x_args = self.translations[key]['args']
# make CORRESPONDENCES between model arguments and actual ones
d = {}
for i, x in enumerate(x_args):
if x[0] == '*':
d[x] = ','.join(args[i:])
break
d[x] = args[i]
# out of self.translations
else:
err = "'{f}' is out of the whitelist.".format(f=fm)
raise ValueError(err)
# template string of converted function
template = self.translations[key]['fs']
# regex pattern for x_args
pat = self.translations[key]['pat']
scanned = ''
cur = 0
while True:
m = pat.search(template)
if m is None:
scanned += template
break
# get model argument
x = m.group()
# get a start position of the model argument
xbgn = m.start()
# add the corresponding actual argument
scanned += template[:xbgn] + d[x]
# update cursor to the end of the model argument
cur = m.end()
# shrink template
template = template[cur:]
# update to swapped string
s = s[:bgn] + scanned + s[end:]
return s
@classmethod
def _get_args(cls, m):
'''Get arguments of a Mathematica function'''
s = m.string # whole string
anc = m.end() + 1 # pointing the first letter of arguments
square, curly = [], [] # stack for brackets
args = []
# current cursor
cur = anc
for i, c in enumerate(s[anc:], anc):
# extract one argument
if c == ',' and (not square) and (not curly):
args.append(s[cur:i]) # add an argument
cur = i + 1 # move cursor
# handle list or matrix (for future usage)
if c == '{':
curly.append(c)
elif c == '}':
curly.pop()
# seek corresponding ']' with skipping irrevant ones
if c == '[':
square.append(c)
elif c == ']':
if square:
square.pop()
else: # empty stack
args.append(s[cur:i])
break
# the next position to ']' bracket (the function end)
func_end = i + 1
return args, func_end
@classmethod
def _replace(cls, s, bef):
aft = cls.REPLACEMENTS[bef]
s = s.replace(bef, aft)
return s
@classmethod
def _apply_rules(cls, s, bef):
pat, aft = cls.RULES[bef]
return pat.sub(aft, s)
@classmethod
def _check_input(cls, s):
for bracket in (('[', ']'), ('{', '}'), ('(', ')')):
if s.count(bracket[0]) != s.count(bracket[1]):
err = "'{f}' function form is invalid.".format(f=s)
raise ValueError(err)
if '{' in s:
err = "Currently list is not supported."
raise ValueError(err)
def _parse_old(self, s):
# input check
self._check_input(s)
# uncover '*' hiding behind a whitespace
s = self._apply_rules(s, 'whitespace')
# remove whitespace(s)
s = self._replace(s, ' ')
# add omitted '*' character
s = self._apply_rules(s, 'add*_1')
s = self._apply_rules(s, 'add*_2')
# translate function
s = self._convert_function(s)
# '^' to '**'
s = self._replace(s, '^')
# 'Pi' to 'pi'
s = self._apply_rules(s, 'Pi')
# '{', '}' to '[', ']', respectively
# s = cls._replace(s, '{') # currently list is not taken into account
# s = cls._replace(s, '}')
return s
def parse(self, s):
s2 = named_characters_to_unicode(s)
s3 = self._from_mathematica_to_tokens(s2)
s4 = self._from_tokens_to_fullformlist(s3)
s5 = self._from_fullformlist_to_sympy(s4)
return s5
INFIX = "Infix"
PREFIX = "Prefix"
POSTFIX = "Postfix"
FLAT = "Flat"
RIGHT = "Right"
LEFT = "Left"
_mathematica_op_precedence: list[tuple[str, str | None, dict[str, str | Callable]]] = [
(POSTFIX, None, {";": lambda x: x + ["Null"] if isinstance(x, list) and x and x[0] == "CompoundExpression" else ["CompoundExpression", x, "Null"]}),
(INFIX, FLAT, {";": "CompoundExpression"}),
(INFIX, RIGHT, {"=": "Set", ":=": "SetDelayed", "+=": "AddTo", "-=": "SubtractFrom", "*=": "TimesBy", "/=": "DivideBy"}),
(INFIX, LEFT, {"//": lambda x, y: [x, y]}),
(POSTFIX, None, {"&": "Function"}),
(INFIX, LEFT, {"/.": "ReplaceAll"}),
(INFIX, RIGHT, {"->": "Rule", ":>": "RuleDelayed"}),
(INFIX, LEFT, {"/;": "Condition"}),
(INFIX, FLAT, {"|": "Alternatives"}),
(POSTFIX, None, {"..": "Repeated", "...": "RepeatedNull"}),
(INFIX, FLAT, {"||": "Or"}),
(INFIX, FLAT, {"&&": "And"}),
(PREFIX, None, {"!": "Not"}),
(INFIX, FLAT, {"===": "SameQ", "=!=": "UnsameQ"}),
(INFIX, FLAT, {"==": "Equal", "!=": "Unequal", "<=": "LessEqual", "<": "Less", ">=": "GreaterEqual", ">": "Greater"}),
(INFIX, None, {";;": "Span"}),
(INFIX, FLAT, {"+": "Plus", "-": "Plus"}),
(INFIX, FLAT, {"*": "Times", "/": "Times"}),
(INFIX, FLAT, {".": "Dot"}),
(PREFIX, None, {"-": lambda x: MathematicaParser._get_neg(x),
"+": lambda x: x}),
(INFIX, RIGHT, {"^": "Power"}),
(INFIX, RIGHT, {"@@": "Apply", "/@": "Map", "//@": "MapAll", "@@@": lambda x, y: ["Apply", x, y, ["List", "1"]]}),
(POSTFIX, None, {"'": "Derivative", "!": "Factorial", "!!": "Factorial2", "--": "Decrement"}),
(INFIX, None, {"[": lambda x, y: [x, *y], "[[": lambda x, y: ["Part", x, *y]}),
(PREFIX, None, {"{": lambda x: ["List", *x], "(": lambda x: x[0]}),
(INFIX, None, {"?": "PatternTest"}),
(POSTFIX, None, {
"_": lambda x: ["Pattern", x, ["Blank"]],
"_.": lambda x: ["Optional", ["Pattern", x, ["Blank"]]],
"__": lambda x: ["Pattern", x, ["BlankSequence"]],
"___": lambda x: ["Pattern", x, ["BlankNullSequence"]],
}),
(INFIX, None, {"_": lambda x, y: ["Pattern", x, ["Blank", y]]}),
(PREFIX, None, {"#": "Slot", "##": "SlotSequence"}),
]
_missing_arguments_default = {
"#": lambda: ["Slot", "1"],
"##": lambda: ["SlotSequence", "1"],
}
# This regex matches any valid python identifier -- excluding
# underscores, which Mathematica uses to denote patterns, and
# therefore can't be part of a variable name. The regex has the
# form "[a][b]*", where `a` is the set of characters that can
# start an identifier, and `b` is the set of characters that can
# continue an identifier, which may also include numbers and
# unicode combining characters.
_literal = (
"["
+ _literal_character_ranges(True)
+ "]["
+ _literal_character_ranges(False)
+ "]*"
)
_number = r"(?:[0-9]+(?:\.[0-9]*)?|\.[0-9]+)"
_enclosure_open = ["(", "[", "[[", "{"]
_enclosure_close = [")", "]", "]]", "}"]
@classmethod
def _get_neg(cls, x):
return f"-{x}" if isinstance(x, str) and re.match(MathematicaParser._number, x) else ["Times", "-1", x]
@classmethod
def _get_inv(cls, x):
return ["Power", x, "-1"]
_regex_tokenizer = None
def _get_tokenizer(self):
if self._regex_tokenizer is not None:
# Check if the regular expression has already been compiled:
return self._regex_tokenizer
tokens = [self._literal, self._number]
tokens_escape = self._enclosure_open[:] + self._enclosure_close[:]
for typ, strat, symdict in self._mathematica_op_precedence:
for k in symdict:
tokens_escape.append(k)
tokens_escape.sort(key=lambda x: -len(x))
tokens.extend(map(re.escape, tokens_escape))
tokens.append(",")
tokens.append("\n")
tokenizer = re.compile("(" + "|".join(tokens) + ")")
self._regex_tokenizer = tokenizer
return self._regex_tokenizer
def _from_mathematica_to_tokens(self, code: str):
tokenizer = self._get_tokenizer()
# Find strings:
code_splits: list[str | list] = []
while True:
string_start = code.find("\"")
if string_start == -1:
if len(code) > 0:
code_splits.append(code)
break
match_end = re.search(r'(?<!\\)"', code[string_start+1:])
if match_end is None:
raise SyntaxError('mismatch in string " " expression')
string_end = string_start + match_end.start() + 1
if string_start > 0:
code_splits.append(code[:string_start])
code_splits.append(["_Str", code[string_start+1:string_end].replace('\\"', '"')])
code = code[string_end+1:]
# Remove comments:
for i, code_split in enumerate(code_splits):
if isinstance(code_split, list):
continue
while True:
pos_comment_start = code_split.find("(*")
if pos_comment_start == -1:
break
pos_comment_end = code_split.find("*)")
if pos_comment_end == -1 or pos_comment_end < pos_comment_start:
raise SyntaxError("mismatch in comment (* *) code")
code_split = code_split[:pos_comment_start] + code_split[pos_comment_end+2:]
code_splits[i] = code_split
# Tokenize the input strings with a regular expression:
def token_split(code):
if isinstance(code, str):
m = tokenizer.findall(code)
if m or code.isascii():
return m
return [code]
token_lists = [token_split(code) for code in code_splits]
tokens = [j for i in token_lists for j in i]
# Remove newlines at the beginning
while tokens and tokens[0] == "\n":
tokens.pop(0)
# Remove newlines at the end
while tokens and tokens[-1] == "\n":
tokens.pop(-1)
return tokens
def _is_op(self, token: str | list) -> bool:
if isinstance(token, list):
return False
if re.match(self._literal, token):
return False
if re.match("-?" + self._number, token):
return False
return True
def _is_valid_star1(self, token: str | list) -> bool:
if token in (")", "}"):
return True
return not self._is_op(token)
def _is_valid_star2(self, token: str | list) -> bool:
if token in ("(", "{"):
return True
return not self._is_op(token)
def _from_tokens_to_fullformlist(self, tokens: list):
stack: list[list] = [[]]
open_seq = []
pointer: int = 0
while pointer < len(tokens):
token = tokens[pointer]
if token in self._enclosure_open:
stack[-1].append(token)
open_seq.append(token)
stack.append([])
elif token == ",":
if len(stack[-1]) == 0 and stack[-2][-1] == open_seq[-1]:
raise SyntaxError("%s cannot be followed by comma ," % open_seq[-1])
stack[-1] = self._parse_after_braces(stack[-1])
stack.append([])
elif token in self._enclosure_close:
ind = self._enclosure_close.index(token)
if self._enclosure_open[ind] != open_seq[-1]:
unmatched_enclosure = SyntaxError("unmatched enclosure")
if token == "]]" and open_seq[-1] == "[":
if open_seq[-2] == "[":
# These two lines would be logically correct, but are
# unnecessary:
# token = "]"
# tokens[pointer] = "]"
tokens.insert(pointer+1, "]")
elif open_seq[-2] == "[[":
if tokens[pointer+1] == "]":
tokens[pointer+1] = "]]"
elif tokens[pointer+1] == "]]":
tokens[pointer+1] = "]]"
tokens.insert(pointer+2, "]")
else:
raise unmatched_enclosure
else:
raise unmatched_enclosure
if len(stack[-1]) == 0 and stack[-2][-1] == "(":
raise SyntaxError("( ) not valid syntax")
last_stack = self._parse_after_braces(stack[-1], True)
stack[-1] = last_stack
new_stack_element = []
while stack[-1][-1] != open_seq[-1]:
new_stack_element.append(stack.pop())
new_stack_element.reverse()
if open_seq[-1] == "(" and len(new_stack_element) != 1:
raise SyntaxError("( must be followed by one expression, %i detected" % len(new_stack_element))
stack[-1].append(new_stack_element)
open_seq.pop(-1)
else:
stack[-1].append(token)
pointer += 1
if len(stack) != 1:
raise RuntimeError("Stack should have only one element")
return self._parse_after_braces(stack[0])
def _util_remove_newlines(self, lines: list, tokens: list, inside_enclosure: bool):
pointer = 0
size = len(tokens)
while pointer < size:
token = tokens[pointer]
if token == "\n":
if inside_enclosure:
# Ignore newlines inside enclosures
tokens.pop(pointer)
size -= 1
continue
if pointer == 0:
tokens.pop(0)
size -= 1
continue
if pointer > 1:
try:
prev_expr = self._parse_after_braces(tokens[:pointer], inside_enclosure)
except SyntaxError:
tokens.pop(pointer)
size -= 1
continue
else:
prev_expr = tokens[0]
if len(prev_expr) > 0 and prev_expr[0] == "CompoundExpression":
lines.extend(prev_expr[1:])
else:
lines.append(prev_expr)
for i in range(pointer):
tokens.pop(0)
size -= pointer
pointer = 0
continue
pointer += 1
def _util_add_missing_asterisks(self, tokens: list):
size: int = len(tokens)
pointer: int = 0
while pointer < size:
if (pointer > 0 and
self._is_valid_star1(tokens[pointer - 1]) and
self._is_valid_star2(tokens[pointer])):
# This is a trick to add missing * operators in the expression,
# `"*" in op_dict` makes sure the precedence level is the same as "*",
# while `not self._is_op( ... )` makes sure this and the previous
# expression are not operators.
if tokens[pointer] == "(":
# ( has already been processed by now, replace:
tokens[pointer] = "*"
tokens[pointer + 1] = tokens[pointer + 1][0]
else:
tokens.insert(pointer, "*")
pointer += 1
size += 1
pointer += 1
def _parse_after_braces(self, tokens: list, inside_enclosure: bool = False):
op_dict: dict
changed: bool = False
lines: list = []
self._util_remove_newlines(lines, tokens, inside_enclosure)
for op_type, grouping_strat, op_dict in reversed(self._mathematica_op_precedence):
if "*" in op_dict:
self._util_add_missing_asterisks(tokens)
size: int = len(tokens)
pointer: int = 0
while pointer < size:
token = tokens[pointer]
if isinstance(token, str) and token in op_dict:
op_name: str | Callable = op_dict[token]
node: list
first_index: int
if isinstance(op_name, str):
node = [op_name]
first_index = 1
else:
node = []
first_index = 0
if token in ("+", "-") and op_type == self.PREFIX and pointer > 0 and not self._is_op(tokens[pointer - 1]):
# Make sure that PREFIX + - don't match expressions like a + b or a - b,
# the INFIX + - are supposed to match that expression:
pointer += 1
continue
if op_type == self.INFIX:
if pointer == 0 or pointer == size - 1 or self._is_op(tokens[pointer - 1]) or self._is_op(tokens[pointer + 1]):
pointer += 1
continue
# Special case: "!" without preceding operand is PREFIX Not, not POSTFIX Factorial
if token == "!" and op_type == self.POSTFIX:
if pointer == 0 or self._is_op(tokens[pointer - 1]):
pointer += 1
continue
changed = True
tokens[pointer] = node
if op_type == self.INFIX:
arg1 = tokens.pop(pointer-1)
arg2 = tokens.pop(pointer)
if token == "/":
arg2 = self._get_inv(arg2)
elif token == "-":
arg2 = self._get_neg(arg2)
pointer -= 1
size -= 2
node.append(arg1)
node_p = node
if grouping_strat == self.FLAT:
while pointer + 2 < size and self._check_op_compatible(tokens[pointer+1], token):
node_p.append(arg2)
other_op = tokens.pop(pointer+1)
arg2 = tokens.pop(pointer+1)
if other_op == "/":
arg2 = self._get_inv(arg2)
elif other_op == "-":
arg2 = self._get_neg(arg2)
size -= 2
node_p.append(arg2)
elif grouping_strat == self.RIGHT:
while pointer + 2 < size and tokens[pointer+1] == token:
node_p.append([op_name, arg2])
node_p = node_p[-1]
tokens.pop(pointer+1)
arg2 = tokens.pop(pointer+1)
size -= 2
node_p.append(arg2)
elif grouping_strat == self.LEFT:
while pointer + 1 < size and tokens[pointer+1] == token:
if isinstance(op_name, str):
node_p[first_index] = [op_name, node_p[first_index], arg2]
else:
node_p[first_index] = op_name(node_p[first_index], arg2)
tokens.pop(pointer+1)
arg2 = tokens.pop(pointer+1)
size -= 2
node_p.append(arg2)
else:
node.append(arg2)
elif op_type == self.PREFIX:
if grouping_strat is not None:
raise TypeError("'Prefix' op_type should not have a grouping strat")
if pointer == size - 1 or self._is_op(tokens[pointer + 1]):
tokens[pointer] = self._missing_arguments_default[token]()
else:
node.append(tokens.pop(pointer+1))
size -= 1
elif op_type == self.POSTFIX:
if grouping_strat is not None:
raise TypeError("'Prefix' op_type should not have a grouping strat")
if pointer == 0 or self._is_op(tokens[pointer - 1]):
tokens[pointer] = self._missing_arguments_default[token]()
else:
node.append(tokens.pop(pointer-1))
pointer -= 1
size -= 1
if isinstance(op_name, Callable): # type: ignore
op_call: Callable = typing.cast(Callable, op_name)
new_node = op_call(*node)
node.clear()
if isinstance(new_node, list):
node.extend(new_node)
else:
tokens[pointer] = new_node
pointer += 1
if len(tokens) > 1 or (len(lines) == 0 and len(tokens) == 0):
if changed:
# Trick to deal with cases in which an operator with lower
# precedence should be transformed before an operator of higher
# precedence. Such as in the case of `#&[x]` (that is
# equivalent to `Lambda(d_, d_)(x)` in SymPy). In this case the
# operator `&` has lower precedence than `[`, but needs to be
# evaluated first because otherwise `# (&[x])` is not a valid
# expression:
return self._parse_after_braces(tokens, inside_enclosure)
raise SyntaxError("unable to create a single AST for the expression")
if len(lines) > 0:
if tokens[0] and tokens[0][0] == "CompoundExpression":
tokens = tokens[0][1:]
compound_expression = ["CompoundExpression", *lines, *tokens]
return compound_expression
return tokens[0]
def _check_op_compatible(self, op1: str, op2: str):
if op1 == op2:
return True
muldiv = {"*", "/"}
addsub = {"+", "-"}
if op1 in muldiv and op2 in muldiv:
return True
if op1 in addsub and op2 in addsub:
return True
return False
def _from_fullform_to_fullformlist(self, wmexpr: str):
"""
Parses FullForm[Downvalues[]] generated by Mathematica
"""
out: list = []
stack = [out]
generator = re.finditer(r'[\[\],]', wmexpr)
last_pos = 0
for match in generator:
if match is None:
break
position = match.start()
last_expr = wmexpr[last_pos:position].replace(',', '').replace(']', '').replace('[', '').strip()
if match.group() == ',':
if last_expr != '':
stack[-1].append(last_expr)
elif match.group() == ']':
if last_expr != '':
stack[-1].append(last_expr)
stack.pop()
elif match.group() == '[':
stack[-1].append([last_expr])
stack.append(stack[-1][-1])
last_pos = match.end()
return out[0]
def _from_fullformlist_to_fullformsympy(self, pylist: list):
from sympy import Function, Symbol
def converter(expr):
if isinstance(expr, list):
if len(expr) > 0:
head = expr[0]
args = [converter(arg) for arg in expr[1:]]
return Function(head)(*args)
else:
raise ValueError("Empty list of expressions")
elif isinstance(expr, str):
return Symbol(expr)
else:
return _sympify(expr)
return converter(pylist)
_node_conversions = {
"Times": Mul,
"Plus": Add,
"Power": Pow,
"Rational": Rational,
"Log": lambda *a: log(*reversed(a)),
"Log2": lambda x: log(x, 2),
"Log10": lambda x: log(x, 10),
"Exp": exp,
"Sqrt": sqrt,
"Sin": sin,
"Cos": cos,
"Tan": tan,
"Cot": cot,
"Sec": sec,
"Csc": csc,
"ArcSin": asin,
"ArcCos": acos,
"ArcTan": lambda *a: atan2(*reversed(a)) if len(a) == 2 else atan(*a),
"ArcCot": acot,
"ArcSec": asec,
"ArcCsc": acsc,
"Sinh": sinh,
"Cosh": cosh,
"Tanh": tanh,
"Coth": coth,
"Sech": sech,
"Csch": csch,
"ArcSinh": asinh,
"ArcCosh": acosh,
"ArcTanh": atanh,
"ArcCoth": acoth,
"ArcSech": asech,
"ArcCsch": acsch,
"Expand": expand,
"Im": im,
"Re": sympy.re,
"Flatten": flatten,
"Polylog": polylog,
"Cancel": cancel,
# Gamma=gamma,
"TrigExpand": expand_trig,
"Sign": sign,
"Simplify": simplify,
"Defer": UnevaluatedExpr,
"Identity": S,
# Sum=Sum_doit,
# Module=With,
# Block=With,
"Null": lambda *a: S.Zero,
"Mod": Mod,
"Max": Max,
"Min": Min,
"Pochhammer": rf,
"ExpIntegralEi": Ei,
"SinIntegral": Si,
"CosIntegral": Ci,
"AiryAi": airyai,
"AiryAiPrime": airyaiprime,
"AiryBi": airybi,
"AiryBiPrime": airybiprime,
"LogIntegral": li,
"PrimePi": primepi,
"Prime": prime,
"PrimeQ": isprime,
"List": Tuple,
"Greater": StrictGreaterThan,
"GreaterEqual": GreaterThan,
"Less": StrictLessThan,
"LessEqual": LessThan,
"Equal": Equality,
"Or": Or,
"And": And,
"Not": Not,
"Function": _parse_Function,
"Factorial": factorial,
}
_atom_conversions = {
"I": I,
"Pi": pi,
"ExponentialE": E,
"ImaginaryI": I,
"ImaginaryJ": I,
}
def _from_fullformlist_to_sympy(self, full_form_list):
def recurse(expr):
if isinstance(expr, list):
if isinstance(expr[0], list):
head = recurse(expr[0])
else:
head = self._node_conversions.get(expr[0], Function(expr[0]))
return head(*[recurse(arg) for arg in expr[1:]])
else:
return self._atom_conversions.get(expr, sympify(expr))
return recurse(full_form_list)
def _from_fullformsympy_to_sympy(self, mform):
expr = mform
for mma_form, sympy_node in self._node_conversions.items():
expr = expr.replace(Function(mma_form), sympy_node)
return expr
def named_characters_to_unicode(s: str) -> str:
"""
Convert Mathematica's named characters to SymPy equivalents.
The list of named characters is available at
https://reference.wolfram.com/language/guide/ListingOfNamedCharacters.html
"""
from .mathematica_named_characters import mathematica_named_characters
# Mathematica's named characters always start with `\[`, end with
# `]`, and have only characters in [a-zA-Z] in between.
if r"\[" in s: # Don't bother if there's no `\[`
pattern = r"\\\[([a-zA-Z]+)\]"
def replace(match):
name = match.group(1)
if name not in mathematica_named_characters:
raise ValueError(f"Unknown Mathematica named character: {name}")
return mathematica_named_characters[name]
s = re.sub(pattern, replace, s)
if r"\[" in s:
raise SyntaxError(f"Unmatched '\\[' in '{s}'")
return s
| MathematicaParser |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 105548,
"end": 113307
} | class ____:
def test_with_options_allows_override_of_task_settings(self):
def first_cache_key_fn(*_):
return "first cache hit"
def second_cache_key_fn(*_):
return "second cache hit"
block = LocalFileSystem(basepath="foo")
block.save("foo-test", _sync=True)
@task(
name="Initial task",
description="Task before with options",
tags=["tag1", "tag2"],
cache_key_fn=first_cache_key_fn,
cache_expiration=datetime.timedelta(days=1),
retries=2,
retry_delay_seconds=5,
persist_result=True,
result_serializer="pickle",
result_storage=block,
cache_result_in_memory=False,
timeout_seconds=None,
refresh_cache=False,
result_storage_key="foo",
)
def initial_task():
pass
new_block = LocalFileSystem(basepath="bar")
new_block.save("bar-test", _sync=True)
task_with_options = initial_task.with_options(
name="Copied task",
description="A copied task",
tags=["tag3", "tag4"],
cache_key_fn=second_cache_key_fn,
cache_expiration=datetime.timedelta(days=2),
retries=5,
retry_delay_seconds=10,
persist_result=False,
result_serializer="json",
result_storage=new_block,
cache_result_in_memory=True,
timeout_seconds=42,
refresh_cache=True,
result_storage_key="bar",
)
assert task_with_options.name == "Copied task"
assert task_with_options.description == "A copied task"
assert set(task_with_options.tags) == {"tag3", "tag4"}
assert task_with_options.cache_key_fn is second_cache_key_fn
assert task_with_options.cache_expiration == datetime.timedelta(days=2)
assert task_with_options.retries == 5
assert task_with_options.retry_delay_seconds == 10
assert task_with_options.persist_result is False
assert task_with_options.result_serializer == "json"
assert task_with_options.result_storage == new_block
assert task_with_options.cache_result_in_memory is True
assert task_with_options.timeout_seconds == 42
assert task_with_options.refresh_cache is True
assert task_with_options.result_storage_key == "bar"
def test_with_options_uses_existing_settings_when_no_override(self, tmp_path: Path):
def cache_key_fn(*_):
return "cache hit"
storage = LocalFileSystem(basepath=tmp_path)
storage.save("another-passthrough", _sync=True)
@task(
name="Initial task",
description="Task before with options",
tags=["tag1", "tag2"],
cache_key_fn=cache_key_fn,
cache_expiration=datetime.timedelta(days=1),
retries=2,
retry_delay_seconds=5,
persist_result=False,
result_serializer="json",
result_storage=storage,
cache_result_in_memory=False,
timeout_seconds=42,
refresh_cache=True,
result_storage_key="test",
)
def initial_task():
pass
task_with_options = initial_task.with_options()
assert task_with_options is not initial_task
assert (
task_with_options.name == "Initial task"
) # The registry renames tasks to avoid collisions.
assert task_with_options.description == "Task before with options"
assert set(task_with_options.tags) == {"tag1", "tag2"}
assert task_with_options.tags is not initial_task.tags
assert task_with_options.cache_key_fn is cache_key_fn
assert task_with_options.cache_expiration == datetime.timedelta(days=1)
assert task_with_options.retries == 2
assert task_with_options.retry_delay_seconds == 5
assert task_with_options.persist_result is False
assert task_with_options.result_serializer == "json"
assert task_with_options.result_storage == storage
assert task_with_options.cache_result_in_memory is False
assert task_with_options.timeout_seconds == 42
assert task_with_options.refresh_cache is True
assert task_with_options.result_storage_key == "test"
def test_with_options_can_unset_result_options_with_none(self, tmp_path: Path):
result_storage = LocalFileSystem(basepath=tmp_path)
result_storage.save("test-yet-again", _sync=True)
@task(
result_serializer="json",
result_storage=result_storage,
refresh_cache=True,
result_storage_key="test",
)
def initial_task():
pass
task_with_options = initial_task.with_options(
result_serializer=None,
result_storage=None,
refresh_cache=None,
result_storage_key=None,
)
assert task_with_options.result_serializer is None
assert task_with_options.result_storage is None
assert task_with_options.refresh_cache is None
assert task_with_options.result_storage_key is None
def test_tags_are_copied_from_original_task(self):
"Ensure changes to the tags on the original task don't affect the new task"
@task(name="Initial task", tags=["tag1", "tag2"])
def initial_task():
pass
with_options_task = initial_task.with_options(name="With options task")
initial_task.tags.add("tag3")
assert initial_task.tags == {"tag1", "tag2", "tag3"}
assert with_options_task.tags == {"tag1", "tag2"}
def test_with_options_signature_aligns_with_task_signature(self):
task_params = set(inspect.signature(task).parameters.keys())
with_options_params = set(
inspect.signature(Task.with_options).parameters.keys()
)
# `with_options` does not accept a new function
task_params.remove("__fn")
# it doesn't make sense to take the same task definition and change versions
# tags should be used for distinguishing different calls where necessary
task_params.remove("version")
# `self` isn't in task decorator
with_options_params.remove("self")
assert task_params == with_options_params
def test_with_options_allows_override_of_0_retries(self):
@task(retries=3, retry_delay_seconds=10)
def initial_task():
pass
task_with_options = initial_task.with_options(retries=0, retry_delay_seconds=0)
assert task_with_options.retries == 0
assert task_with_options.retry_delay_seconds == 0
async def test_with_options_refresh_cache(
self,
):
@task(
cache_key_fn=lambda *_: "cache hit",
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return (
foo(1, return_state=True),
foo(2, return_state=True),
foo.with_options(refresh_cache=True)(3, return_state=True),
foo(4, return_state=True),
)
first, second, third, fourth = bar()
assert first.name == "Completed"
assert second.name == "Cached"
assert third.name == "Completed"
assert fourth.name == "Cached"
assert await first.result() == await second.result()
assert await second.result() != await third.result()
assert await third.result() == await fourth.result()
assert await fourth.result() != await first.result()
| TestTaskWithOptions |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 2513,
"end": 2625
} | class ____(complexfloating):
name = "complex64"
typecode = "F"
torch_dtype = torch.complex64
| complex64 |
python | google__pytype | pytype/tests/test_tuple1.py | {
"start": 69,
"end": 5364
} | class ____(test_base.BaseTest):
"""Tests for builtins.tuple."""
def test_getitem_int(self):
ty = self.Infer("""
t = ("", 42)
v1 = t[0]
v2 = t[1]
v3 = t[2]
v4 = t[-1]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, Union
t = ... # type: Tuple[str, int]
v1 = ... # type: str
v2 = ... # type: int
v3 = ... # type: Union[str, int]
v4 = ... # type: int
""",
)
@test_base.skip("Needs better slice support in abstract.Tuple, convert.py.")
def test_getitem_slice(self):
ty = self.Infer("""
t = ("", 42)
v1 = t[:]
v2 = t[:1]
v3 = t[1:]
v4 = t[0:1]
v5 = t[0:2:2]
v6 = t[:][0]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple
t = ... # type: Tuple[str, int]
v1 = ... # type: Tuple[str, int]
v2 = ... # type: Tuple[str]
v3 = ... # type: Tuple[int]
v4 = ... # type: Tuple[str]
v5 = ... # type: Tuple[str]
v6 = ... # type: str
""",
)
def test_unpack_tuple(self):
ty = self.Infer("""
v1, v2 = ("", 42)
_, w = ("", 42)
x, (y, z) = ("", (3.14, True))
""")
self.assertTypesMatchPytd(
ty,
"""
v1 = ... # type: str
v2 = ... # type: int
_ = ... # type: str
w = ... # type: int
x = ... # type: str
y = ... # type: float
z = ... # type: bool
""",
)
def test_bad_unpacking(self):
ty, errors = self.InferWithErrors("""
tup = (1, "")
a, = tup # bad-unpacking[e1]
b, c, d = tup # bad-unpacking[e2]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, Union
tup = ... # type: Tuple[int, str]
a = ... # type: Union[int, str]
b = ... # type: Union[int, str]
c = ... # type: Union[int, str]
d = ... # type: Union[int, str]
""",
)
self.assertErrorRegexes(
errors, {"e1": r"2 values.*1 variable", "e2": r"2 values.*3 variables"}
)
def test_mutable_item(self):
ty = self.Infer("""
v = {}
w = v.setdefault("", ([], []))
w[1].append(42)
u = w[2]
""")
self.assertTypesMatchPytd(
ty,
"""
v = ... # type: dict[str, tuple[list[nothing], list[int]]]
w = ... # type: tuple[list[nothing], list[int]]
u = ... # type: list[int]
""",
)
def test_bad_tuple_class_getitem(self):
errors = self.CheckWithErrors("""
v = type((3, ""))
w = v[0] # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"expected 0 parameters, got 1"})
def test_tuple_isinstance(self):
ty = self.Infer("""
x = ()
if isinstance(x, tuple):
y = 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple
x = ... # type: Tuple[()]
y = ... # type: int
""",
)
def test_add_twice(self):
self.Check("() + () + ()")
def test_inplace_add(self):
ty = self.Infer("""
a = ()
a += (42,)
b = ()
b += (42,)
b += ("foo",)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, Union
a = ... # type: Tuple[int]
b = ... # type: Tuple[int, str]
""",
)
def test_add(self):
self.Check("""
from typing import Tuple
a = (1, 2)
b = ('3', '4')
c = a + b
assert_type(c, Tuple[int, int, str, str])
""")
def test_tuple_of_tuple(self):
self.assertNoCrash(
self.Infer,
"""
def f(x=()):
x = (x,)
enumerate(x)
lambda: x
return x
""",
)
def test_tuple_container_matching(self):
# Regression test for crash introduced by using
# matcher.match_var_against_type() for container mutation checking without
# fully populating the view.
self.Check("""
from typing import Dict, Tuple
class Foo:
pass
class _SupplyPoolAsset:
def __init__(self):
self._resources_available = {}
self._resources_used = {} # type: Dict[str, Tuple[Foo, Foo]]
self._PopulateResources()
def _PopulateResources(self):
for x, y, z in __any_object__:
self._resources_available[x] = (y, z)
for x, y, z in __any_object__:
self._resources_available[x] = (y, z)
def RequestResource(self, resource):
self._resources_used[
resource.Name()] = self._resources_available[resource.Name()]
""")
def test_bad_extra_parameterization(self):
errors = self.CheckWithErrors("""
from typing import Tuple
X = Tuple[int][str] # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"expected 0 parameters, got 1"})
def test_legal_extra_parameterization(self):
ty = self.Infer("""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[T][T][str]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[str]
""",
)
if __name__ == "__main__":
test_base.main()
| TupleTest |
python | explosion__spaCy | spacy/cli/convert.py | {
"start": 1011,
"end": 9389
} | class ____(str, Enum):
json = "json"
spacy = "spacy"
@app.command("convert")
def convert_cli(
# fmt: off
input_path: str = Arg(..., help="Input file or directory", exists=True),
output_dir: Path = Arg("-", help="Output directory. '-' for stdout.", allow_dash=True, exists=True),
file_type: FileTypes = Opt("spacy", "--file-type", "-t", help="Type of data to produce"),
n_sents: int = Opt(1, "--n-sents", "-n", help="Number of sentences per doc (0 to disable)"),
seg_sents: bool = Opt(False, "--seg-sents", "-s", help="Segment sentences (for -c ner)"),
model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"),
morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"),
merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"),
converter: str = Opt(AUTO, "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"),
ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True),
lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"),
concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"),
# fmt: on
):
"""
Convert files into json or DocBin format for training. The resulting .spacy
file can be used with the train command and other experiment management
functions.
If no output_dir is specified and the output format is JSON, the data
is written to stdout, so you can pipe them forward to a JSON file:
$ spacy convert some_file.conllu --file-type json > some_file.json
DOCS: https://spacy.io/api/cli#convert
"""
input_path = Path(input_path)
output_dir: Union[str, Path] = "-" if output_dir == Path("-") else output_dir
silent = output_dir == "-"
msg = Printer(no_print=silent)
converter = _get_converter(msg, converter, input_path)
verify_cli_args(msg, input_path, output_dir, file_type.value, converter, ner_map)
convert(
input_path,
output_dir,
file_type=file_type.value,
n_sents=n_sents,
seg_sents=seg_sents,
model=model,
morphology=morphology,
merge_subtokens=merge_subtokens,
converter=converter,
ner_map=ner_map,
lang=lang,
concatenate=concatenate,
silent=silent,
msg=msg,
)
def convert(
input_path: Path,
output_dir: Union[str, Path],
*,
file_type: str = "json",
n_sents: int = 1,
seg_sents: bool = False,
model: Optional[str] = None,
morphology: bool = False,
merge_subtokens: bool = False,
converter: str,
ner_map: Optional[Path] = None,
lang: Optional[str] = None,
concatenate: bool = False,
silent: bool = True,
msg: Optional[Printer] = None,
) -> None:
input_path = Path(input_path)
if not msg:
msg = Printer(no_print=silent)
ner_map = srsly.read_json(ner_map) if ner_map is not None else None
doc_files = []
for input_loc in walk_directory(input_path, converter):
with input_loc.open("r", encoding="utf-8") as infile:
input_data = infile.read()
# Use converter function to convert data
func = CONVERTERS[converter]
docs = func(
input_data,
n_sents=n_sents,
seg_sents=seg_sents,
append_morphology=morphology,
merge_subtokens=merge_subtokens,
lang=lang,
model=model,
no_print=silent,
ner_map=ner_map,
)
doc_files.append((input_loc, docs))
if concatenate:
all_docs = itertools.chain.from_iterable([docs for _, docs in doc_files])
doc_files = [(input_path, all_docs)]
for input_loc, docs in doc_files:
if file_type == "json":
data = [docs_to_json(docs)]
len_docs = len(data)
else:
db = DocBin(docs=docs, store_user_data=True)
len_docs = len(db)
data = db.to_bytes() # type: ignore[assignment]
if output_dir == "-":
_print_docs_to_stdout(data, file_type)
else:
if input_loc != input_path:
subpath = input_loc.relative_to(input_path)
output_file = Path(output_dir) / subpath.with_suffix(f".{file_type}")
else:
output_file = Path(output_dir) / input_loc.parts[-1]
output_file = output_file.with_suffix(f".{file_type}")
_write_docs_to_file(data, output_file, file_type)
msg.good(f"Generated output file ({len_docs} documents): {output_file}")
def _print_docs_to_stdout(data: Any, output_type: str) -> None:
if output_type == "json":
srsly.write_json("-", data)
else:
sys.stdout.buffer.write(data)
def _write_docs_to_file(data: Any, output_file: Path, output_type: str) -> None:
if not output_file.parent.exists():
output_file.parent.mkdir(parents=True)
if output_type == "json":
srsly.write_json(output_file, data)
else:
with output_file.open("wb") as file_:
file_.write(data)
def autodetect_ner_format(input_data: str) -> Optional[str]:
# guess format from the first 20 lines
lines = input_data.split("\n")[:20]
format_guesses = {"ner": 0, "iob": 0}
iob_re = re.compile(r"\S+\|(O|[IB]-\S+)")
ner_re = re.compile(r"\S+\s+(O|[IB]-\S+)$")
for line in lines:
line = line.strip()
if iob_re.search(line):
format_guesses["iob"] += 1
if ner_re.search(line):
format_guesses["ner"] += 1
if format_guesses["iob"] == 0 and format_guesses["ner"] > 0:
return "ner"
if format_guesses["ner"] == 0 and format_guesses["iob"] > 0:
return "iob"
return None
def verify_cli_args(
msg: Printer,
input_path: Path,
output_dir: Union[str, Path],
file_type: str,
converter: str,
ner_map: Optional[Path],
):
if file_type not in FILE_TYPES_STDOUT and output_dir == "-":
msg.fail(
f"Can't write .{file_type} data to stdout. Please specify an output directory.",
exits=1,
)
if not input_path.exists():
msg.fail("Input file not found", input_path, exits=1)
if output_dir != "-" and not Path(output_dir).exists():
msg.fail("Output directory not found", output_dir, exits=1)
if ner_map is not None and not Path(ner_map).exists():
msg.fail("NER map not found", ner_map, exits=1)
if input_path.is_dir():
input_locs = walk_directory(input_path, converter)
if len(input_locs) == 0:
msg.fail("No input files in directory", input_path, exits=1)
if converter not in CONVERTERS:
msg.fail(f"Can't find converter for {converter}", exits=1)
def _get_converter(msg, converter, input_path: Path):
if input_path.is_dir():
if converter == AUTO:
input_locs = walk_directory(input_path, suffix=None)
file_types = list(set([loc.suffix[1:] for loc in input_locs]))
if len(file_types) >= 2:
file_types_str = ",".join(file_types)
msg.fail("All input files must be same type", file_types_str, exits=1)
input_path = input_locs[0]
else:
input_path = walk_directory(input_path, suffix=converter)[0]
if converter == AUTO:
converter = input_path.suffix[1:]
if converter == "ner" or converter == "iob":
with input_path.open(encoding="utf8") as file_:
input_data = file_.read()
converter_autodetect = autodetect_ner_format(input_data)
if converter_autodetect == "ner":
msg.info("Auto-detected token-per-line NER format")
converter = converter_autodetect
elif converter_autodetect == "iob":
msg.info("Auto-detected sentence-per-line NER format")
converter = converter_autodetect
else:
msg.warn(
"Can't automatically detect NER format. "
"Conversion may not succeed. "
"See https://spacy.io/api/cli#convert"
)
return converter
| FileTypes |
python | celery__celery | celery/backends/rpc.py | {
"start": 2533,
"end": 12077
} | class ____(base.Backend, AsyncBackendMixin):
"""Base class for the RPC result backend."""
Exchange = kombu.Exchange
Producer = kombu.Producer
ResultConsumer = ResultConsumer
#: Exception raised when there are too many messages for a task id.
BacklogLimitExceeded = BacklogLimitExceeded
persistent = False
supports_autoexpire = True
supports_native_join = True
retry_policy = {
'max_retries': 20,
'interval_start': 0,
'interval_step': 1,
'interval_max': 1,
}
class Consumer(kombu.Consumer):
"""Consumer that requires manual declaration of queues."""
auto_declare = False
class Queue(kombu.Queue):
"""Queue that never caches declaration."""
can_cache_declaration = False
def __init__(self, app, connection=None, exchange=None, exchange_type=None,
persistent=None, serializer=None, auto_delete=True, **kwargs):
super().__init__(app, **kwargs)
conf = self.app.conf
self._connection = connection
self._out_of_band = {}
self.persistent = self.prepare_persistent(persistent)
self.delivery_mode = 2 if self.persistent else 1
exchange = exchange or conf.result_exchange
exchange_type = exchange_type or conf.result_exchange_type
self.exchange = self._create_exchange(
exchange, exchange_type, self.delivery_mode,
)
self.serializer = serializer or conf.result_serializer
self.auto_delete = auto_delete
self.result_consumer = self.ResultConsumer(
self, self.app, self.accept,
self._pending_results, self._pending_messages,
)
if register_after_fork is not None:
register_after_fork(self, _on_after_fork_cleanup_backend)
def _after_fork(self):
# clear state for child processes.
self._pending_results.clear()
self.result_consumer._after_fork()
def _create_exchange(self, name, type='direct', delivery_mode=2):
# uses direct to queue routing (anon exchange).
return self.Exchange(None)
def _create_binding(self, task_id):
"""Create new binding for task with id."""
# RPC backend caches the binding, as one queue is used for all tasks.
return self.binding
def ensure_chords_allowed(self):
raise NotImplementedError(E_NO_CHORD_SUPPORT.strip())
def on_task_call(self, producer, task_id):
# Called every time a task is sent when using this backend.
# We declare the queue we receive replies on in advance of sending
# the message, but we skip this if running in the prefork pool
# (task_join_will_block), as we know the queue is already declared.
if not task_join_will_block():
maybe_declare(self.binding(producer.channel), retry=True)
def destination_for(self, task_id, request):
"""Get the destination for result by task id.
Returns:
Tuple[str, str]: tuple of ``(reply_to, correlation_id)``.
"""
# Backends didn't always receive the `request`, so we must still
# support old code that relies on current_task.
try:
request = request or current_task.request
except AttributeError:
raise RuntimeError(
f'RPC backend missing task request for {task_id!r}')
return request.reply_to, request.correlation_id or task_id
def on_reply_declare(self, task_id):
# Return value here is used as the `declare=` argument
# for Producer.publish.
# By default we don't have to declare anything when sending a result.
pass
def on_result_fulfilled(self, result):
# This usually cancels the queue after the result is received,
# but we don't have to cancel since we have one queue per process.
pass
def as_uri(self, include_password=True):
return 'rpc://'
def store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
"""Send task return value and state."""
routing_key, correlation_id = self.destination_for(task_id, request)
if not routing_key:
return
with self.app.amqp.producer_pool.acquire(block=True) as producer:
producer.publish(
self._to_result(task_id, state, result, traceback, request),
exchange=self.exchange,
routing_key=routing_key,
correlation_id=correlation_id,
serializer=self.serializer,
retry=True, retry_policy=self.retry_policy,
declare=self.on_reply_declare(task_id),
delivery_mode=self.delivery_mode,
)
return result
def _to_result(self, task_id, state, result, traceback, request):
return {
'task_id': task_id,
'status': state,
'result': self.encode_result(result, state),
'traceback': traceback,
'children': self.current_task_children(request),
}
def on_out_of_band_result(self, task_id, message):
# Callback called when a reply for a task is received,
# but we have no idea what to do with it.
# Since the result is not pending, we put it in a separate
# buffer: probably it will become pending later.
if self.result_consumer:
self.result_consumer.on_out_of_band_result(message)
self._out_of_band[task_id] = message
def get_task_meta(self, task_id, backlog_limit=1000):
buffered = self._out_of_band.pop(task_id, None)
if buffered:
return self._set_cache_by_message(task_id, buffered)
# Polling and using basic_get
latest_by_id = {}
prev = None
for acc in self._slurp_from_queue(task_id, self.accept, backlog_limit):
tid = self._get_message_task_id(acc)
prev, latest_by_id[tid] = latest_by_id.get(tid), acc
if prev:
# backends aren't expected to keep history,
# so we delete everything except the most recent state.
prev.ack()
prev = None
latest = latest_by_id.pop(task_id, None)
for tid, msg in latest_by_id.items():
self.on_out_of_band_result(tid, msg)
if latest:
latest.requeue()
return self._set_cache_by_message(task_id, latest)
else:
# no new state, use previous
try:
return self._cache[task_id]
except KeyError:
# result probably pending.
return {'status': states.PENDING, 'result': None}
poll = get_task_meta # XXX compat
def _set_cache_by_message(self, task_id, message):
payload = self._cache[task_id] = self.meta_from_decoded(
message.payload)
return payload
def _slurp_from_queue(self, task_id, accept,
limit=1000, no_ack=False):
with self.app.pool.acquire_channel(block=True) as (_, channel):
binding = self._create_binding(task_id)(channel)
binding.declare()
for _ in range(limit):
msg = binding.get(accept=accept, no_ack=no_ack)
if not msg:
break
yield msg
else:
raise self.BacklogLimitExceeded(task_id)
def _get_message_task_id(self, message):
try:
# try property first so we don't have to deserialize
# the payload.
return message.properties['correlation_id']
except (AttributeError, KeyError):
# message sent by old Celery version, need to deserialize.
return message.payload['task_id']
def revive(self, channel):
pass
def reload_task_result(self, task_id):
raise NotImplementedError(
'reload_task_result is not supported by this backend.')
def reload_group_result(self, task_id):
"""Reload group result, even if it has been previously fetched."""
raise NotImplementedError(
'reload_group_result is not supported by this backend.')
def save_group(self, group_id, result):
raise NotImplementedError(
'save_group is not supported by this backend.')
def restore_group(self, group_id, cache=True):
raise NotImplementedError(
'restore_group is not supported by this backend.')
def delete_group(self, group_id):
raise NotImplementedError(
'delete_group is not supported by this backend.')
def __reduce__(self, args=(), kwargs=None):
kwargs = {} if not kwargs else kwargs
return super().__reduce__(args, dict(
kwargs,
connection=self._connection,
exchange=self.exchange.name,
exchange_type=self.exchange.type,
persistent=self.persistent,
serializer=self.serializer,
auto_delete=self.auto_delete,
expires=self.expires,
))
@property
def binding(self):
return self.Queue(
self.oid, self.exchange, self.oid,
durable=False,
auto_delete=True,
expires=self.expires,
)
@cached_property
def oid(self):
# cached here is the app thread OID: name of queue we receive results on.
return self.app.thread_oid
| RPCBackend |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 63398,
"end": 64048
} | class ____(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module: _ModuleLike):
super().__init__(module)
self._setup_prefix()
def _setup_prefix(self):
# Assume that metadata may be nested inside a "basket"
# of multiple eggs and use module_path instead of .archive.
eggs = filter(_is_egg_path, _parents(self.module_path))
egg = next(eggs, None)
egg and self._set_egg(egg)
def _set_egg(self, path: str):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
| EggProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/util.py | {
"start": 5886,
"end": 16336
} | class ____(FrozenSet[str]):
"""Keeps track of the options sent to
:paramref:`.relationship.cascade`"""
_add_w_all_cascades = all_cascades.difference(
["all", "none", "delete-orphan"]
)
_allowed_cascades = all_cascades
_viewonly_cascades = ["expunge", "all", "none", "refresh-expire", "merge"]
__slots__ = (
"save_update",
"delete",
"refresh_expire",
"merge",
"expunge",
"delete_orphan",
)
save_update: bool
delete: bool
refresh_expire: bool
merge: bool
expunge: bool
delete_orphan: bool
def __new__(
cls, value_list: Optional[Union[Iterable[str], str]]
) -> CascadeOptions:
if isinstance(value_list, str) or value_list is None:
return cls.from_string(value_list) # type: ignore
values = set(value_list)
if values.difference(cls._allowed_cascades):
raise sa_exc.ArgumentError(
"Invalid cascade option(s): %s"
% ", ".join(
[
repr(x)
for x in sorted(
values.difference(cls._allowed_cascades)
)
]
)
)
if "all" in values:
values.update(cls._add_w_all_cascades)
if "none" in values:
values.clear()
values.discard("all")
self = super().__new__(cls, values)
self.save_update = "save-update" in values
self.delete = "delete" in values
self.refresh_expire = "refresh-expire" in values
self.merge = "merge" in values
self.expunge = "expunge" in values
self.delete_orphan = "delete-orphan" in values
if self.delete_orphan and not self.delete:
util.warn("The 'delete-orphan' cascade option requires 'delete'.")
return self
def __repr__(self):
return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)]))
@classmethod
def from_string(cls, arg):
values = [c for c in re.split(r"\s*,\s*", arg or "") if c]
return cls(values)
def _validator_events(desc, key, validator, include_removes, include_backrefs):
"""Runs a validation method on an attribute value to be set or
appended.
"""
if not include_backrefs:
def detect_is_backref(state, initiator):
impl = state.manager[key].impl
return initiator.impl is not impl
if include_removes:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value, False)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [
validator(obj, key, value, False) for value in values
]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value, False)
else:
return value
def remove(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
validator(state.obj(), key, value, True)
else:
def append(state, value, initiator):
if initiator.op is not attributes.OP_BULK_REPLACE and (
include_backrefs or not detect_is_backref(state, initiator)
):
return validator(state.obj(), key, value)
else:
return value
def bulk_set(state, values, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
obj = state.obj()
values[:] = [validator(obj, key, value) for value in values]
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value)
else:
return value
event.listen(desc, "append", append, raw=True, retval=True)
event.listen(desc, "bulk_replace", bulk_set, raw=True)
event.listen(desc, "set", set_, raw=True, retval=True)
if include_removes:
event.listen(desc, "remove", remove, raw=True, retval=True)
def polymorphic_union(
table_map, typecolname, aliasname="p_union", cast_nulls=True
):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
:param table_map: mapping of polymorphic identities to
:class:`_schema.Table` objects.
:param typecolname: string name of a "discriminator" column, which will be
derived from the query, producing the polymorphic identity for
each row. If ``None``, no polymorphic discriminator is generated.
:param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
construct generated.
:param cast_nulls: if True, non-existent columns, which are represented
as labeled NULLs, will be passed into CAST. This is a legacy behavior
that is problematic on some backends such as Oracle - in which case it
can be set to False.
"""
colnames: util.OrderedSet[str] = util.OrderedSet()
colnamemaps = {}
types = {}
for key in table_map:
table = table_map[key]
table = coercions.expect(roles.FromClauseRole, table)
table_map[key] = table
m = {}
for c in table.c:
if c.key == typecolname:
raise sa_exc.InvalidRequestError(
"Polymorphic union can't use '%s' as the discriminator "
"column due to mapped column %r; please apply the "
"'typecolname' "
"argument; this is available on "
"ConcreteBase as '_concrete_discriminator_name'"
% (typecolname, c)
)
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
if cast_nulls:
return sql.cast(sql.null(), types[name]).label(name)
else:
return sql.type_coerce(sql.null(), types[name]).label(name)
result = []
for type_, table in table_map.items():
if typecolname is not None:
result.append(
sql.select(
*(
[col(name, table) for name in colnames]
+ [
sql.literal_column(
sql_util._quote_ddl_expr(type_)
).label(typecolname)
]
)
).select_from(table)
)
else:
result.append(
sql.select(
*[col(name, table) for name in colnames]
).select_from(table)
)
return sql.union_all(*result).alias(aliasname)
def identity_key(
class_: Optional[Type[_T]] = None,
ident: Union[Any, Tuple[Any, ...]] = None,
*,
instance: Optional[_T] = None,
row: Optional[Union[Row[Unpack[TupleAny]], RowMapping]] = None,
identity_token: Optional[Any] = None,
) -> _IdentityKeyType[_T]:
r"""Generate "identity key" tuples, as are used as keys in the
:attr:`.Session.identity_map` dictionary.
This function has several call styles:
* ``identity_key(class, ident, identity_token=token)``
This form receives a mapped class and a primary key scalar or
tuple as an argument.
E.g.::
>>> identity_key(MyClass, (1, 2))
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param ident: primary key, may be a scalar or tuple argument.
:param identity_token: optional identity token
* ``identity_key(instance=instance)``
This form will produce the identity key for a given instance. The
instance need not be persistent, only that its primary key attributes
are populated (else the key will contain ``None`` for those missing
values).
E.g.::
>>> instance = MyClass(1, 2)
>>> identity_key(instance=instance)
(<class '__main__.MyClass'>, (1, 2), None)
In this form, the given instance is ultimately run though
:meth:`_orm.Mapper.identity_key_from_instance`, which will have the
effect of performing a database check for the corresponding row
if the object is expired.
:param instance: object instance (must be given as a keyword arg)
* ``identity_key(class, row=row, identity_token=token)``
This form is similar to the class/tuple form, except is passed a
database result row as a :class:`.Row` or :class:`.RowMapping` object.
E.g.::
>>> row = engine.execute(text("select * from table where a=1 and b=2")).first()
>>> identity_key(MyClass, row=row)
(<class '__main__.MyClass'>, (1, 2), None)
:param class: mapped class (must be a positional argument)
:param row: :class:`.Row` row returned by a :class:`_engine.CursorResult`
(must be given as a keyword arg)
:param identity_token: optional identity token
""" # noqa: E501
if class_ is not None:
mapper = class_mapper(class_)
if row is None:
if ident is None:
raise sa_exc.ArgumentError("ident or row is required")
return mapper.identity_key_from_primary_key(
tuple(util.to_list(ident)), identity_token=identity_token
)
else:
return mapper.identity_key_from_row(
row, identity_token=identity_token
)
elif instance is not None:
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
else:
raise sa_exc.ArgumentError("class or instance is required")
| CascadeOptions |
python | streamlit__streamlit | lib/streamlit/runtime/state/common.py | {
"start": 3985,
"end": 5527
} | class ____(Generic[T]):
"""Metadata associated with a single widget. Immutable."""
id: str
deserializer: WidgetDeserializer[T] = field(repr=False)
serializer: WidgetSerializer[T] = field(repr=False)
value_type: ValueFieldName
# An optional user-code callback invoked when the widget's value changes.
# Widget callbacks are called at the start of a script run, before the
# body of the script is executed.
callback: WidgetCallback | None = None
# An optional dictionary of event names to user-code callbacks. These are
# invoked when the corresponding widget event occurs. Callbacks are called
# at the start of a script run, before the body of the script is executed.
# Right now, multiple callbacks are only supported for widgets with a
# `value_type` of `json_value` or `json_trigger_value`. The keys in this
# dictionary should correspond to keys in the widget's JSON state.
callbacks: dict[str, WidgetCallback] | None = None
callback_args: WidgetArgs | None = None
callback_kwargs: WidgetKwargs | None = None
fragment_id: str | None = None
# Optional presenter hook used for customizing the user-visible value in
# st.session_state. This is intended for advanced widgets (e.g. Custom
# Components v2) that need to synthesize a presentation-only value from
# multiple internal widget states.
presenter: WidgetValuePresenter | None = None
def __repr__(self) -> str:
return util.repr_(self)
@dataclass(frozen=True)
| WidgetMetadata |
python | getsentry__sentry | tests/sentry/seer/assisted_query/test_discover_tools.py | {
"start": 5002,
"end": 15095
} | class ____(APITestCase, SnubaTestCase):
def setUp(self):
super().setUp()
self.min_ago = before_now(minutes=1)
def test_get_event_filter_key_values_tag_key(self):
"""Test getting values for a tag key"""
# Create events with the same tag key but different values
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"environment": "production"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"tags": {"environment": "staging"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"tags": {"environment": "production"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="environment",
)
assert result is not None
assert len(result) > 0
# Check structure of returned values
for item in result:
assert "value" in item
assert "count" in item
assert "lastSeen" in item
assert "firstSeen" in item
# Check that we have our environment values
values = {item["value"] for item in result}
assert "production" in values
assert "staging" in values
# Check counts
value_counts = {item["value"]: item["count"] for item in result}
assert value_counts["production"] == 2
assert value_counts["staging"] == 1
def test_get_event_filter_key_values_feature_flag(self):
"""Test getting values for a feature flag"""
# Create events with feature flags
self.store_event(
data={
"contexts": {
"flags": {
"values": [
{"flag": "organizations:test-feature", "result": True},
]
}
},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"contexts": {
"flags": {
"values": [
{"flag": "organizations:test-feature", "result": False},
]
}
},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="organizations:test-feature",
)
assert result is not None
assert len(result) == 2
for item in result:
assert "value" in item
assert "count" in item
assert "lastSeen" in item
assert "firstSeen" in item
assert item["count"] == 1
values = {item["value"] for item in result}
assert "true" in values
assert "false" in values
def test_get_event_filter_key_values_has_key(self):
"""Test that 'has' key returns all available tag keys"""
# Create event with custom tag
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"custom_tag": "value", "custom2": "value2"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="has",
)
assert result is not None
assert isinstance(result, list)
assert len(result) > 0
values = {item["value"] for item in result}
assert "custom_tag" in values
assert "custom2" in values
def test_get_event_filter_key_values_aggregate_function_returns_empty(self):
"""Test that aggregate functions return empty list"""
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="count()",
)
assert result == []
def test_get_event_filter_key_values_with_substring_filter(self):
"""Test substring filtering of filter key values"""
# Create events with different environment values
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"environment": "production"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"tags": {"environment": "production-eu"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"tags": {"environment": "staging"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "d" * 32,
"tags": {"environment": "development"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
# Filter for "prod" substring - should only return production values
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="environment",
substring="prod",
)
assert result is not None
assert isinstance(result, list)
assert len(result) > 0
# Should only contain values with "prod" in them
values = {item["value"] for item in result}
assert "production" in values
assert "production-eu" in values
assert "staging" not in values
assert "development" not in values
def test_get_event_filter_key_values_nonexistent_tag(self):
"""Test that nonexistent filter key returns empty list"""
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="nonexistent_tag_key_12345",
)
# Should return empty list, not None
assert result == []
def test_get_event_filter_key_values_multiple_projects(self):
"""Test getting filter key values across multiple projects"""
project2 = self.create_project(organization=self.organization)
# Create events in both projects with same tag key
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"region": "us-east"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"tags": {"region": "us-west"},
"timestamp": self.min_ago.isoformat(),
},
project_id=project2.id,
)
# Empty projects should be treated as a query for all projects.
for pids in [[self.project.id, project2.id], [], None]:
result = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=pids,
filter_key="region",
)
assert result is not None
assert isinstance(result, list)
assert len(result) > 0
# Should have values from both projects
values = {item["value"] for item in result}
assert "us-east" in values
assert "us-west" in values
def test_get_event_filter_key_values_different_stats_periods(self):
"""Test that different stats periods affect results"""
# Create an event 2 days ago
two_days_ago = before_now(days=2)
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"test_tag": "old_value"},
"timestamp": two_days_ago.isoformat(),
},
project_id=self.project.id,
)
# Create a recent event
self.store_event(
data={
"event_id": "b" * 32,
"tags": {"test_tag": "new_value"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
# Query with 1 day period - should only get recent
result_1d = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="test_tag",
stats_period="24h",
)
# Query with 7 day period - should get both
result_7d = get_event_filter_key_values(
org_id=self.organization.id,
project_ids=[self.project.id],
filter_key="test_tag",
stats_period="7d",
)
assert result_1d is not None
assert result_7d is not None
values_1d = {item["value"] for item in result_1d}
values_7d = {item["value"] for item in result_7d}
# Recent value should be in both
assert "new_value" in values_1d
assert "new_value" in values_7d
# Old value should only be in 7d results
assert "old_value" not in values_1d
assert "old_value" in values_7d
| TestGetEventFilterKeyValues |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 10592,
"end": 10943
} | class ____(Enum):
"""Filter options for FilterParam."""
EQUAL = "eq"
NOT_EQUAL = "ne"
LESS_THAN = "lt"
LESS_THAN_EQUAL = "le"
GREATER_THAN = "gt"
GREATER_THAN_EQUAL = "ge"
IN = "in"
NOT_IN = "not_in"
ANY_EQUAL = "any_eq"
ALL_EQUAL = "all_eq"
IS_NONE = "is_none"
CONTAINS = "contains"
| FilterOptionEnum |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 17586,
"end": 20725
} | class ____:
def test_plurals(self):
assert self.locale._format_timeframe("seconds", 0) == "0 sekund"
assert self.locale._format_timeframe("second", 1) == "sekundę"
assert self.locale._format_timeframe("seconds", 2) == "2 sekundy"
assert self.locale._format_timeframe("seconds", 5) == "5 sekund"
assert self.locale._format_timeframe("seconds", 21) == "21 sekund"
assert self.locale._format_timeframe("seconds", 22) == "22 sekundy"
assert self.locale._format_timeframe("seconds", 25) == "25 sekund"
assert self.locale._format_timeframe("minutes", 0) == "0 minut"
assert self.locale._format_timeframe("minute", 1) == "minutę"
assert self.locale._format_timeframe("minutes", 2) == "2 minuty"
assert self.locale._format_timeframe("minutes", 5) == "5 minut"
assert self.locale._format_timeframe("minutes", 21) == "21 minut"
assert self.locale._format_timeframe("minutes", 22) == "22 minuty"
assert self.locale._format_timeframe("minutes", 25) == "25 minut"
assert self.locale._format_timeframe("hours", 0) == "0 godzin"
assert self.locale._format_timeframe("hour", 1) == "godzinę"
assert self.locale._format_timeframe("hours", 2) == "2 godziny"
assert self.locale._format_timeframe("hours", 5) == "5 godzin"
assert self.locale._format_timeframe("hours", 21) == "21 godzin"
assert self.locale._format_timeframe("hours", 22) == "22 godziny"
assert self.locale._format_timeframe("hours", 25) == "25 godzin"
assert self.locale._format_timeframe("weeks", 0) == "0 tygodni"
assert self.locale._format_timeframe("week", 1) == "tydzień"
assert self.locale._format_timeframe("weeks", 2) == "2 tygodnie"
assert self.locale._format_timeframe("weeks", 5) == "5 tygodni"
assert self.locale._format_timeframe("weeks", 21) == "21 tygodni"
assert self.locale._format_timeframe("weeks", 22) == "22 tygodnie"
assert self.locale._format_timeframe("weeks", 25) == "25 tygodni"
assert self.locale._format_timeframe("months", 0) == "0 miesięcy"
assert self.locale._format_timeframe("month", 1) == "miesiąc"
assert self.locale._format_timeframe("months", 2) == "2 miesiące"
assert self.locale._format_timeframe("months", 5) == "5 miesięcy"
assert self.locale._format_timeframe("months", 21) == "21 miesięcy"
assert self.locale._format_timeframe("months", 22) == "22 miesiące"
assert self.locale._format_timeframe("months", 25) == "25 miesięcy"
assert self.locale._format_timeframe("years", 0) == "0 lat"
assert self.locale._format_timeframe("year", 1) == "rok"
assert self.locale._format_timeframe("years", 2) == "2 lata"
assert self.locale._format_timeframe("years", 5) == "5 lat"
assert self.locale._format_timeframe("years", 21) == "21 lat"
assert self.locale._format_timeframe("years", 22) == "22 lata"
assert self.locale._format_timeframe("years", 25) == "25 lat"
@pytest.mark.usefixtures("lang_locale")
| TestPolishLocale |
python | coleifer__peewee | tests/fields.py | {
"start": 41335,
"end": 41529
} | class ____(TextField):
def db_value(self, value):
return ','.join(value) if value else ''
def python_value(self, value):
return value.split(',') if value else []
| ListField |
python | numba__numba | numba/tests/npyufunc/test_gufunc.py | {
"start": 18511,
"end": 28798
} | class ____(MemoryLeakMixin, TestCase):
target = 'cpu'
def check_add_gufunc(self, gufunc):
@jit(nopython=True)
def jit_add(x, y, res):
gufunc(x, y, res)
x = np.arange(40, dtype='i8').reshape(4, 2, 5)
y = np.int32(100)
res = np.zeros_like(x)
jit_add(x, y, res)
self.assertPreciseEqual(res, x + y)
def test_add_static(self):
@guvectorize('int64[:], int64, int64[:]', '(n),()->(n)',
target=self.target)
def add(x, y, res):
for i in range(x.shape[0]):
res[i] = x[i] + y
self.check_add_gufunc(add)
def test_add_static_cast_args(self):
# cast the second argument from i32 -> i64
@guvectorize('int64[:], int64, int64[:]', '(n),()->(n)',
target=self.target)
def add(x, y, res):
for i in range(x.shape[0]):
res[i] = x[i] + y
self.check_add_gufunc(add)
def test_add_dynamic(self):
@guvectorize('(n),()->(n)', target=self.target)
def add(x, y, res):
for i in range(x.shape[0]):
res[i] = x[i] + y
self.check_add_gufunc(add)
@unittest.expectedFailure
def test_object_mode(self):
@guvectorize('(n),()->(n)', target=self.target, forceobj=True)
def add(x, y, res):
for i in range(x.shape[0]):
res[i] = x[i] + y
self.check_add_gufunc(add)
def check_matmul(self, jit_func):
matrix_ct = 1001
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5)
C = np.arange(matrix_ct * 2 * 5, dtype=np.float32).reshape(matrix_ct, 2, 5)
jit_func(A, B, C)
Gold = np.matmul(A, B)
np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8)
def test_njit_matmul_call(self):
gufunc = guvectorize('(m,n),(n,p)->(m,p)',
target=self.target)(matmulcore)
@jit(nopython=True)
def matmul_jit(A, B, C):
return gufunc(A, B, C)
self.check_matmul(matmul_jit)
def test_axpy(self):
gufunc = GUVectorize(axpy, '(),(),() -> ()', target=self.target,
is_dynamic=True)
@jit(nopython=True)
def axpy_jit(a, x, y, out):
gufunc(a, x, y, out)
x = np.arange(10, dtype=np.intp)
out = np.zeros_like(x)
axpy_jit(x, x, x, out)
self.assertPreciseEqual(out, x * x + x)
def test_output_scalar(self):
@guvectorize('(n),(m) -> ()')
def gufunc(x, y, res):
res[0] = x.sum() + y.sum()
@jit(nopython=True)
def jit_func(x, y, res):
gufunc(x, y, res)
x = np.arange(40, dtype='i8').reshape(4, 10)
y = np.arange(20, dtype='i8')
res = np.zeros(4, dtype='i8')
jit_func(x, y, res)
expected = np.zeros_like(res)
gufunc(x, y, expected)
self.assertPreciseEqual(res, expected)
def test_input_scalar(self):
@guvectorize('() -> ()')
def gufunc(x, res):
res[0] = x + 100
@jit(nopython=True)
def jit_func(x, res):
gufunc(x, res)
x = np.arange(40, dtype='i8').reshape(5, 2, 4)
res = np.zeros_like(x)
jit_func(x, res)
expected = np.zeros_like(res)
gufunc(x, expected)
self.assertPreciseEqual(res, expected)
def test_gufunc_ndim_mismatch(self):
signature = "(n, m), (n, n, n) -> (m), (n, n)"
@guvectorize(signature)
def bar(x, y, res, out):
res[0] = 123
out[0] = 456
@jit(nopython=True)
def foo(x, y, res, out):
bar(x, y, res, out)
N, M = 2, 3
x = np.arange(N**2).reshape(N, N)
y = np.arange(N**3).reshape(N, N, N)
res = np.arange(M)
out = np.arange(N**2).reshape(N, N)
# calling with a 1d array should result in an error
with self.assertRaises(TypingError) as raises:
x_ = np.arange(N * N)
foo(x_, y, res, out)
msg = ('bar: Input operand 0 does not have enough dimensions (has '
f'1, gufunc core with signature {signature} requires 2)')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
y_ = np.arange(N * N).reshape(N, N)
foo(x, y_, res, out)
msg = ('bar: Input operand 1 does not have enough dimensions (has '
f'2, gufunc core with signature {signature} requires 3)')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
res_ = np.array(3)
foo(x, y, res_, out)
msg = ('bar: Output operand 0 does not have enough dimensions (has '
f'0, gufunc core with signature {signature} requires 1)')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
out_ = np.arange(N)
foo(x, y, res, out_)
msg = ('bar: Output operand 1 does not have enough dimensions (has '
f'1, gufunc core with signature {signature} requires 2)')
self.assertIn(msg, str(raises.exception))
def test_mismatch_inner_dimensions(self):
@guvectorize('(n),(n) -> ()')
def bar(x, y, res):
res[0] = 123
@jit(nopython=True)
def foo(x, y, res):
bar(x, y, res)
N = 2
M = 3
x = np.empty((5, 3, N))
y = np.empty((M,))
res = np.zeros((5, 3))
# ensure that NumPy raises an exception
with self.assertRaises(ValueError) as np_raises:
bar(x, y, res)
msg = ('Input operand 1 has a mismatch in its core dimension 0, with '
'gufunc signature (n),(n) -> () (size 3 is different from 2)')
self.assertIn(msg, str(np_raises.exception))
with self.assertRaises(ValueError) as raises:
foo(x, y, res)
msg = ('Operand has a mismatch in one of its core dimensions')
self.assertIn(msg, str(raises.exception))
def test_mismatch_inner_dimensions_input_output(self):
@guvectorize('(n),(m) -> (n)')
def bar(x, y, res):
res[0] = 123
@jit(nopython=True)
def foo(x, y, res):
bar(x, y, res)
N = 2
M = 3
x = np.empty((5, 3, N))
y = np.empty((M,))
res = np.zeros((5, 3))
# ensure that NumPy raises an exception
with self.assertRaises(ValueError) as np_raises:
bar(x, y, res)
msg = ('Output operand 0 has a mismatch in its core dimension 0, with '
'gufunc signature (n),(m) -> (n) (size 3 is different from 2)')
self.assertIn(msg, str(np_raises.exception))
with self.assertRaises(ValueError) as raises:
foo(x, y, res)
msg = ('Operand has a mismatch in one of its core dimensions')
self.assertIn(msg, str(raises.exception))
def test_mismatch_inner_dimensions_output(self):
@guvectorize('(n),(m) -> (m),(m)')
def bar(x, y, res, out):
res[0] = 123
out[0] = 456
@jit(nopython=True)
def foo(x, y, res, out):
bar(x, y, res, out)
N = 2
M = 3
x = np.empty((N,))
y = np.empty((M,))
res = np.zeros((N,))
out = np.zeros((M,))
# ensure that NumPy raises an exception
with self.assertRaises(ValueError) as np_raises:
bar(x, y, res, out)
msg = ('Output operand 0 has a mismatch in its core dimension 0, with '
'gufunc signature (n),(m) -> (m),(m) (size 2 is different from 3)')
self.assertIn(msg, str(np_raises.exception))
with self.assertRaises(ValueError) as raises:
foo(x, y, res, out)
msg = ('Operand has a mismatch in one of its core dimensions')
self.assertIn(msg, str(raises.exception))
def test_mismatch_loop_shape(self):
@guvectorize('(n),(n) -> ()')
def bar(x, y, res):
res[0] = 123
@jit(nopython=True)
def foo(x, y, res):
bar(x, y, res)
N = 2
x = np.empty((1, 5, 3, N,))
y = np.empty((5, 3, N,))
res = np.zeros((5, 3))
with self.assertRaises(ValueError) as raises:
foo(x, y, res)
msg = ('Loop and array shapes are incompatible')
self.assertIn(msg, str(raises.exception))
def test_mismatch_loop_shape_2(self):
@guvectorize('(n),(n) -> (), (n)')
def gufunc(x, y, res, out):
res[0] = x.sum()
for i in range(x.shape[0]):
out[i] += x[i] + y.sum()
@jit
def jit_func(x, y, res, out):
gufunc(x, y, res, out)
N = 2
x = np.arange(4*N).reshape((4, N))
y = np.arange(N)
res = np.empty((3,))
out = np.zeros((3, N))
# ensure that NumPy raises an exception
with self.assertRaises(ValueError) as np_raises:
gufunc(x, y, res, out)
msg = ('operands could not be broadcast together with remapped shapes '
'[original->remapped]: (4,2)->(4,newaxis) (2,)->() '
'(3,)->(3,newaxis) (3,2)->(3,2) and requested shape (2)')
self.assertIn(msg, str(np_raises.exception))
with self.assertRaises(ValueError) as raises:
jit_func(x, y, res, out)
msg = ('Loop and array shapes are incompatible')
self.assertIn(msg, str(raises.exception))
def test_issue_10287(self):
@guvectorize([(float64[:], int64, float64[:])], "(n),()->(n)")
def guve(x, n, res):
pass
@jit
def njit_guve(x, n):
res = np.zeros_like(x)
guve(x, n, res)
return res
rng = np.random.default_rng(69)
for _ in range(20000):
x = rng.random(65)
y = np.repeat(x[None], 130, axis=0)
njit_guve(y, 5)
if __name__ == '__main__':
unittest.main()
| TestGUVectorizeJit |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 6036,
"end": 6245
} | class ____:
def __repr__(self):
return "°<script>alert(1)</script>"
def repr_not_syntax_highlightable(x=CustomRepr()):
"""The default value for x fails to highlight with pygments."""
| CustomRepr |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 809,
"end": 929
} | class ____(TextMessageChunkEvent, Event):
type: EventType = EventType.TEXT_MESSAGE_CHUNK
| TextMessageChunkWorkflowEvent |
python | apache__airflow | airflow-core/src/airflow/serialization/serialized_objects.py | {
"start": 9760,
"end": 19380
} | class ____(AirflowException):
def __init__(self, type_string: str) -> None:
self.type_string = type_string
def __str__(self) -> str:
return (
f"Priority weight strategy class {self.type_string!r} is not registered or "
"you have a top level database access that disrupted the session. "
"Please check the airflow best practices documentation."
)
def _encode_trigger(trigger: BaseEventTrigger | dict):
def _ensure_serialized(d):
"""
Make sure the kwargs dict is JSON-serializable.
This is done with BaseSerialization logic. A simple check is added to
ensure we don't double-serialize, which is possible when a trigger goes
through multiple serialization layers.
"""
if isinstance(d, dict) and Encoding.TYPE in d:
return d
return BaseSerialization.serialize(d)
if isinstance(trigger, dict):
classpath = trigger["classpath"]
kwargs = trigger["kwargs"]
else:
classpath, kwargs = trigger.serialize()
return {
"classpath": classpath,
"kwargs": {k: _ensure_serialized(v) for k, v in kwargs.items()},
}
def encode_asset_condition(var: BaseAsset) -> dict[str, Any]:
"""
Encode an asset condition.
:meta private:
"""
if isinstance(var, Asset):
def _encode_watcher(watcher: AssetWatcher):
return {
"name": watcher.name,
"trigger": _encode_trigger(watcher.trigger),
}
asset = {
"__type": DAT.ASSET,
"name": var.name,
"uri": var.uri,
"group": var.group,
"extra": var.extra,
}
if len(var.watchers) > 0:
asset["watchers"] = [_encode_watcher(watcher) for watcher in var.watchers]
return asset
if isinstance(var, AssetAlias):
return {"__type": DAT.ASSET_ALIAS, "name": var.name, "group": var.group}
if isinstance(var, AssetAll):
return {
"__type": DAT.ASSET_ALL,
"objects": [encode_asset_condition(x) for x in var.objects],
}
if isinstance(var, AssetAny):
return {
"__type": DAT.ASSET_ANY,
"objects": [encode_asset_condition(x) for x in var.objects],
}
if isinstance(var, AssetRef):
return {"__type": DAT.ASSET_REF, **attrs.asdict(var)}
raise ValueError(f"serialization not implemented for {type(var).__name__!r}")
def decode_asset_condition(var: dict[str, Any]) -> BaseAsset:
"""
Decode a previously serialized asset condition.
:meta private:
"""
dat = var["__type"]
if dat == DAT.ASSET:
return decode_asset(var)
if dat == DAT.ASSET_ALL:
return AssetAll(*(decode_asset_condition(x) for x in var["objects"]))
if dat == DAT.ASSET_ANY:
return AssetAny(*(decode_asset_condition(x) for x in var["objects"]))
if dat == DAT.ASSET_ALIAS:
return AssetAlias(name=var["name"], group=var["group"])
if dat == DAT.ASSET_REF:
return Asset.ref(**{k: v for k, v in var.items() if k != "__type"})
raise ValueError(f"deserialization not implemented for DAT {dat!r}")
def smart_decode_trigger_kwargs(d):
"""
Slightly clean up kwargs for display or execution.
This detects one level of BaseSerialization and tries to deserialize the
content, removing some __type __var ugliness when the value is displayed
in UI to the user and/or while execution.
"""
if not isinstance(d, dict) or Encoding.TYPE not in d:
return d
return BaseSerialization.deserialize(d)
def decode_asset(var: dict[str, Any]):
watchers = var.get("watchers", [])
return Asset(
name=var["name"],
uri=var["uri"],
group=var["group"],
extra=var["extra"],
watchers=[
SerializedAssetWatcher(
name=watcher["name"],
trigger={
"classpath": watcher["trigger"]["classpath"],
"kwargs": smart_decode_trigger_kwargs(watcher["trigger"]["kwargs"]),
},
)
for watcher in watchers
],
)
def encode_outlet_event_accessor(var: OutletEventAccessor) -> dict[str, Any]:
key = var.key
return {
"key": BaseSerialization.serialize(key),
"extra": var.extra,
"asset_alias_events": [attrs.asdict(cast("attrs.AttrsInstance", e)) for e in var.asset_alias_events],
}
def decode_outlet_event_accessor(var: dict[str, Any]) -> OutletEventAccessor:
asset_alias_events = var.get("asset_alias_events", [])
outlet_event_accessor = OutletEventAccessor(
key=BaseSerialization.deserialize(var["key"]),
extra=var["extra"],
asset_alias_events=[
AssetAliasEvent(
source_alias_name=e["source_alias_name"],
dest_asset_key=AssetUniqueKey(
name=e["dest_asset_key"]["name"], uri=e["dest_asset_key"]["uri"]
),
# fallback for backward compatibility
dest_asset_extra=e.get("dest_asset_extra", {}),
extra=e["extra"],
)
for e in asset_alias_events
],
)
return outlet_event_accessor
def encode_outlet_event_accessors(var: OutletEventAccessors) -> dict[str, Any]:
return {
"__type": DAT.ASSET_EVENT_ACCESSORS,
"_dict": [
{"key": BaseSerialization.serialize(k), "value": encode_outlet_event_accessor(v)}
for k, v in var._dict.items()
],
}
def decode_outlet_event_accessors(var: dict[str, Any]) -> OutletEventAccessors:
d = OutletEventAccessors()
d._dict = {
BaseSerialization.deserialize(row["key"]): decode_outlet_event_accessor(row["value"])
for row in var["_dict"]
}
return d
def encode_timetable(var: Timetable) -> dict[str, Any]:
"""
Encode a timetable instance.
This delegates most of the serialization work to the type, so the behavior
can be completely controlled by a custom subclass.
:meta private:
"""
timetable_class = type(var)
importable_string = qualname(timetable_class)
if _get_registered_timetable(importable_string) is None:
raise _TimetableNotRegistered(importable_string)
return {Encoding.TYPE: importable_string, Encoding.VAR: var.serialize()}
def decode_timetable(var: dict[str, Any]) -> Timetable:
"""
Decode a previously serialized timetable.
Most of the deserialization logic is delegated to the actual type, which
we import from string.
:meta private:
"""
importable_string = var[Encoding.TYPE]
timetable_class = _get_registered_timetable(importable_string)
if timetable_class is None:
raise _TimetableNotRegistered(importable_string)
return timetable_class.deserialize(var[Encoding.VAR])
def encode_priority_weight_strategy(var: PriorityWeightStrategy) -> str:
"""
Encode a priority weight strategy instance.
In this version, we only store the importable string, so the class should not wait
for any parameters to be passed to it. If you need to store the parameters, you
should store them in the class itself.
"""
priority_weight_strategy_class = type(var)
if priority_weight_strategy_class in airflow_priority_weight_strategies_classes:
return airflow_priority_weight_strategies_classes[priority_weight_strategy_class]
importable_string = qualname(priority_weight_strategy_class)
if _get_registered_priority_weight_strategy(importable_string) is None:
raise _PriorityWeightStrategyNotRegistered(importable_string)
return importable_string
def decode_priority_weight_strategy(var: str) -> PriorityWeightStrategy:
"""
Decode a previously serialized priority weight strategy.
In this version, we only store the importable string, so we just need to get the class
from the dictionary of registered classes and instantiate it with no parameters.
"""
priority_weight_strategy_class = _get_registered_priority_weight_strategy(var)
if priority_weight_strategy_class is None:
raise _PriorityWeightStrategyNotRegistered(var)
return priority_weight_strategy_class()
def encode_start_trigger_args(var: StartTriggerArgs) -> dict[str, Any]:
"""
Encode a StartTriggerArgs.
:meta private:
"""
def serialize_kwargs(key: str) -> Any:
if (val := getattr(var, key)) is None:
return None
return BaseSerialization.serialize(val)
return {
"__type": "START_TRIGGER_ARGS",
"trigger_cls": var.trigger_cls,
"trigger_kwargs": serialize_kwargs("trigger_kwargs"),
"next_method": var.next_method,
"next_kwargs": serialize_kwargs("next_kwargs"),
"timeout": var.timeout.total_seconds() if var.timeout else None,
}
def decode_start_trigger_args(var: dict[str, Any]) -> StartTriggerArgs:
"""
Decode a StartTriggerArgs.
:meta private:
"""
def deserialize_kwargs(key: str) -> Any:
if (val := var[key]) is None:
return None
return BaseSerialization.deserialize(val)
return StartTriggerArgs(
trigger_cls=var["trigger_cls"],
trigger_kwargs=deserialize_kwargs("trigger_kwargs"),
next_method=var["next_method"],
next_kwargs=deserialize_kwargs("next_kwargs"),
timeout=datetime.timedelta(seconds=var["timeout"]) if var["timeout"] else None,
)
| _PriorityWeightStrategyNotRegistered |
python | numpy__numpy | numpy/_core/tests/test_array_coercion.py | {
"start": 6418,
"end": 14957
} | class ____:
def test_void_special_case(self):
# Void dtypes with structures discover tuples as elements
arr = np.array((1, 2, 3), dtype="i,i,i")
assert arr.shape == ()
arr = np.array([(1, 2, 3)], dtype="i,i,i")
assert arr.shape == (1,)
def test_char_special_case(self):
arr = np.array("string", dtype="c")
assert arr.shape == (6,)
assert arr.dtype.char == "c"
arr = np.array(["string"], dtype="c")
assert arr.shape == (1, 6)
assert arr.dtype.char == "c"
def test_char_special_case_deep(self):
# Check that the character special case errors correctly if the
# array is too deep:
nested = ["string"] # 2 dimensions (due to string being sequence)
for i in range(ncu.MAXDIMS - 2):
nested = [nested]
arr = np.array(nested, dtype='c')
assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,)
with pytest.raises(ValueError):
np.array([nested], dtype="c")
def test_unknown_object(self):
arr = np.array(object())
assert arr.shape == ()
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar(self, scalar):
arr = np.array(scalar)
assert arr.shape == ()
assert arr.dtype == scalar.dtype
arr = np.array([[scalar, scalar]])
assert arr.shape == (1, 2)
assert arr.dtype == scalar.dtype
# Additionally to string this test also runs into a corner case
# with datetime promotion (the difference is the promotion order).
@pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
def test_scalar_promotion(self):
for sc1, sc2 in product(scalar_instances(), scalar_instances()):
sc1, sc2 = sc1.values[0], sc2.values[0]
# test all combinations:
try:
arr = np.array([sc1, sc2])
except (TypeError, ValueError):
# The promotion between two times can fail
# XFAIL (ValueError): Some object casts are currently undefined
continue
assert arr.shape == (2,)
try:
dt1, dt2 = sc1.dtype, sc2.dtype
expected_dtype = np.promote_types(dt1, dt2)
assert arr.dtype == expected_dtype
except TypeError as e:
# Will currently always go to object dtype
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar_coercion(self, scalar):
# This tests various scalar coercion paths, mainly for the numerical
# types. It includes some paths not directly related to `np.array`.
if isinstance(scalar, np.inexact):
# Ensure we have a full-precision number if available
scalar = type(scalar)((scalar * 2)**0.5)
# Use casting from object:
arr = np.array(scalar, dtype=object).astype(scalar.dtype)
# Test various ways to create an array containing this scalar:
arr1 = np.array(scalar).reshape(1)
arr2 = np.array([scalar])
arr3 = np.empty(1, dtype=scalar.dtype)
arr3[0] = scalar
arr4 = np.empty(1, dtype=scalar.dtype)
arr4[:] = [scalar]
# All of these methods should yield the same results
assert_array_equal(arr, arr1)
assert_array_equal(arr, arr2)
assert_array_equal(arr, arr3)
assert_array_equal(arr, arr4)
@pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
@pytest.mark.parametrize("cast_to", scalar_instances())
def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
"""
Test that in most cases:
* `np.array(scalar, dtype=dtype)`
* `np.empty((), dtype=dtype)[()] = scalar`
* `np.array(scalar).astype(dtype)`
should behave the same. The only exceptions are parametric dtypes
(mainly datetime/timedelta without unit) and void without fields.
"""
dtype = cast_to.dtype # use to parametrize only the target dtype
for scalar in scalar_instances(times=False):
scalar = scalar.values[0]
if dtype.type == np.void:
if scalar.dtype.fields is not None and dtype.fields is None:
# Here, coercion to "V6" works, but the cast fails.
# Since the types are identical, SETITEM takes care of
# this, but has different rules than the cast.
with pytest.raises(TypeError):
np.array(scalar).astype(dtype)
np.array(scalar, dtype=dtype)
np.array([scalar], dtype=dtype)
continue
# The main test, we first try to use casting and if it succeeds
# continue below testing that things are the same, otherwise
# test that the alternative paths at least also fail.
try:
cast = np.array(scalar).astype(dtype)
except (TypeError, ValueError, RuntimeError):
# coercion should also raise (error type may change)
with pytest.raises(Exception): # noqa: B017
np.array(scalar, dtype=dtype)
if (isinstance(scalar, rational) and
np.issubdtype(dtype, np.signedinteger)):
return
with pytest.raises(Exception): # noqa: B017
np.array([scalar], dtype=dtype)
# assignment should also raise
res = np.zeros((), dtype=dtype)
with pytest.raises(Exception): # noqa: B017
res[()] = scalar
return
# Non error path:
arr = np.array(scalar, dtype=dtype)
assert_array_equal(arr, cast)
# assignment behaves the same
ass = np.zeros((), dtype=dtype)
ass[()] = scalar
assert_array_equal(ass, cast)
@pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
def test_pyscalar_subclasses(self, pyscalar):
"""NumPy arrays are read/write which means that anything but invariant
behaviour is on thin ice. However, we currently are happy to discover
subclasses of Python float, int, complex the same as the base classes.
This should potentially be deprecated.
"""
class MyScalar(type(pyscalar)):
pass
res = np.array(MyScalar(pyscalar))
expected = np.array(pyscalar)
assert_array_equal(res, expected)
@pytest.mark.parametrize("dtype_char", np.typecodes["All"])
def test_default_dtype_instance(self, dtype_char):
if dtype_char in "SU":
dtype = np.dtype(dtype_char + "1")
elif dtype_char == "V":
# Legacy behaviour was to use V8. The reason was float64 being the
# default dtype and that having 8 bytes.
dtype = np.dtype("V8")
else:
dtype = np.dtype(dtype_char)
discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype))
assert discovered_dtype == dtype
assert discovered_dtype.itemsize == dtype.itemsize
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
@pytest.mark.parametrize(["scalar", "error"],
[(np.float64(np.nan), ValueError),
(np.array(-1).astype(np.ulonglong)[()], OverflowError)])
def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
"""
Signed integers are currently different in that they do not cast other
NumPy scalar, but instead use scalar.__int__(). The hardcoded
exception to this rule is `np.array(scalar, dtype=integer)`.
"""
dtype = np.dtype(dtype)
# This is a special case using casting logic. It warns for the NaN
# but allows the cast (giving undefined behaviour).
with np.errstate(invalid="ignore"):
coerced = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
assert_array_equal(coerced, cast)
# However these fail:
with pytest.raises(error):
np.array([scalar], dtype=dtype)
with pytest.raises(error):
cast[()] = scalar
| TestScalarDiscovery |
python | django__django | tests/apps/query_performing_app/apps.py | {
"start": 2568,
"end": 2672
} | class ____(StoredProcedureQueryAppConfig):
database = "other"
| QueryOtherDatabaseStoredProcedureAppConfig |
python | scikit-learn__scikit-learn | sklearn/manifold/_classical_mds.py | {
"start": 508,
"end": 6381
} | class ____(BaseEstimator):
"""Classical multidimensional scaling (MDS).
This is also known as principal coordinates analysis (PCoA) or
Torgerson's scaling. It is a version of MDS that has exact solution
in terms of eigendecomposition. If the input dissimilarity matrix
consists of the pairwise Euclidean distances between some vectors,
then classical MDS is equivalent to PCA applied to this set of vectors.
Read more in the :ref:`User Guide <multidimensional_scaling>`.
Parameters
----------
n_components : int, default=2
Number of embedding dimensions.
metric : str or callable, default='euclidean'
Metric to use for dissimilarity computation. Default is "euclidean".
If metric is a string, it must be one of the options allowed by
`scipy.spatial.distance.pdist` for its metric parameter, or a metric
listed in :func:`sklearn.metrics.pairwise.distance_metrics`
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
metric_params : dict, default=None
Additional keyword arguments for the dissimilarity computation.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Stores the position of the dataset in the embedding space.
dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points.
eigenvalues_ : ndarray of shape (n_components,)
Eigenvalues of the double-centered dissimilarity matrix, corresponding
to each of the selected components. They are equal to the squared 2-norms
of the `n_components` variables in the embedding space.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
See Also
--------
sklearn.decomposition.PCA : Principal component analysis.
MDS : Metric and non-metric MDS.
References
----------
.. [1] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import ClassicalMDS
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> cmds = ClassicalMDS(n_components=2)
>>> X_emb = cmds.fit_transform(X[:100])
>>> X_emb.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"metric": [str, callable],
"metric_params": [dict, None],
}
def __init__(
self,
n_components=2,
*,
metric="euclidean",
metric_params=None,
):
self.n_components = n_components
self.metric = metric
self.metric_params = metric_params
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.pairwise = self.metric == "precomputed"
return tags
def fit(self, X, y=None):
"""
Compute the embedding positions.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
Input data. If ``metric=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self.fit_transform(X)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
"""
Compute and return the embedding positions.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
Input data. If ``metric=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The embedding coordinates.
"""
X = validate_data(self, X)
if self.metric == "precomputed":
self.dissimilarity_matrix_ = X
self.dissimilarity_matrix_ = check_symmetric(
self.dissimilarity_matrix_, raise_exception=True
)
else:
self.dissimilarity_matrix_ = pairwise_distances(
X,
metric=self.metric,
**(self.metric_params if self.metric_params is not None else {}),
)
# Double centering
B = self.dissimilarity_matrix_**2
B = B.astype(np.float64)
B -= np.mean(B, axis=0)
B -= np.mean(B, axis=1, keepdims=True)
B *= -0.5
# Eigendecomposition
w, U = linalg.eigh(B)
# Reversing the order of the eigenvalues/eigenvectors to put
# the eigenvalues in decreasing order
w = w[::-1][: self.n_components]
U = U[:, ::-1][:, : self.n_components]
# Set the signs of eigenvectors to enforce deterministic output
U, _ = svd_flip(U, None)
self.embedding_ = np.sqrt(w) * U
self.eigenvalues_ = w
return self.embedding_
| ClassicalMDS |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_condition_evaluations.py | {
"start": 7227,
"end": 8529
} | class ____(graphene.ObjectType):
rootUniqueId = graphene.NonNull(graphene.String)
evaluationNodes = non_null_list(GrapheneAssetConditionEvaluationNode)
class Meta:
name = "AssetConditionEvaluation"
def __init__(
self,
root_evaluation: AutomationConditionEvaluation,
partition_key: Optional[str] = None,
):
all_evaluations = _flatten_evaluation(root_evaluation)
if root_evaluation.true_subset.is_partitioned:
if partition_key is None:
evaluationNodes = [
GraphenePartitionedAssetConditionEvaluationNode(evaluation)
for evaluation in all_evaluations
]
else:
evaluationNodes = [
GrapheneSpecificPartitionAssetConditionEvaluationNode(evaluation, partition_key)
for evaluation in all_evaluations
]
else:
evaluationNodes = [
GrapheneUnpartitionedAssetConditionEvaluationNode(evaluation)
for evaluation in all_evaluations
]
super().__init__(
rootUniqueId=root_evaluation.condition_snapshot.unique_id,
evaluationNodes=evaluationNodes,
)
| GrapheneAssetConditionEvaluation |
python | kamyu104__LeetCode-Solutions | Python/lonely-pixel-i.py | {
"start": 671,
"end": 961
} | class ____(object):
def findLonelyPixel(self, picture):
"""
:type picture: List[List[str]]
:type N: int
:rtype: int
"""
return sum(col.count('B') == 1 == picture[col.index('B')].count('B') \
for col in zip(*picture))
| Solution2 |
python | PrefectHQ__prefect | src/prefect/context.py | {
"start": 8660,
"end": 10614
} | class ____(ContextModel):
"""
A context for managing the sync Prefect client instances.
Clients were formerly tracked on the TaskRunContext and FlowRunContext, but
having two separate places and the addition of both sync and async clients
made it difficult to manage. This context is intended to be the single
source for sync clients.
The client creates a sync client, which can either be read directly from
the context object OR loaded with get_client, inject_client, or other
Prefect utilities.
with SyncClientContext.get_or_create() as ctx:
c1 = get_client(sync_client=True)
c2 = get_client(sync_client=True)
assert c1 is c2
assert c1 is ctx.client
"""
__var__: ClassVar[ContextVar[Self]] = ContextVar("sync-client-context")
client: SyncPrefectClient
_httpx_settings: Optional[dict[str, Any]] = PrivateAttr(None)
_context_stack: int = PrivateAttr(0)
def __init__(self, httpx_settings: Optional[dict[str, Any]] = None) -> None:
super().__init__(
client=get_client(sync_client=True, httpx_settings=httpx_settings),
)
self._httpx_settings = httpx_settings
self._context_stack = 0
def __enter__(self) -> Self:
self._context_stack += 1
if self._context_stack == 1:
self.client.__enter__()
self.client.raise_for_api_version_mismatch()
return super().__enter__()
else:
return self
def __exit__(self, *exc_info: Any) -> None:
self._context_stack -= 1
if self._context_stack == 0:
self.client.__exit__(*exc_info)
return super().__exit__(*exc_info)
@classmethod
@contextmanager
def get_or_create(cls) -> Generator[Self, None, None]:
ctx = cls.get()
if ctx:
yield ctx
else:
with cls() as ctx:
yield ctx
| SyncClientContext |
python | pandas-dev__pandas | pandas/io/pytables.py | {
"start": 13574,
"end": 65692
} | class ____:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
These additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore("test.h5")
>>> store["foo"] = bar # write to HDF5
>>> bar = store["foo"] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore("test.h5", driver="H5FD_CORE")
>>> store["foo"] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: File | None
_mode: str
def __init__(
self,
path,
mode: str = "a",
complevel: int | None = None,
complib=None,
fletcher32: bool = False,
**kwargs,
) -> None:
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self) -> str:
return self._path
@property
def root(self):
"""return the root node"""
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self) -> str:
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value) -> None:
self.put(key, value)
def __delitem__(self, key: str) -> int | None:
return self.remove(key)
def __getattr__(self, name: str):
"""allow attribute access to get stores"""
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if key in (name, name[1:]):
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
def keys(self, include: str = "pandas") -> list[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When kind equals 'pandas' return pandas objects.
When kind equals 'native' return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if kind has an illegal value
See Also
--------
HDFStore.info : Prints detailed information on the store.
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self) -> Iterator[str]:
return iter(self.keys())
def items(self) -> Iterator[tuple[str, list]]:
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
def open(self, mode: str = "a", **kwargs) -> None:
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
if _table_file_open_policy_is_strict and self.is_open:
msg = (
"Cannot open HDF5 file, which is already opened, "
"even in read-only mode."
)
raise ValueError(msg)
self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self) -> None:
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False) -> None:
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Object to retrieve from file. Raises KeyError if not found.
Returns
-------
object
Same type as object stored in file.
See Also
--------
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool or False
Returns an iterator.
chunksize : int or None
Number or rows to include in iteration, return an iterator.
auto_close : bool or False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
See Also
--------
HDFStore.select_as_coordinates : Returns the selection as an index.
HDFStore.select_column : Returns a single column from the table.
HDFStore.select_as_multiple : Retrieves pandas objects from multiple tables.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.select("/data1") # doctest: +SKIP
A B
0 1 2
1 3 4
>>> store.select("/data1", where="columns == A") # doctest: +SKIP
A
0 1
1 3
>>> store.close() # doctest: +SKIP
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: int | None = None,
stop: int | None = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: int | None = None,
stop: int | None = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : bool, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys, strict=True)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concentration axes
axis = {t.non_index_axes[0][0] for t in _tbls}.pop()
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
def put(
self,
key: str,
value: DataFrame | Series,
format=None,
index: bool = True,
append: bool = False,
complib=None,
complevel: int | None = None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
) -> None:
"""
Store object in HDFStore.
This method writes a pandas DataFrame or Series into an HDF5 file using
either the fixed or table format. The `table` format allows additional
operations like incremental appends and queries but may have performance
trade-offs. The `fixed` format provides faster read/write operations but
does not support appends or queries.
Parameters
----------
key : str
Key of object to store in file.
value : {Series, DataFrame}
Value of object to store in file.
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
complib : default None
This parameter is currently not accepted.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
min_itemsize : int, dict, or None
Dict of columns that specify minimum str sizes.
nan_rep : str
Str to use as str nan representation.
data_columns : list of columns or True, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
errors : str, default 'strict'
The error handling scheme to use for encoding errors.
The default is 'strict' meaning that encoding errors raise a
UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
independent on creation time.
dropna : bool, default False, optional
Remove missing values.
See Also
--------
HDFStore.info : Prints detailed information on the store.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
def remove(self, key: str, where=None, start=None, stop=None) -> int | None:
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
return None
# delete from the table
if not s.is_table:
raise ValueError("can only remove with where on objects written as tables")
return s.delete(where=where, start=start, stop=stop)
def append(
self,
key: str,
value: DataFrame | Series,
format=None,
axes=None,
index: bool | list[str] = True,
append: bool = True,
complib=None,
complevel: int | None = None,
columns=None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool | None = None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
) -> None:
"""
Append to Table in file.
Node must already exist and be Table format.
Parameters
----------
key : str
Key of object to append.
value : {Series, DataFrame}
Value of object to append.
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
axes : default None
This parameter is currently not accepted.
index : bool, default True
Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
complib : default None
This parameter is currently not accepted.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
columns : default None
This parameter is currently not accepted, try data_columns.
min_itemsize : int, dict, or None
Dict of columns that specify minimum str sizes.
nan_rep : str
Str to use as str nan representation.
chunksize : int or None
Size to chunk the writing.
expectedrows : int
Expected TOTAL row size of this table.
dropna : bool, default False, optional
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : default None
Provide an encoding for str.
errors : str, default 'strict'
The error handling scheme to use for encoding errors.
The default is 'strict' meaning that encoding errors raise a
UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
See Also
--------
HDFStore.append_to_multiple : Append to multiple tables.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df1, format="table") # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
>>> store.append("data", df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
A B
0 1 2
1 3 4
0 5 6
1 7 8
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
def append_to_multiple(
self,
d: dict,
value,
selector,
data_columns=None,
axes=None,
dropna: bool = False,
**kwargs,
) -> None:
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to append_to_multiple; "
"you can create the tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)])))
# figure out how to split the value
remain_key = None
remain_values: list = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
min_itemsize = kwargs.pop("min_itemsize", None)
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
filtered = (
{key: value for (key, value) in min_itemsize.items() if key in v}
if min_itemsize is not None
else None
)
self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: int | None = None,
kind: str | None = None,
) -> None:
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self) -> list:
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
See Also
--------
HDFStore.get_node : Returns the node with the key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
See Also
--------
HDFStore.info : Prints detailed information on the store.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df1, format="table") # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
>>> store.append("data", df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> for group in store.walk(): # doctest: +SKIP
... print(group) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Node | None:
"""return the node with the key or None if it does not exist"""
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> GenericFixed | Table:
"""return the storer object for a key, raise if not in the file"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
def copy(
self,
file,
mode: str = "w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: int | None = None,
fletcher32: bool = False,
overwrite: bool = True,
) -> HDFStore:
"""
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: bool | list[str] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
A String containing the python pandas class name, filepath to the HDF5
file and all the object keys along with their respective dataframe shapes.
See Also
--------
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["C", "D"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data1", df1) # doctest: +SKIP
>>> store.put("data2", df2) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data1 frame (shape->[2,2])
/data2 frame (shape->[2,2])
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if lkeys:
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self) -> None:
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
"""validate / deprecate formats"""
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
def _create_storer(
self,
group,
format=None,
value: DataFrame | Series | None = None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> GenericFixed | Table:
"""return a suitable class to operate"""
cls: type[GenericFixed | Table]
if value is not None and not isinstance(value, (Series, DataFrame)):
raise TypeError("value must be None, Series, or DataFrame")
pt = getattr(group._v_attrs, "pandas_type", None)
tt = getattr(group._v_attrs, "table_type", None)
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
if isinstance(value, Series):
pt = "series"
else:
pt = "frame"
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
_STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
try:
cls = _STORER_MAP[pt]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_STORER_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
_TABLE_MAP = {
"generic_table": GenericTable,
"appendable_series": AppendableSeriesTable,
"appendable_multiseries": AppendableMultiSeriesTable,
"appendable_frame": AppendableFrameTable,
"appendable_multiframe": AppendableMultiFrameTable,
"worm": WORMTable,
}
try:
cls = _TABLE_MAP[tt] # type: ignore[index]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_TABLE_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
def _write_to_group(
self,
key: str,
value: DataFrame | Series,
format,
axes=None,
index: bool | list[str] = True,
append: bool = False,
complib=None,
complevel: int | None = None,
fletcher32=None,
min_itemsize: int | dict[str, int] | None = None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool = False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
) -> None:
# we don't want to store a table node at all if our object is 0-len
# as there are not dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
track_times=track_times,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
| HDFStore |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/service.py | {
"start": 1310,
"end": 1560
} | class ____(Exception):
def __init__(self, service_name: str, method_name: str | None, message: str) -> None:
name = f"{service_name}.{method_name}" if method_name else service_name
super().__init__(f"{name}: {message}")
| RpcException |
python | urllib3__urllib3 | test/with_dummyserver/test_connectionpool.py | {
"start": 44774,
"end": 51691
} | class ____(HypercornDummyServerTestCase):
def test_max_retry(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/redirect", fields={"target": "/"}, retries=0)
def test_disabled_retry(self) -> None:
"""Disabled retries should disable redirect handling."""
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/redirect", fields={"target": "/"}, retries=False)
assert r.status == 303
r = pool.request(
"GET",
"/redirect",
fields={"target": "/"},
retries=Retry(redirect=False),
)
assert r.status == 303
with HTTPConnectionPool(
"thishostdoesnotexist.invalid", self.port, timeout=0.001
) as pool:
with pytest.raises(NameResolutionError):
pool.request("GET", "/test", retries=False)
def test_read_retries(self) -> None:
"""Should retry for status codes in the forcelist"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(read=1, status_forcelist=[418])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_read_retries"},
retries=retry,
)
assert resp.status == 200
def test_read_total_retries(self) -> None:
"""HTTP response w/ status code in the forcelist should be retried"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_total_retries"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retries_wrong_forcelist(self) -> None:
"""HTTP response w/ status code not in forcelist shouldn't be retried"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[202])
resp = pool.request(
"GET",
"/successful_retry",
headers={"test-name": "test_wrong_forcelist"},
retries=retry,
)
assert resp.status == 418
def test_default_method_forcelist_retried(self) -> None:
"""urllib3 should retry methods in the default method forcelist"""
with HTTPConnectionPool(self.host, self.port) as pool:
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"OPTIONS",
"/successful_retry",
headers={"test-name": "test_default_forcelist"},
retries=retry,
)
assert resp.status == 200
def test_retries_wrong_method_list(self) -> None:
"""Method not in our allowed list should not be retried, even if code matches"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_wrong_allowed_method"}
retry = Retry(total=1, status_forcelist=[418], allowed_methods=["POST"])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 418
def test_read_retries_unsuccessful(
self,
) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_read_retries_unsuccessful"}
resp = pool.request("GET", "/successful_retry", headers=headers, retries=1)
assert resp.status == 418
def test_retry_reuse_safe(self) -> None:
"""It should be possible to reuse a Retry object across requests"""
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_safe"}
retry = Retry(total=1, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
def test_retry_return_in_response(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
headers = {"test-name": "test_retry_return_in_response"}
retry = Retry(total=2, status_forcelist=[418])
resp = pool.request(
"GET", "/successful_retry", headers=headers, retries=retry
)
assert resp.status == 200
assert resp.retries is not None
assert resp.retries.total == 1
assert resp.retries.history == (
RequestHistory("GET", "/successful_retry", None, 418, None),
)
def test_retry_redirect_history(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
resp = pool.request("GET", "/redirect", fields={"target": "/"})
assert resp.status == 200
assert resp.retries is not None
assert resp.retries.history == (
RequestHistory("GET", "/redirect?target=%2F", None, 303, "/"),
)
def test_multi_redirect_history(self) -> None:
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
fields={"redirect_codes": "303,302,200"},
redirect=False,
)
assert r.status == 303
assert r.retries is not None
assert r.retries.history == tuple()
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request(
"GET",
"/multi_redirect",
retries=10,
fields={"redirect_codes": "303,302,301,307,302,200"},
)
assert r.status == 200
assert r.data == b"Done redirecting"
expected = [
(303, "/multi_redirect?redirect_codes=302,301,307,302,200"),
(302, "/multi_redirect?redirect_codes=301,307,302,200"),
(301, "/multi_redirect?redirect_codes=307,302,200"),
(307, "/multi_redirect?redirect_codes=302,200"),
(302, "/multi_redirect?redirect_codes=200"),
]
assert r.retries is not None
actual = [
(history.status, history.redirect_location)
for history in r.retries.history
]
assert actual == expected
| TestRetry |
python | pytorch__pytorch | torch/ao/quantization/_correct_bias.py | {
"start": 1226,
"end": 5415
} | class ____(ns.Logger):
"""Mean Logger for a Shadow module.
A logger for a Shadow module whose purpose is to record the rolling mean
of the data passed to the floating point and quantized models
"""
def __init__(self):
"""Set up initial values for float and quantized stats, count, float sum, and quant sum."""
super().__init__()
self.stats["float"] = None
self.stats["quantized"] = None
self.count = 0
self.float_sum = None
self.quant_sum = None
def forward(self, x, y): # type: ignore[override]
"""Compute the average of quantized and floating-point data from modules.
The inputs x,y are output data from the quantized and floating-point modules.
x is for the quantized module, y is for the floating point module
"""
if x.is_quantized:
x = x.dequantize()
self.count += 1
if self.stats["quantized"] is None:
self.stats["quantized"] = x
self.quant_sum = x
else:
self.quant_sum += x
self.stats["quantized"] = self.quant_sum / self.count
if self.stats["float"] is None:
self.stats["float"] = y
self.float_sum = y
else:
self.float_sum += y
self.stats["float"] = self.float_sum / self.count
def clear(self):
self.stats["float"] = None
self.stats["quantized"] = None
self.count = 0
self.float_sum = None
self.quant_sum = None
def bias_correction(
float_model,
quantized_model,
img_data,
target_modules=_supported_modules_quantized,
neval_batches=None,
):
"""Perform bias correction on a module.
Using numeric suite shadow module, the expected output of the floating point and quantized modules
is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused
by quantization
Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)
Args:
float_model: a trained model that serves as a reference to what bias correction should aim for
quantized_model: quantized form of float_model that bias correction is to applied to
img_data: calibration data to estimate the expected output (used to find quantization error)
target_modules: specifies what submodules in quantized_model need bias correction (can be extended to
unquantized submodules)
neval_batches: a cap to the number of batches you want to be used for estimating the expected output
"""
ns.prepare_model_with_stubs(
float_model, quantized_model, _supported_modules, MeanShadowLogger
)
uncorrected_modules = {
name: submodule
for name, submodule in quantized_model.named_modules()
if type(submodule) in target_modules
}
for uncorrected_module in uncorrected_modules:
quantized_submodule = get_module(quantized_model, uncorrected_module)
bias = get_param(quantized_submodule, "bias")
if bias is not None:
for count, data in enumerate(img_data, start=1):
quantized_model(data[0])
if count == neval_batches:
break
ob_dict = ns.get_logger_dict(quantized_model)
parent_name, _ = parent_child_names(uncorrected_module)
float_data = ob_dict[parent_name + ".stats"]["float"]
quant_data = ob_dict[parent_name + ".stats"]["quantized"]
# math for expected_error
quantization_error = quant_data - float_data
dims = list(range(quantization_error.dim()))
# Note: we don't want to take the mean over the output channel dimension
dims.remove(1)
expected_error = torch.mean(quantization_error, dims)
updated_bias = bias.data - expected_error
bias.data = updated_bias
# Resets the data contained in the loggers
for submodule in quantized_model.modules():
if isinstance(submodule, MeanShadowLogger):
submodule.clear()
| MeanShadowLogger |
python | kubernetes-client__python | kubernetes/client/models/v1_node_system_info.py | {
"start": 383,
"end": 15034
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'architecture': 'str',
'boot_id': 'str',
'container_runtime_version': 'str',
'kernel_version': 'str',
'kube_proxy_version': 'str',
'kubelet_version': 'str',
'machine_id': 'str',
'operating_system': 'str',
'os_image': 'str',
'swap': 'V1NodeSwapStatus',
'system_uuid': 'str'
}
attribute_map = {
'architecture': 'architecture',
'boot_id': 'bootID',
'container_runtime_version': 'containerRuntimeVersion',
'kernel_version': 'kernelVersion',
'kube_proxy_version': 'kubeProxyVersion',
'kubelet_version': 'kubeletVersion',
'machine_id': 'machineID',
'operating_system': 'operatingSystem',
'os_image': 'osImage',
'swap': 'swap',
'system_uuid': 'systemUUID'
}
def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, swap=None, system_uuid=None, local_vars_configuration=None): # noqa: E501
"""V1NodeSystemInfo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._architecture = None
self._boot_id = None
self._container_runtime_version = None
self._kernel_version = None
self._kube_proxy_version = None
self._kubelet_version = None
self._machine_id = None
self._operating_system = None
self._os_image = None
self._swap = None
self._system_uuid = None
self.discriminator = None
self.architecture = architecture
self.boot_id = boot_id
self.container_runtime_version = container_runtime_version
self.kernel_version = kernel_version
self.kube_proxy_version = kube_proxy_version
self.kubelet_version = kubelet_version
self.machine_id = machine_id
self.operating_system = operating_system
self.os_image = os_image
if swap is not None:
self.swap = swap
self.system_uuid = system_uuid
@property
def architecture(self):
"""Gets the architecture of this V1NodeSystemInfo. # noqa: E501
The Architecture reported by the node # noqa: E501
:return: The architecture of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._architecture
@architecture.setter
def architecture(self, architecture):
"""Sets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node # noqa: E501
:param architecture: The architecture of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and architecture is None: # noqa: E501
raise ValueError("Invalid value for `architecture`, must not be `None`") # noqa: E501
self._architecture = architecture
@property
def boot_id(self):
"""Gets the boot_id of this V1NodeSystemInfo. # noqa: E501
Boot ID reported by the node. # noqa: E501
:return: The boot_id of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._boot_id
@boot_id.setter
def boot_id(self, boot_id):
"""Sets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node. # noqa: E501
:param boot_id: The boot_id of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and boot_id is None: # noqa: E501
raise ValueError("Invalid value for `boot_id`, must not be `None`") # noqa: E501
self._boot_id = boot_id
@property
def container_runtime_version(self):
"""Gets the container_runtime_version of this V1NodeSystemInfo. # noqa: E501
ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2). # noqa: E501
:return: The container_runtime_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._container_runtime_version
@container_runtime_version.setter
def container_runtime_version(self, container_runtime_version):
"""Sets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2). # noqa: E501
:param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and container_runtime_version is None: # noqa: E501
raise ValueError("Invalid value for `container_runtime_version`, must not be `None`") # noqa: E501
self._container_runtime_version = container_runtime_version
@property
def kernel_version(self):
"""Gets the kernel_version of this V1NodeSystemInfo. # noqa: E501
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). # noqa: E501
:return: The kernel_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
"""Sets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). # noqa: E501
:param kernel_version: The kernel_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kernel_version is None: # noqa: E501
raise ValueError("Invalid value for `kernel_version`, must not be `None`") # noqa: E501
self._kernel_version = kernel_version
@property
def kube_proxy_version(self):
"""Gets the kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
Deprecated: KubeProxy Version reported by the node. # noqa: E501
:return: The kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kube_proxy_version
@kube_proxy_version.setter
def kube_proxy_version(self, kube_proxy_version):
"""Sets the kube_proxy_version of this V1NodeSystemInfo.
Deprecated: KubeProxy Version reported by the node. # noqa: E501
:param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kube_proxy_version is None: # noqa: E501
raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`") # noqa: E501
self._kube_proxy_version = kube_proxy_version
@property
def kubelet_version(self):
"""Gets the kubelet_version of this V1NodeSystemInfo. # noqa: E501
Kubelet Version reported by the node. # noqa: E501
:return: The kubelet_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kubelet_version
@kubelet_version.setter
def kubelet_version(self, kubelet_version):
"""Sets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node. # noqa: E501
:param kubelet_version: The kubelet_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kubelet_version is None: # noqa: E501
raise ValueError("Invalid value for `kubelet_version`, must not be `None`") # noqa: E501
self._kubelet_version = kubelet_version
@property
def machine_id(self):
"""Gets the machine_id of this V1NodeSystemInfo. # noqa: E501
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html # noqa: E501
:return: The machine_id of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._machine_id
@machine_id.setter
def machine_id(self, machine_id):
"""Sets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html # noqa: E501
:param machine_id: The machine_id of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and machine_id is None: # noqa: E501
raise ValueError("Invalid value for `machine_id`, must not be `None`") # noqa: E501
self._machine_id = machine_id
@property
def operating_system(self):
"""Gets the operating_system of this V1NodeSystemInfo. # noqa: E501
The Operating System reported by the node # noqa: E501
:return: The operating_system of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
"""Sets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node # noqa: E501
:param operating_system: The operating_system of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and operating_system is None: # noqa: E501
raise ValueError("Invalid value for `operating_system`, must not be `None`") # noqa: E501
self._operating_system = operating_system
@property
def os_image(self):
"""Gets the os_image of this V1NodeSystemInfo. # noqa: E501
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). # noqa: E501
:return: The os_image of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._os_image
@os_image.setter
def os_image(self, os_image):
"""Sets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). # noqa: E501
:param os_image: The os_image of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and os_image is None: # noqa: E501
raise ValueError("Invalid value for `os_image`, must not be `None`") # noqa: E501
self._os_image = os_image
@property
def swap(self):
"""Gets the swap of this V1NodeSystemInfo. # noqa: E501
:return: The swap of this V1NodeSystemInfo. # noqa: E501
:rtype: V1NodeSwapStatus
"""
return self._swap
@swap.setter
def swap(self, swap):
"""Sets the swap of this V1NodeSystemInfo.
:param swap: The swap of this V1NodeSystemInfo. # noqa: E501
:type: V1NodeSwapStatus
"""
self._swap = swap
@property
def system_uuid(self):
"""Gets the system_uuid of this V1NodeSystemInfo. # noqa: E501
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid # noqa: E501
:return: The system_uuid of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._system_uuid
@system_uuid.setter
def system_uuid(self, system_uuid):
"""Sets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid # noqa: E501
:param system_uuid: The system_uuid of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and system_uuid is None: # noqa: E501
raise ValueError("Invalid value for `system_uuid`, must not be `None`") # noqa: E501
self._system_uuid = system_uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSystemInfo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeSystemInfo):
return True
return self.to_dict() != other.to_dict()
| V1NodeSystemInfo |
python | has2k1__plotnine | plotnine/scales/scale_shape.py | {
"start": 1512,
"end": 1821
} | class ____(scale_shape):
"""
Scale for shapes
"""
_aesthetics = ["shape"]
def __post_init__(self, unfilled):
warn(
"Using shapes for an ordinal variable is not advised.",
PlotnineWarning,
)
super().__post_init__(unfilled)
| scale_shape_ordinal |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 3701,
"end": 3839
} | class ____(AirflowException):
"""Raise when there is a violation of a Cluster Policy in DAG definition."""
| AirflowClusterPolicyViolation |
python | cython__cython | tests/run/methodmangling_T5.py | {
"start": 8887,
"end": 9146
} | class ____(__NameWithDunder):
"""
Compile check that it can find the base class
>>> x = Inherits()
"""
pass
def regular_function(__x, dummy=None):
# as before, dummy stops Cython creating a 1 arg, non-keyword call
return __x
| Inherits |
python | pypa__pipenv | pipenv/project.py | {
"start": 4095,
"end": 60324
} | class ____:
"""docstring for Project"""
_lockfile_encoder = _LockFileEncoder()
def __init__(self, python_version=None, chdir=True):
self._name = None
self._virtualenv_location = None
self._download_location = None
self._proper_names_db_path = None
self._pipfile_location = None
self._pipfile_newlines = DEFAULT_NEWLINES
self._lockfile_newlines = DEFAULT_NEWLINES
self._requirements_location = None
self._original_dir = Path.cwd().resolve()
self._environment = None
self._build_system = {"requires": ["setuptools", "wheel"]}
self.python_version = python_version
self.sessions = {} # pip requests sessions
self.s = Setting()
# Load Pip configuration and get items
self.configuration = Configuration(isolated=False, load_only=None)
self.configuration.load()
pip_conf_indexes = []
for section_key, value in self.configuration.items():
key_parts = section_key.split(".", 1)
if key_parts[1] == "index-url":
try:
trusted_hosts = self.configuration.get_value(
f"{key_parts[0]}.trusted-host"
)
except ConfigurationError:
trusted_hosts = []
pip_conf_indexes.append(
{
"url": value,
"verify_ssl": not any(
trusted_host in value for trusted_host in trusted_hosts
)
and "https://" in value,
"name": f"pip_conf_index_{key_parts[0]}",
}
)
if pip_conf_indexes:
self.default_source = None
for pip_conf_index in pip_conf_indexes:
if self.default_source is None:
self.default_source = pip_conf_index
if is_pypi_url(pip_conf_index["url"]):
self.default_source = pip_conf_index
pip_conf_indexes.remove(self.default_source)
elif self.s.PIPENV_TEST_INDEX:
self.default_source = {
"url": self.s.PIPENV_TEST_INDEX,
"verify_ssl": True,
"name": "custom",
}
else:
self.default_source = {
"url": "https://pypi.org/simple",
"verify_ssl": True,
"name": "pypi",
}
default_sources_toml = f"[[source]]\n{tomlkit.dumps(self.default_source)}"
for pip_conf_index in pip_conf_indexes:
default_sources_toml += f"\n\n[[source]]\n{tomlkit.dumps(pip_conf_index)}"
plette.pipfiles.DEFAULT_SOURCE_TOML = default_sources_toml
# Hack to skip this during pipenv run, or -r.
if ("run" not in sys.argv) and chdir:
with contextlib.suppress(TypeError, AttributeError):
os.chdir(self.project_directory)
def path_to(self, p: str) -> Path:
"""Returns the absolute path to a given relative path."""
path = Path(p)
if path.is_absolute():
return path
return Path(self._original_dir) / p
def get_pipfile_section(self, section):
"""Returns the details from the section of the Project's Pipfile."""
return self.parsed_pipfile.get(section, {})
def get_package_categories(self, for_lockfile=False):
"""Ensure we get only package categories and that the default packages section is first."""
categories = set(self.parsed_pipfile.keys())
package_categories = (
categories - NON_CATEGORY_SECTIONS - {"packages", "dev-packages"}
)
if for_lockfile:
return ["default", "develop"] + list(package_categories)
else:
return ["packages", "dev-packages"] + list(package_categories)
def get_requests_session_for_source(self, source):
if not (source and source.get("name")):
return None
if self.sessions.get(source["name"]):
session = self.sessions[source["name"]]
else:
session = get_requests_session(
self.s.PIPENV_MAX_RETRIES,
source.get("verify_ssl", True),
cache_dir=self.s.PIPENV_CACHE_DIR,
source=source.get("url"),
)
self.sessions[source["name"]] = session
return session
@classmethod
def prepend_hash_types(cls, checksums, hash_type):
cleaned_checksums = set()
for checksum in checksums:
if not checksum:
continue
if not checksum.startswith(f"{hash_type}:"):
checksum = f"{hash_type}:{checksum}"
cleaned_checksums.add(checksum)
return sorted(cleaned_checksums)
def get_hash_from_link(self, hash_cache, link):
if link.hash and link.hash_name == FAVORITE_HASH:
return f"{link.hash_name}:{link.hash}"
return hash_cache.get_hash(link)
    def get_hashes_from_pypi(self, ireq, source):
        """Collect artifact hashes for *ireq*'s pinned version from the PyPI
        warehouse JSON API.

        Returns a sorted list of prefixed hash strings, or None when the
        lookup fails (no session, JSON/network error, unknown version).
        """
        pkg_url = f"https://pypi.org/pypi/{ireq.name}/json"
        session = self.get_requests_session_for_source(source)
        if not session:
            return None
        try:
            collected_hashes = set()
            # Grab the hashes from the new warehouse API.
            r = session.get(pkg_url, timeout=self.s.PIPENV_REQUESTS_TIMEOUT)
            api_releases = r.json()["releases"]
            # Normalize the version keys so they match the cleaned specifier.
            cleaned_releases = {}
            for api_version, api_info in api_releases.items():
                api_version = clean_pkg_version(api_version)
                cleaned_releases[api_version] = api_info
            version = ""
            if ireq.specifier:
                # Use the first specifier's version (a pin has exactly one).
                spec = next(iter(s for s in ireq.specifier), None)
                if spec:
                    version = spec.version
            for release in cleaned_releases[version]:
                collected_hashes.add(release["digests"][FAVORITE_HASH])
            return self.prepend_hash_types(collected_hashes, FAVORITE_HASH)
        except (ValueError, KeyError, ConnectionError):
            if self.s.is_verbose():
                err.print(
                    f"[bold][red]Warning[/red][/bold]: Error generating hash for {ireq.name}."
                )
            return None
    def get_hashes_from_remote_index_urls(self, ireq, source):
        """Scrape hashes for *ireq* from a PEP 503 "simple" index page.

        Hashes are read from anchor URL fragments when present; links
        without a hash fragment fall back to downloading the artifact.
        Returns a sorted list of prefixed hash strings, or None on error.
        """
        normalized_name = normalize_name(ireq.name)
        url_name = normalized_name.replace(".", "-")
        pkg_url = f"{source['url']}/{url_name}/"
        session = self.get_requests_session_for_source(source)
        try:
            collected_hashes = set()
            response = session.get(pkg_url, timeout=self.s.PIPENV_REQUESTS_TIMEOUT)
            parser = PackageIndexHTMLParser()
            parser.feed(response.text)
            hrefs = parser.urls
            version = ""
            if ireq.specifier:
                # Use the first specifier's version (a pin has exactly one).
                spec = next(iter(s for s in ireq.specifier), None)
                if spec:
                    version = spec.version
            # We'll check if the href looks like a version-specific page (i.e., ends with '/')
            for package_url in hrefs:
                parsed_url = parse.urlparse(package_url)
                if version in parsed_url.path and parsed_url.path.endswith("/"):
                    # This might be a version-specific page. Fetch and parse it
                    version_url = urljoin(pkg_url, package_url)
                    version_response = session.get(
                        version_url, timeout=self.s.PIPENV_REQUESTS_TIMEOUT
                    )
                    version_parser = PackageIndexHTMLParser()
                    version_parser.feed(version_response.text)
                    version_hrefs = version_parser.urls
                    # Process these new hrefs as potential wheels
                    for v_package_url in version_hrefs:
                        url_params = parse.urlparse(v_package_url).fragment
                        params_dict = parse.parse_qs(url_params)
                        if params_dict.get(FAVORITE_HASH):
                            collected_hashes.add(params_dict[FAVORITE_HASH][0])
                        else:  # Fallback to downloading the file to obtain hash
                            v_package_full_url = urljoin(version_url, v_package_url)
                            link = Link(v_package_full_url)
                            file_hash = self.get_file_hash(session, link)
                            if file_hash:
                                collected_hashes.add(file_hash)
                elif version in parse.unquote(package_url):
                    # Process the current href as a potential wheel from the main page
                    url_params = parse.urlparse(package_url).fragment
                    params_dict = parse.parse_qs(url_params)
                    if params_dict.get(FAVORITE_HASH):
                        collected_hashes.add(params_dict[FAVORITE_HASH][0])
                    else:  # Fallback to downloading the file to obtain hash
                        package_full_url = urljoin(pkg_url, package_url)
                        link = Link(package_full_url)
                        file_hash = self.get_file_hash(session, link)
                        if file_hash:
                            collected_hashes.add(file_hash)
            return self.prepend_hash_types(collected_hashes, FAVORITE_HASH)
        except Exception:
            if self.s.is_verbose():
                err.print(
                    f"[bold red]Warning[/bold red]: Error generating hash for {ireq.name}"
                )
            return None
    @staticmethod
    def get_file_hash(session, link):
        """Download *link* and return its hash as ``<name>:<hexdigest>``.

        Returns None when the file cannot be opened.
        """
        h = hashlib.new(FAVORITE_HASH)
        err.print(f"Downloading file {link.filename} to obtain hash...")
        with open_file(link.url, session) as fp:
            if fp is None:
                return None
            # Stream in chunks to avoid loading the whole artifact in memory.
            for chunk in iter(lambda: fp.read(8096), b""):
                h.update(chunk)
        return f"{h.name}:{h.hexdigest()}"
@property
def name(self) -> str:
if self._name is None:
self._name = Path(self.pipfile_location).parent.name
return self._name
@property
def pipfile_exists(self) -> bool:
return Path(self.pipfile_location).is_file()
@property
def required_python_version(self) -> str:
if self.pipfile_exists:
required = self.parsed_pipfile.get("requires", {}).get("python_full_version")
if not required:
required = self.parsed_pipfile.get("requires", {}).get("python_version")
if required != "*":
return required
@property
def project_directory(self) -> str:
return str(Path(self.pipfile_location).parent.absolute())
@property
def requirements_exists(self) -> bool:
return bool(self.requirements_location)
def is_venv_in_project(self) -> bool:
if self.s.PIPENV_VENV_IN_PROJECT is False:
return False
return self.s.PIPENV_VENV_IN_PROJECT or (
self.project_directory and Path(self.project_directory, ".venv").is_dir()
)
@property
def virtualenv_exists(self) -> bool:
venv_path = Path(self.virtualenv_location)
scripts_dir = self.virtualenv_scripts_location
if venv_path.exists():
# existence of active.bat is dependent on the platform path prefix
# scheme, not platform itself. This handles special cases such as
# Cygwin/MinGW identifying as 'nt' platform, yet preferring a
# 'posix' path prefix scheme.
if scripts_dir.name == "Scripts":
activate_path = scripts_dir / "activate.bat"
else:
activate_path = scripts_dir / "activate"
return activate_path.is_file()
return False
    def get_location_for_virtualenv(self) -> Path:
        """Decide where this project's virtualenv should live.

        Honors an in-project ``.venv`` (directory, or redirect file whose
        content is a path or a name), falling back to the workon home plus
        ``virtualenv_name``.
        """
        # If there's no project yet, set location based on config.
        if not self.project_directory:
            if self.is_venv_in_project():
                return Path(".venv").absolute()
            return get_workon_home().joinpath(self.virtualenv_name)
        dot_venv = Path(self.project_directory) / ".venv"
        # If there's no .venv in project root or it is a folder, set location based on config.
        if not dot_venv.exists() or dot_venv.is_dir():
            if self.is_venv_in_project():
                return dot_venv
            return get_workon_home().joinpath(self.virtualenv_name)
        # Now we assume .venv in project root is a file. Use its content.
        name = dot_venv.read_text().strip()
        # If .venv file is empty, set location based on config.
        if not name:
            return get_workon_home().joinpath(self.virtualenv_name)
        # If content looks like a path, use it as a relative path.
        # Otherwise, use directory named after content in WORKON_HOME.
        if looks_like_dir(name):
            path = Path(self.project_directory) / name
            return path.absolute()
        return get_workon_home().joinpath(name)
    @property
    def installed_packages(self):
        """Packages installed in the project's environment."""
        return self.environment.get_installed_packages()
    @property
    def installed_package_names(self):
        """Canonicalized names of all installed packages."""
        return get_canonical_names([pkg.name for pkg in self.installed_packages])
@property
def lockfile_package_names(self) -> dict[str, set[str]]:
results = {
"combined": {},
}
for category in self.get_package_categories(for_lockfile=True):
category_packages = get_canonical_names(
self.lockfile_content[category].keys()
)
results[category] = set(category_packages)
results["combined"] = results["combined"] | category_packages
return results
@property
def pipfile_package_names(self) -> dict[str, set[str]]:
result = {}
combined = set()
for category in self.get_package_categories():
packages = self.get_pipfile_section(category)
keys = get_canonical_names(packages.keys())
combined |= keys
result[category] = keys
result["combined"] = combined
return result
    def get_environment(self, allow_global: bool = False) -> Environment:
        """Build an Environment object for this project.

        :param allow_global: when true and not already inside a virtualenv,
            target the currently running (system) interpreter instead of
            the project's virtualenv.
        """
        is_venv = is_in_virtualenv()
        if allow_global and not is_venv:
            prefix = sys.prefix
            python = sys.executable
        else:
            prefix = self.virtualenv_location
            python = None
        # Fall back to the default source when no sources are configured.
        sources = self.sources if self.sources else [self.default_source]
        environment = Environment(
            prefix=prefix,
            python=python,
            is_venv=is_venv,
            sources=sources,
            pipfile=self.parsed_pipfile,
            project=self,
        )
        return environment
@property
def environment(self) -> Environment:
if not self._environment:
allow_global = self.s.PIPENV_USE_SYSTEM
self._environment = self.get_environment(allow_global=allow_global)
return self._environment
    def get_outdated_packages(self) -> list[importlib_metadata.Distribution]:
        """Installed packages with newer versions available, honoring the
        Pipfile's ``pre`` setting for pre-releases."""
        return self.environment.get_outdated_packages(pre=self.pipfile.get("pre", False))
@classmethod
def _sanitize(cls, name: str) -> tuple[str, str]:
# Replace dangerous characters into '_'. The length of the sanitized
# project name is limited as 42 because of the limit of linux kernel
#
# 42 = 127 - len('/home//.local/share/virtualenvs//bin/python2') - 32 - len('-HASHHASH')
#
# 127 : BINPRM_BUF_SIZE - 1
# 32 : Maximum length of username
#
# References:
# https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
# http://www.tldp.org/LDP/abs/html/special-chars.html#FIELDREF
# https://github.com/torvalds/linux/blob/2bfe01ef/include/uapi/linux/binfmts.h#L18
return re.sub(r'[ &$`!*@"()\[\]\\\r\n\t]', "_", name)[0:42]
    def _get_virtualenv_hash(self, name: str) -> str:
        """Get the name of the virtualenv adjusted for windows if needed

        Returns (name, encoded_hash)
        """

        def get_name(name, location):
            # Hash the Pipfile location so identical project names in
            # different directories get distinct virtualenvs.
            name = self._sanitize(name)
            hash = hashlib.sha256(location.encode()).digest()[:6]
            encoded_hash = base64.urlsafe_b64encode(hash).decode()
            return name, encoded_hash[:8]

        clean_name, encoded_hash = get_name(name, self.pipfile_location)
        venv_name = f"{clean_name}-{encoded_hash}"

        # This should work most of the time for
        #   Case-sensitive filesystems,
        #   In-project venv
        #   "Proper" path casing (on non-case-sensitive filesystems).
        if (
            not fnmatch.fnmatch("A", "a")
            or self.is_venv_in_project()
            or get_workon_home().joinpath(venv_name).exists()
        ):
            return clean_name, encoded_hash

        # Check for different capitalization of the same project.
        for path in get_workon_home().iterdir():
            if not is_virtual_environment(path):
                continue
            try:
                env_name, hash_ = path.name.rsplit("-", 1)
            except ValueError:
                continue
            if len(hash_) != 8 or env_name.lower() != name.lower():
                continue
            return get_name(env_name, self.pipfile_location.replace(name, env_name))

        # Use the default if no matching env exists.
        return clean_name, encoded_hash
@property
def virtualenv_name(self) -> str:
custom_name = self.s.PIPENV_CUSTOM_VENV_NAME
if custom_name:
return custom_name
sanitized, encoded_hash = self._get_virtualenv_hash(self.name)
suffix = ""
if self.s.PIPENV_PYTHON:
if Path(self.s.PIPENV_PYTHON).is_absolute():
suffix = f"-{Path(self.s.PIPENV_PYTHON).name}"
else:
suffix = f"-{self.s.PIPENV_PYTHON}"
# If the pipfile was located at '/home/user/MY_PROJECT/Pipfile',
# the name of its virtualenv will be 'my-project-wyUfYPqE'
return sanitized + "-" + encoded_hash + suffix
    @property
    def virtualenv_location(self) -> Path:
        """Location of the project's virtualenv (cached after first lookup).

        An active VIRTUAL_ENV wins unless pipenv itself is active or
        PIPENV_IGNORE_VIRTUALENVS is set.

        :raises RuntimeError: when no project directory is available.
        """
        # if VIRTUAL_ENV is set, use that.
        virtualenv_env = os.getenv("VIRTUAL_ENV")
        if (
            "PIPENV_ACTIVE" not in os.environ
            and not self.s.PIPENV_IGNORE_VIRTUALENVS
            and virtualenv_env
        ):
            return Path(virtualenv_env)

        if not self._virtualenv_location:  # Use cached version, if available.
            if not self.project_directory:
                raise RuntimeError("Project location not created nor specified")
            location = self.get_location_for_virtualenv()
            self._virtualenv_location = Path(location)
        return self._virtualenv_location
@property
def virtualenv_src_location(self) -> Path:
if self.virtualenv_location:
loc = Path(self.virtualenv_location) / "src"
else:
loc = Path(self.project_directory) / "src"
loc.mkdir(parents=True, exist_ok=True)
return loc
    @property
    def virtualenv_scripts_location(self) -> Path:
        """The platform-appropriate scripts directory of the virtualenv."""
        return virtualenv_scripts_dir(self.virtualenv_location)
@property
def download_location(self) -> Path:
if self._download_location is None:
loc = Path(self.virtualenv_location) / "downloads"
self._download_location = loc
# Create the directory, if it doesn't exist.
self._download_location.mkdir(parents=True, exist_ok=True)
return self._download_location
@property
def proper_names_db_path(self) -> str:
if self._proper_names_db_path is None:
self._proper_names_db_path = Path(
self.virtualenv_location, "pipenv-proper-names.txt"
)
# Ensure the parent directory exists before touching the file
self._proper_names_db_path.parent.mkdir(parents=True, exist_ok=True)
self._proper_names_db_path.touch() # Ensure the file exists.
return self._proper_names_db_path
@property
def proper_names(self) -> str:
with self.proper_names_db_path.open() as f:
return f.read().splitlines()
def register_proper_name(self, name: str) -> None:
"""Registers a proper name to the database."""
with self.proper_names_db_path.open("a") as f:
f.write(f"{name}\n")
    @property
    def pipfile_location(self) -> str:
        """Path to the project's Pipfile (cached after first discovery).

        PIPENV_PIPFILE overrides discovery entirely; otherwise the Pipfile
        is searched for up to PIPENV_MAX_DEPTH levels, defaulting to the
        literal "Pipfile" when none is found.
        """
        from pipenv.utils.pipfile import find_pipfile

        if self.s.PIPENV_PIPFILE:
            return self.s.PIPENV_PIPFILE

        if self._pipfile_location is None:
            try:
                loc = find_pipfile(max_depth=self.s.PIPENV_MAX_DEPTH)
            except RuntimeError:
                # No Pipfile found anywhere — fall back to a relative name.
                loc = "Pipfile"
            self._pipfile_location = normalize_pipfile_path(loc)
        return self._pipfile_location
@property
def requirements_location(self) -> str | None:
if self._requirements_location is None:
try:
loc = find_requirements(max_depth=self.s.PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._requirements_location = loc
return self._requirements_location
@property
def parsed_pipfile(self) -> tomlkit.toml_document.TOMLDocument | TPipfile:
"""Parse Pipfile into a TOMLFile"""
contents = self.read_pipfile()
return self._parse_pipfile(contents)
def read_pipfile(self) -> str:
# Open the pipfile, read it into memory.
if not self.pipfile_exists:
return ""
with open(self.pipfile_location) as f:
contents = f.read()
self._pipfile_newlines = preferred_newlines(f)
return contents
    def _parse_pipfile(
        self, contents: str
    ) -> tomlkit.toml_document.TOMLDocument | TPipfile:
        """Parse Pipfile text, preferring tomlkit (which keeps comments)."""
        try:
            return tomlkit.parse(contents)
        except Exception:
            # We lose comments here, but it's for the best.)
            # Fallback to toml parser, for large files.
            return toml.loads(contents)
    def _read_pyproject(self) -> None:
        """Load pyproject.toml (when present) and record the build system."""
        pyproject_path = Path(self.path_to("pyproject.toml"))
        if pyproject_path.exists():
            self._pyproject = toml.load(pyproject_path)
            build_system = self._pyproject.get("build-system", None)
            setup_py_path = Path(self.path_to("setup.py"))
            if not setup_py_path.exists():
                # Without a setup.py, fall back to a PEP 517 default build
                # system when none (or an empty one) is declared.
                if not build_system or not build_system.get("requires"):
                    build_system = {
                        "requires": ["setuptools>=40.8.0", "wheel"],
                        "build-backend": get_default_pyproject_backend(),
                    }
                self._build_system = build_system
    @property
    def build_requires(self) -> list[str]:
        """PEP 518 build requirements, defaulting to setuptools + wheel."""
        return self._build_system.get("requires", ["setuptools>=40.8.0", "wheel"])
    @property
    def build_backend(self) -> str:
        """The PEP 517 build backend, falling back to the default backend."""
        return self._build_system.get("build-backend", get_default_pyproject_backend())
    @property
    def settings(self) -> tomlkit.items.Table | dict[str, str | bool]:
        """A dictionary of the settings added to the Pipfile."""
        # Stored under the optional [pipenv] table of the Pipfile.
        return self.parsed_pipfile.get("pipenv", {})
def has_script(self, name: str) -> bool:
try:
return name in self.parsed_pipfile["scripts"]
except KeyError:
return False
    def build_script(self, name: str, extra_args: list[str] | None = None) -> Script:
        """Build a Script for the named [scripts] entry.

        Falls back to treating *name* itself as the command when no entry
        exists; *extra_args* are appended to the resulting script.
        """
        try:
            script = Script.parse(self.parsed_pipfile["scripts"][name])
        except KeyError:
            script = Script(name)
        if extra_args:
            script.extend(extra_args)
        return script
def update_settings(self, d: dict[str, str | bool]) -> None:
settings = self.settings
changed = False
for new in d.keys(): # noqa: PLC0206
if new not in settings:
settings[new] = d[new]
changed = True
if changed:
p = self.parsed_pipfile
p["pipenv"] = settings
# Write the changes to disk.
self.write_toml(p)
    def lockfile(self, categories=None):
        """Pipfile.lock divided by PyPI and external dependencies."""
        lockfile_loaded = False
        if self.lockfile_exists:
            try:
                lockfile = self.load_lockfile(expand_env_vars=False)
                lockfile_loaded = True
            except Exception:
                # Corrupt/unreadable lockfile — regenerate from the Pipfile.
                pass
        if not lockfile_loaded:
            # No (valid) lockfile: derive one from the Pipfile's metadata.
            with open(self.pipfile_location) as pf:
                lockfile = plette.Lockfile.with_meta_from(
                    plette.Pipfile.load(pf), categories=categories
                )
                lockfile = lockfile._data
        if categories is None:
            categories = self.get_package_categories(for_lockfile=True)
        # Make sure every requested category exists in the result.
        for category in categories:
            lock_section = lockfile.get(category)
            if lock_section is None:
                lockfile[category] = {}
        return lockfile
    @property
    def _pipfile(self):
        """The Pipfile parsed via pipenv's requirements-lib wrapper."""
        from pipenv.utils.pipfile import Pipfile as ReqLibPipfile

        pf = ReqLibPipfile.load(self.pipfile_location)
        return pf
    @property
    def lockfile_location(self):
        """Path of Pipfile.lock, alongside the Pipfile."""
        return f"{self.pipfile_location}.lock"
@property
def lockfile_exists(self):
return Path(self.lockfile_location).is_file()
    @property
    def lockfile_content(self):
        """Parsed contents of Pipfile.lock."""
        return self.load_lockfile()
def get_editable_packages(self, category):
packages = {
k: v
for k, v in self.parsed_pipfile.get(category, {}).items()
if is_editable(v)
}
return packages
def _get_vcs_packages(self, dev=False):
from pipenv.utils.requirementslib import is_vcs
section = "dev-packages" if dev else "packages"
packages = {
k: v
for k, v in self.parsed_pipfile.get(section, {}).items()
if is_vcs(v) or is_vcs(k)
}
return packages or {}
@property
def all_packages(self):
"""Returns a list of all packages."""
packages = {}
for category in self.get_package_categories():
packages.update(self.parsed_pipfile.get(category, {}))
return packages
    @property
    def packages(self):
        """The default [packages] section of the Pipfile (name -> spec)."""
        return self.get_pipfile_section("packages")
    @property
    def dev_packages(self):
        """The [dev-packages] section of the Pipfile (name -> spec)."""
        return self.get_pipfile_section("dev-packages")
@property
def pipfile_is_empty(self):
if not self.pipfile_exists:
return True
if not self.read_pipfile():
return True
return False
    def create_pipfile(self, python=None):
        """Creates the Pipfile, filled with juicy defaults."""
        # Inherit the pip's index configuration of install command.
        command = InstallCommand(name="InstallCommand", summary="pip Install command.")
        indexes = command.cmd_opts.get_option("--extra-index-url").default
        sources = [self.default_source]
        for i, index in enumerate(indexes):
            if not index:
                continue

            source_name = f"pip_index_{i}"
            verify_ssl = index.startswith("https")
            sources.append({"url": index, "verify_ssl": verify_ssl, "name": source_name})

        data = {
            "source": sources,
            # Default packages.
            "packages": {},
            "dev-packages": {},
        }
        # Default requires.
        required_python = python
        if not python:
            # No interpreter requested — use the discovered python.
            required_python = self.which("python")
        version = python_version(required_python) or self.s.PIPENV_DEFAULT_PYTHON_VERSION
        if version:
            # Record only major.minor in python_version.
            data["requires"] = {"python_version": ".".join(version.split(".")[:2])}
        if python and version and len(version.split(".")) > 2:
            # An explicitly requested interpreter pins the full version too.
            data["requires"].update({"python_full_version": version})
        self.write_toml(data)
@classmethod
def populate_source(cls, source):
"""Derive missing values of source from the existing fields."""
# Only URL parameter is mandatory, let the KeyError be thrown.
if "name" not in source:
source["name"] = get_url_name(source["url"])
if "verify_ssl" not in source:
source["verify_ssl"] = "https://" in source["url"]
if not isinstance(source["verify_ssl"], bool):
source["verify_ssl"] = str(source["verify_ssl"]).lower() == "true"
return source
    def get_or_create_lockfile(self, categories, from_pipfile=False):
        """Load the project lockfile, building one when needed.

        With ``from_pipfile`` the lockfile data is rebuilt from the Pipfile;
        otherwise the on-disk lockfile is loaded when present, and a fresh
        one is derived as a last resort.
        """
        from pipenv.utils.locking import Lockfile as Req_Lockfile

        if from_pipfile and self.pipfile_exists:
            # Rebuild the lockfile data category-by-category from the Pipfile.
            lockfile_dict = {}
            categories = self.get_package_categories(for_lockfile=True)
            _lockfile = self.lockfile(categories=categories)
            for category in categories:
                lockfile_dict[category] = _lockfile.get(category, {}).copy()
            lockfile_dict.update({"_meta": self.get_lockfile_meta()})
            lockfile = Req_Lockfile.from_data(
                path=self.lockfile_location, data=lockfile_dict, meta_from_project=False
            )
        elif self.lockfile_exists:
            try:
                lockfile = Req_Lockfile.load(self.lockfile_location)
            except OSError:
                # Unreadable on disk — fall back to the parsed content.
                lockfile = Req_Lockfile.from_data(
                    self.lockfile_location, self.lockfile_content
                )
        else:
            lockfile = Req_Lockfile.from_data(
                path=self.lockfile_location,
                data=self.lockfile(),
                meta_from_project=False,
            )
        if lockfile.lockfile is not None:
            return lockfile
        if self.lockfile_exists and self.lockfile_content:
            # Loaded object lacks a model — rebuild it with normalized sources.
            lockfile_dict = self.lockfile_content.copy()
            sources = lockfile_dict.get("_meta", {}).get("sources", [])
            if not sources:
                sources = self.pipfile_sources(expand_vars=False)
            elif not isinstance(sources, list):
                sources = [sources]
            lockfile_dict["_meta"]["sources"] = [self.populate_source(s) for s in sources]
            _created_lockfile = Req_Lockfile.from_data(
                path=self.lockfile_location, data=lockfile_dict, meta_from_project=False
            )
            lockfile.lockfile = lockfile.projectfile.model = _created_lockfile
            return lockfile
        else:
            # Nothing usable on disk — regenerate from the Pipfile.
            return self.get_or_create_lockfile(categories=categories, from_pipfile=True)
    def get_lockfile_meta(self):
        """Build the ``_meta`` block for Pipfile.lock: the Pipfile hash,
        the lockfile spec version, normalized sources and requirements."""
        from .vendor.plette.lockfiles import PIPFILE_SPEC_CURRENT

        if "source" in self.parsed_pipfile:
            # Copy each source so later normalization can't mutate the Pipfile.
            sources = [dict(source) for source in self.parsed_pipfile["source"]]
        else:
            sources = self.pipfile_sources(expand_vars=False)
        if not isinstance(sources, list):
            sources = [sources]
        return {
            "hash": {"sha256": self.calculate_pipfile_hash()},
            "pipfile-spec": PIPFILE_SPEC_CURRENT,
            "sources": [self.populate_source(s) for s in sources],
            "requires": self.parsed_pipfile.get("requires", {}),
        }
    def write_toml(self, data, path=None):
        """Writes the given data structure out as TOML."""
        if path is None:
            path = self.pipfile_location
        data = convert_toml_outline_tables(data, self)
        try:
            formatted_data = tomlkit.dumps(data).rstrip()
        except Exception:
            # Direct serialization failed — rebuild a document table by
            # table from the package categories instead.
            document = tomlkit.document()
            for category in self.get_package_categories():
                document[category] = tomlkit.table()
                # Convert things to inline tables — fancy :)
                for package in data.get(category, {}):
                    if hasattr(data[category][package], "keys"):
                        table = tomlkit.inline_table()
                        table.update(data[category][package])
                        document[category][package] = table
                    else:
                        document[category][package] = tomlkit.string(
                            data[category][package]
                        )
            formatted_data = tomlkit.dumps(document).rstrip()

        # Preserve the Pipfile's own newline convention; use the default
        # for any other destination path.
        if Path(path).absolute() == Path(self.pipfile_location).absolute():
            newlines = self._pipfile_newlines
        else:
            newlines = DEFAULT_NEWLINES
        formatted_data = cleanup_toml(formatted_data)
        with open(path, "w", newline=newlines) as f:
            f.write(formatted_data)
    def write_lockfile(self, content):
        """Write out the lockfile."""
        s = self._lockfile_encoder.encode(content)
        open_kwargs = {"newline": self._lockfile_newlines, "encoding": "utf-8"}
        # Atomic write so a crash can't leave a truncated lockfile behind.
        with atomic_open_for_write(self.lockfile_location, **open_kwargs) as f:
            f.write(s)
            # Write newline at end of document. GH-319.
            # Only need '\n' here; the file object handles the rest.
            if not s.endswith("\n"):
                f.write("\n")
def pipfile_sources(self, expand_vars=True):
if self.pipfile_is_empty or "source" not in self.parsed_pipfile:
sources = [self.default_source]
if os.environ.get("PIPENV_PYPI_MIRROR"):
sources[0]["url"] = os.environ["PIPENV_PYPI_MIRROR"]
return sources
# Expand environment variables in the source URLs.
sources = [
{k: safe_expandvars(v) if expand_vars else v for k, v in source.items()}
for source in self.parsed_pipfile["source"]
]
for source in sources:
if os.environ.get("PIPENV_PYPI_MIRROR") and is_pypi_url(source.get("url")):
source["url"] = os.environ["PIPENV_PYPI_MIRROR"]
return sources
def get_default_index(self):
return self.populate_source(self.pipfile_sources()[0])
def get_index_by_name(self, index_name):
for source in self.pipfile_sources():
if source.get("name") == index_name:
return source
def get_index_by_url(self, index_url):
for source in self.pipfile_sources():
if source.get("url") == index_url:
return source
@property
def sources(self):
if self.lockfile_exists and hasattr(self.lockfile_content, "keys"):
meta_ = self.lockfile_content.get("_meta", {})
sources_ = meta_.get("sources")
if sources_:
return sources_
else:
return self.pipfile_sources()
    @property
    def sources_default(self):
        """The first configured source (the default index)."""
        return self.sources[0]
@property
def index_urls(self):
return [src.get("url") for src in self.sources]
def find_source(self, source):
"""
Given a source, find it.
source can be a url or an index name.
"""
if not is_valid_url(source):
try:
source = self.get_source(name=source)
except SourceNotFound:
source = self.get_source(url=source)
else:
source = self.get_source(url=source)
return source
def get_source(self, name=None, url=None, refresh=False):
from pipenv.utils.internet import is_url_equal
def find_source(sources, name=None, url=None):
source = None
if name:
source = next(
iter(s for s in sources if "name" in s and s["name"] == name), None
)
elif url:
source = next(
iter(
s
for s in sources
if "url" in s and is_url_equal(url, s.get("url", ""))
),
None,
)
if source is not None:
return source
sources = (self.sources, self.pipfile_sources())
if refresh:
sources = reversed(sources)
found = next(
iter(find_source(source, name=name, url=url) for source in sources), None
)
target = next(iter(t for t in (name, url) if t is not None))
if found is None:
raise SourceNotFound(target)
return found
def get_package_name_in_pipfile(self, package_name, category):
section = self.parsed_pipfile.get(category, {})
normalized_name = pep423_name(package_name)
for name in section:
if pep423_name(name) == normalized_name:
return name
return package_name # Return original name if not found
def get_pipfile_entry(self, package_name, category):
name = self.get_package_name_in_pipfile(package_name, category)
return self.parsed_pipfile.get(category, {}).get(name)
    def _sort_category(self, category) -> Table:
        """Return a copy of *category* as a tomlkit Table with entries
        sorted by key, repairing tomlkit's internal index map afterwards."""
        # copy table or create table from dict-like object
        table = tomlkit.table()
        if isinstance(category, Table):
            table.update(category.value)
        else:
            table.update(category)
        # sort the table internally
        table._value._body.sort(key=lambda t: t[0] and t[0].key or "")
        # Re-synchronize tomlkit's key -> position map with the sorted body;
        # entries may be stored as a bare index or a tuple of indices.
        for index, (key, _) in enumerate(table._value._body):
            assert isinstance(key, SingleKey)
            indices = table._value._map[key]
            if isinstance(indices, tuple):
                table._value._map[key] = (index,) + indices[1:]
            else:
                table._value._map[key] = index
        return table
def remove_package_from_pipfile(self, package_name, category):
# Read and append Pipfile.
name = self.get_package_name_in_pipfile(package_name, category=category)
p = self.parsed_pipfile
if name:
del p[category][name]
if self.settings.get("sort_pipfile"):
p[category] = self._sort_category(p[category])
self.write_toml(p)
return True
return False
def reset_category_in_pipfile(self, category):
# Read and append Pipfile.
p = self.parsed_pipfile
if category:
del p[category]
p[category] = {}
self.write_toml(p)
return True
return False
    def remove_packages_from_pipfile(self, packages):
        """Remove every package in *packages* from all Pipfile categories,
        matching on PEP 423 normalized names, then rewrite the Pipfile."""
        parsed = self.parsed_pipfile
        packages = {pep423_name(pkg) for pkg in packages}
        for category in self.get_package_categories():
            pipfile_section = parsed.get(category, {})
            pipfile_packages = {pep423_name(pkg_name) for pkg_name in pipfile_section}
            # Only names present in both sets need removal from this section.
            to_remove = packages & pipfile_packages
            for pkg in to_remove:
                # Resolve back to the Pipfile's exact spelling before deleting.
                pkg_name = self.get_package_name_in_pipfile(pkg, category=category)
                del parsed[category][pkg_name]
        self.write_toml(parsed)
    def generate_package_pipfile_entry(
        self, package, pip_line, category=None, index_name=None
    ):
        """Generate a package entry from pip install line
        given the installreq package and the pip line that generated it.

        Returns a ``(name, normalized_name, entry)`` tuple; the entry is a
        plain specifier string when the requirement carries nothing but a
        version constraint.
        """
        # Don't re-capitalize file URLs or VCSs.
        if not isinstance(package, InstallRequirement):
            package, req_name = expansive_install_req_from_line(package.strip())
        else:
            _, req_name = expansive_install_req_from_line(pip_line.strip())
        if req_name is None:
            req_name = determine_package_name(package)
        path_specifier = determine_path_specifier(package)
        vcs_specifier = determine_vcs_specifier(package)
        name = self.get_package_name_in_pipfile(req_name, category=category)
        normalized_name = normalize_name(req_name)

        extras = package.extras
        specifier = "*"
        if package.req and package.specifier:
            specifier = str(package.specifier)

        # Construct package requirement
        entry = {}
        if extras:
            entry["extras"] = list(extras)
        if path_specifier:
            # Local file/path requirement; "-e" marks it editable.
            editable = pip_line.startswith("-e")
            entry["file"] = unquote(
                normalize_editable_path_for_pip(path_specifier)
                if editable
                else str(path_specifier)
            )
            if editable:
                entry["editable"] = editable
        elif vcs_specifier:
            for vcs in VCS_LIST:
                if vcs in package.link.scheme:
                    if pip_line.startswith("-e"):
                        entry["editable"] = True
                        pip_line = pip_line.replace("-e ", "")
                    if "[" in pip_line and "]" in pip_line:
                        # Extras spelled inline on the pip line, e.g. pkg[a,b].
                        extras_section = pip_line.split("[")[1].split("]")[0]
                        entry["extras"] = sorted(
                            [extra.strip() for extra in extras_section.split(",")]
                        )
                    if "@ " in pip_line:
                        # PEP 508 direct reference: "<name> @ <url>".
                        vcs_part = pip_line.split("@ ", 1)[1]
                    else:
                        vcs_part = pip_line
                    # A trailing "@<ref>" pins a revision; strip any fragment.
                    vcs_parts = vcs_part.rsplit("@", 1)
                    if len(vcs_parts) > 1:
                        entry["ref"] = vcs_parts[1].split("#", 1)[0].strip()
                    vcs_url = vcs_parts[0].strip()
                    vcs_url = extract_vcs_url(vcs_url)
                    entry[vcs] = vcs_url
                    # Check and extract subdirectory fragment
                    if package.link.subdirectory_fragment:
                        entry["subdirectory"] = package.link.subdirectory_fragment
                    break
        else:
            entry["version"] = specifier

        if index_name:
            entry["index"] = index_name
        elif hasattr(package, "index"):
            entry["index"] = package.index
        # Collapse a bare version entry to the plain specifier string.
        if len(entry) == 1 and "version" in entry:
            return name, normalized_name, specifier
        else:
            return name, normalized_name, entry
def add_package_to_pipfile(self, package, pip_line, dev=False, category=None):
category = category if category else "dev-packages" if dev else "packages"
name, normalized_name, entry = self.generate_package_pipfile_entry(
package, pip_line, category=category
)
return self.add_pipfile_entry_to_pipfile(
name, normalized_name, entry, category=category
)
def add_pipfile_entry_to_pipfile(self, name, normalized_name, entry, category=None):
newly_added = False
# Read and append Pipfile.
parsed_pipfile = self.parsed_pipfile
# Set empty group if it doesn't exist yet.
if category not in parsed_pipfile:
parsed_pipfile[category] = {}
section = parsed_pipfile.get(category, {})
for entry_name in section.copy().keys():
if entry_name.lower() == normalized_name.lower():
del parsed_pipfile[category][entry_name]
# Add the package to the group.
if normalized_name not in parsed_pipfile[category]:
newly_added = True
parsed_pipfile[category][normalized_name] = entry
if self.settings.get("sort_pipfile"):
parsed_pipfile[category] = self._sort_category(parsed_pipfile[category])
# Write Pipfile.
self.write_toml(parsed_pipfile)
return newly_added, category, normalized_name
    def add_packages_to_pipfile_batch(self, packages_data, dev=False, categories=None):
        """
        Add multiple packages to Pipfile in a single operation for better performance.

        Args:
            packages_data: List of tuples (package, pip_line) or list of dicts with package info
            dev: Whether to add to dev-packages section
            categories: List of categories to add packages to

        Returns:
            List of tuples (newly_added, category, normalized_name) for each package
        """
        if not packages_data:
            return []
        # Determine target categories
        if categories is None or (isinstance(categories, list) and len(categories) == 0):
            categories = ["dev-packages" if dev else "packages"]
        elif isinstance(categories, str):
            categories = [categories]
        # Read Pipfile once
        parsed_pipfile = self.parsed_pipfile
        results = []
        # Ensure all categories exist
        for category in categories:
            if category not in parsed_pipfile:
                parsed_pipfile[category] = {}
        # Process all packages
        for package_data in packages_data:
            if isinstance(package_data, tuple) and len(package_data) == 2:
                package, pip_line = package_data

                # Generate entry for this package
                name, normalized_name, entry = self.generate_package_pipfile_entry(
                    package, pip_line, category=categories[0]
                )

                # Add to each specified category
                for category in categories:
                    newly_added = False
                    # Remove any existing entries with different casing
                    section = parsed_pipfile.get(category, {})
                    for entry_name in section.copy().keys():
                        if entry_name.lower() == normalized_name.lower():
                            del parsed_pipfile[category][entry_name]
                    # Check if this is a new package
                    if normalized_name not in parsed_pipfile[category]:
                        newly_added = True
                    # Add the package
                    parsed_pipfile[category][normalized_name] = entry
                    results.append((newly_added, category, normalized_name))
            elif isinstance(package_data, dict):
                # Handle pre-processed package data
                name = package_data.get("name")
                normalized_name = package_data.get("normalized_name")
                entry = package_data.get("entry")
                if name and normalized_name and entry:
                    for category in categories:
                        newly_added = False
                        # Remove any existing entries with different casing
                        section = parsed_pipfile.get(category, {})
                        for entry_name in section.copy().keys():
                            if entry_name.lower() == normalized_name.lower():
                                del parsed_pipfile[category][entry_name]
                        # Check if this is a new package
                        if normalized_name not in parsed_pipfile[category]:
                            newly_added = True
                        # Add the package
                        parsed_pipfile[category][normalized_name] = entry
                        results.append((newly_added, category, normalized_name))
        # Sort categories if requested
        if self.settings.get("sort_pipfile"):
            for category in categories:
                if category in parsed_pipfile:
                    parsed_pipfile[category] = self._sort_category(
                        parsed_pipfile[category]
                    )
        # Write Pipfile once at the end
        self.write_toml(parsed_pipfile)
        return results
def src_name_from_url(self, index_url):
    """Derive a Pipfile source name from an index URL.

    The network location of *index_url* is reduced to a candidate name
    (the portion before the last dot, with any remaining ``.``/``:``
    characters stripped).  If a source with that name already exists in
    the Pipfile, a random numeric suffix is appended so the result does
    not collide with it.

    :param index_url: URL of the package index.
    :return: A source name unique within this project's Pipfile.
    """
    location = urllib.parse.urlsplit(index_url).netloc
    # Drop the TLD ("pypi.org" -> "pypi"); keep the netloc as-is when
    # there is no dot to split on.
    if "." in location:
        location = location.rpartition(".")[0]
    src_name = location.replace(".", "").replace(":", "")
    try:
        self.get_source(name=src_name)
    except SourceNotFound:
        # No existing source uses this name, so it is safe to use as-is.
        return src_name
    # A source with this name already exists; disambiguate with a
    # random numeric suffix.
    from random import randint

    return f"{src_name}-{randint(1, 1000)}"
def add_index_to_pipfile(self, index, verify_ssl=True):
    """
    Adds a given index to the Pipfile if it doesn't already exist.

    Returns the source name regardless of whether it was newly added or
    already existed.
    """
    pipfile = self.parsed_pipfile

    # Look for an already-registered source: first by URL, then by name.
    existing = None
    try:
        existing = self.get_source(url=index)
    except SourceNotFound:
        with contextlib.suppress(SourceNotFound):
            existing = self.get_source(name=index)
    if existing is not None and existing.get("name"):
        return existing["name"]

    # Fall back to scanning the raw source table for a matching URL.
    for candidate in pipfile.get("source", []):
        if candidate.get("url") == index:
            return candidate.get("name")

    # Nothing matched: register a brand-new source entry.
    new_source = {
        "url": index,
        "verify_ssl": verify_ssl,
        "name": self.src_name_from_url(index),
    }
    if "source" in pipfile:
        pipfile["source"].append(tomlkit.item(new_source))
    else:
        pipfile["source"] = [tomlkit.item(new_source)]

    # Persist the updated Pipfile.
    self.write_toml(pipfile)
    return new_source["name"]
def recase_pipfile(self):
    """Rewrite the Pipfile on disk if package-name casing was corrected."""
    changed = self.ensure_proper_casing()
    if changed:
        self.write_toml(self.parsed_pipfile)
def load_lockfile(self, expand_env_vars=True):
    """Read Pipfile.lock, repairing missing sections as needed.

    A missing or corrupt lockfile is treated as empty; any missing
    ``_meta``/``default``/``develop`` sections are regenerated and the
    repaired contents are written back to disk before being returned.

    :param expand_env_vars: When True, expand environment variables in
        each source URL at load time.
    :return: The lockfile contents as a dict.
    """
    modified = False
    lock_path = Path(self.lockfile_location)
    pipfile_path = Path(self.pipfile_location)

    # Read the existing lockfile; a missing or corrupt file yields {}.
    try:
        with lock_path.open(encoding="utf-8") as lock:
            try:
                data = json.load(lock)
                self._lockfile_newlines = preferred_newlines(lock)
            except JSONDecodeError:
                err.print(
                    "[bold yellow]Pipfile.lock is corrupted; ignoring contents.[/bold yellow]"
                )
                data = {}
    except FileNotFoundError:
        data = {}

    # Regenerate the metadata section from the Pipfile when absent.
    if not data.get("_meta"):
        with pipfile_path.open() as pf:
            skeleton = plette.Lockfile.with_meta_from(
                plette.Pipfile.load(pf), categories=[]
            )
            data["_meta"] = skeleton._data["_meta"]
            modified = True

    # Guarantee the two standard categories exist.
    for category in ("default", "develop"):
        if data.get(category) is None:
            data[category] = {}
            modified = True

    if modified:
        self.write_lockfile(data)

    if expand_env_vars:
        # Expand environment variables in Pipfile.lock at runtime.
        for src in data["_meta"].get("sources", {}):
            # Path has no expandvars method, so os.path.expandvars is used.
            src["url"] = os.path.expandvars(src["url"])
    return data
def get_lockfile_hash(self):
    """Return the sha256 hash recorded in Pipfile.lock.

    ``None`` means there is no lockfile at all; an empty string means
    the lockfile exists but is corrupt or carries no hash.
    """
    if not Path(self.lockfile_location).exists():
        return None
    try:
        lock_data = self.load_lockfile(expand_env_vars=False)
    except ValueError:
        # Lockfile corrupted
        return ""
    if hasattr(lock_data, "keys") and "_meta" in lock_data:
        return lock_data["_meta"].get("hash", {}).get("sha256") or ""
    # Lockfile exists but has no hash at all
    return ""
def calculate_pipfile_hash(self):
    """Compute the current Pipfile's content hash (used to detect a stale lockfile)."""
    with open(self.pipfile_location) as pf:
        pipfile = plette.Pipfile.load(pf)
        return pipfile.get_hash().value
def ensure_proper_casing(self):
    """Ensures proper casing of Pipfile packages"""
    pipfile = self.parsed_pipfile
    # Both sections must be processed (for their side effects), so the
    # results are collected eagerly rather than short-circuited.
    results = [
        self.proper_case_section(pipfile.get(section, {}))
        for section in ("packages", "dev-packages")
    ]
    return any(results)
def proper_case_section(self, section):
    """Verify proper casing is retrieved, when available, for each
    dependency in the section.

    :param section: Mapping of package names to requirement specs; it is
        mutated in place when a better-cased name is found.
    :return: ``True`` when at least one package name was re-cased.
    """
    changed_values = False
    # Build the set of known-good names ONCE.  The original inline
    # ``set(self.proper_names)`` inside the comprehension condition was
    # re-evaluated for every key, making this O(n*m).
    known_names = set(self.proper_names)
    unknown_names = [k for k in section if k not in known_names]
    # Replace each package with proper casing.
    for dep in unknown_names:
        try:
            # Get new casing for package name.
            new_casing = proper_case(dep)
        except OSError:
            # Unable to normalize package name; leave it untouched.
            continue
        if new_casing != dep:
            changed_values = True
            self.register_proper_name(new_casing)
            # Replace old key with the properly-cased one, keeping the spec.
            section[new_casing] = section.pop(dep)
    # Return whether or not values have been changed.
    return changed_values
@cached_property
def finders(self):
    """Pythonfinder instances: virtualenv-only first, then with global search."""
    from .vendor.pythonfinder import Finder

    scripts_dir = str(self.virtualenv_scripts_location)
    return [
        Finder(path=scripts_dir, global_search=global_search, system=False)
        for global_search in (False, True)
    ]
@property
def finder(self):
    """The first available finder, or ``None`` when there are none."""
    finders = self.finders
    return finders[0] if finders else None
def which(self, search):
    """Locate *search* via the project's finders, falling back to ``_which``."""
    located = None
    for candidate in self.finders:
        located = candidate.which(search)
        if located:
            break
    if not located:
        located = self._which(search)
    return located
def python(self, system=False) -> str:
    """Path to the project python"""
    from pipenv.utils.shell import project_python as resolve_python

    return resolve_python(self, system=system)
def _which(self, command, location=None, allow_global=False):
if not allow_global and location is None:
if self.virtualenv_exists:
location = self.virtualenv_location
else:
location = os.environ.get("VIRTUAL_ENV", None)
location_path = Path(location) if location else None
if not (location_path and location_path.exists()) and not allow_global:
raise RuntimeError("location not created nor specified")
version_str = f"python{'.'.join([str(v) for v in sys.version_info[:2]])}"
is_python = command in ("python", Path(sys.executable).name, version_str)
if not allow_global:
scripts_location = virtualenv_scripts_dir(location_path)
if os.name == "nt":
p = find_windows_executable(str(scripts_location), command)
# Convert to Path object if it's a string
p = Path(p) if isinstance(p, str) else p
else:
p = scripts_location / command
elif is_python:
p = Path(sys.executable)
else:
p = None
if p is None or not p.exists():
if is_python:
p = (
Path(sys.executable)
if sys.executable
else Path(system_which("python"))
)
else:
p = Path(system_which(command)) if system_which(command) else None
return p
| Project |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 12703,
"end": 12754
} | class ____(DataType):
"""Boolean type."""
| Boolean |
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 8611,
"end": 8797
} | class ____(ResolvedAggregate):
metric_name: str | None
metric_type: MetricType | None
metric_unit: str | None
@dataclass(frozen=True, kw_only=True)
| ResolvedTraceMetricAggregate |
python | facebook__pyre-check | client/commands/report.py | {
"start": 2462,
"end": 5976
} | class ____(json_mixins.SnakeCaseAndExcludeJsonMixin):
path: str
mode: coverage_data.ModuleModeInfo
suppressions: Sequence[coverage_data.TypeErrorSuppression]
functions: Sequence[coverage_data.FunctionAnnotationInfo]
empty_containers: Sequence[coverage_data.EmptyContainerInfo]
@staticmethod
def collect(
module: libcst.MetadataWrapper,
path: ModulePath,
strict_by_default: bool,
ignored: bool,
) -> ModuleData:
mode = coverage_data.collect_mode(
module, strict_by_default, path.relative_to_root, ignored
)
suppressions = coverage_data.collect_suppressions(module)
functions = coverage_data.collect_functions(module)
empty_containers = coverage_data.collect_empty_containers(module)
return ModuleData(
mode=mode,
suppressions=suppressions,
functions=functions,
empty_containers=empty_containers,
# `path` is relative here so that data isn't tied to one machine.
path=str(path.relative_to_root),
)
@dataclasses.dataclass(frozen=True)
class CollectFromPathArgs:
"""
Multiprocessing requires mapping a function over a list of single
arguments, so we have to make a struct in order to parallelize
collect_from_path.
"""
path: ModulePath
strict_by_default: bool
ignored: bool
@staticmethod
def collect_from_path(
args: ModuleData.CollectFromPathArgs,
) -> Optional[ModuleData]:
module = coverage_data.module_from_path(args.path.absolute_path())
if module is None:
return None
else:
return ModuleData.collect(
module,
path=args.path,
strict_by_default=args.strict_by_default,
ignored=args.ignored,
)
@staticmethod
def collect_from_paths(
module_paths: Sequence[ModulePath],
strict_by_default: bool,
number_of_workers: int,
ignored_modules: Sequence[ModulePath],
) -> List[ModuleData]:
tasks = []
for path in module_paths:
ignored = path in ignored_modules
tasks.append(
ModuleData.CollectFromPathArgs(
path=path, strict_by_default=strict_by_default, ignored=ignored
)
)
with multiprocessing.Pool(number_of_workers) as pool:
return [
module_data
for module_data in pool.imap_unordered(
ModuleData.collect_from_path, tasks
)
if module_data is not None
]
def print_data_as_json(data: Sequence[ModuleData]) -> None:
raw_data = [module_data.to_dict() for module_data in data]
json.dump(raw_data, log.stdout)
def run(
configuration: frontend_configuration.Base,
paths: Optional[List[Path]],
) -> int:
module_paths = get_module_paths(
configuration=configuration,
paths=paths,
)
ignored_paths = [Path(path) for path in configuration.get_ignore_all_errors()]
data = ModuleData.collect_from_paths(
module_paths,
strict_by_default=configuration.is_strict(),
number_of_workers=configuration.get_number_of_workers(),
ignored_modules=get_module_paths(configuration, ignored_paths),
)
print_data_as_json(data)
return commands.ExitCode.SUCCESS
| ModuleData |
python | huggingface__transformers | src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py | {
"start": 17754,
"end": 24885
} | class ____(nn.Module):
"""Construct an Wav2Vec2ConformerSelfAttention object.
Can be enhanced with rotary or relative position embeddings.
"""
def __init__(self, config):
super().__init__()
self.head_size = config.hidden_size // config.num_attention_heads
self.num_heads = config.num_attention_heads
self.position_embeddings_type = config.position_embeddings_type
self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(p=config.attention_dropout)
if self.position_embeddings_type == "relative":
# linear transformation for positional encoding
self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://huggingface.co/papers/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
relative_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
# self-attention mechanism
batch_size, sequence_length, hidden_size = hidden_states.size()
# make sure query/key states can be != value states
query_key_states = hidden_states
value_states = hidden_states
if self.position_embeddings_type == "rotary":
if relative_position_embeddings is None:
raise ValueError(
"`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'"
)
query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
# project query_key_states and value_states
query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
# => (batch, head, time1, d_k)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
if self.position_embeddings_type == "relative":
if relative_position_embeddings is None:
raise ValueError(
"`relative_position_embeddings` has to be defined when `self.position_embeddings_type =="
" 'relative'"
)
# apply relative_position_embeddings to qk scores
# as proposed in Transformer_XL: https://huggingface.co/papers/1901.02860
scores = self._apply_relative_embeddings(
query=query, key=key, relative_position_embeddings=relative_position_embeddings
)
else:
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
# apply attention_mask if necessary
if attention_mask is not None:
scores = scores + attention_mask
# => (batch, head, time1, time2)
probs = torch.softmax(scores, dim=-1)
probs = self.dropout(probs)
# => (batch, head, time1, d_k)
hidden_states = torch.matmul(probs, value)
# => (batch, time1, hidden_size)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
hidden_states = self.linear_out(hidden_states)
return hidden_states, probs
def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
batch_size, sequence_length, hidden_size = hidden_states.size()
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
cos = relative_position_embeddings[0, :sequence_length, ...]
sin = relative_position_embeddings[1, :sequence_length, ...]
# rotate hidden_states with rotary embeddings
hidden_states = hidden_states.transpose(0, 1)
rotated_states_begin = hidden_states[..., : self.head_size // 2]
rotated_states_end = hidden_states[..., self.head_size // 2 :]
rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
hidden_states = (hidden_states * cos) + (rotated_states * sin)
hidden_states = hidden_states.transpose(0, 1)
hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
return hidden_states
def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
# 1. project positional embeddings
# => (batch, head, 2*time1-1, d_k)
proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
proj_relative_position_embeddings = proj_relative_position_embeddings.view(
relative_position_embeddings.size(0), -1, self.num_heads, self.head_size
)
proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
# 2. Add bias to query
# => (batch, head, time1, d_k)
query = query.transpose(1, 2)
q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
# 3. attention score: first compute matrix a and matrix c
# as described in https://huggingface.co/papers/1901.02860 Section 3.3
# => (batch, head, time1, time2)
scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
# 4. then compute matrix b and matrix d
# => (batch, head, time1, 2*time1-1)
scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
# 5. shift matrix b and matrix d
zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1]
# 6. sum matrices
# => (batch, head, time1, time2)
scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
return scores
| Wav2Vec2ConformerSelfAttention |
python | ray-project__ray | python/ray/train/tests/test_tensorflow_checkpoint.py | {
"start": 1173,
"end": 4614
} | class ____(unittest.TestCase):
def setUp(self):
self.model = get_model()
self.preprocessor = DummyPreprocessor(1)
def test_from_model(self):
checkpoint = TensorflowCheckpoint.from_model(
self.model, preprocessor=DummyPreprocessor(1)
)
loaded_model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
assert compare_weights(loaded_model.get_weights(), self.model.get_weights())
assert preprocessor.multiplier == 1
def test_from_saved_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
model_dir_path = os.path.join(tmp_dir, "my_model")
self.model.save(model_dir_path, save_format="tf")
checkpoint = TensorflowCheckpoint.from_saved_model(
model_dir_path, preprocessor=DummyPreprocessor(1)
)
loaded_model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
assert compare_weights(self.model.get_weights(), loaded_model.get_weights())
assert preprocessor.multiplier == 1
def test_from_h5_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
model_file_path = os.path.join(tmp_dir, "my_model.h5")
self.model.save(model_file_path)
checkpoint = TensorflowCheckpoint.from_h5(
model_file_path, preprocessor=DummyPreprocessor(1)
)
loaded_model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
assert compare_weights(self.model.get_weights(), loaded_model.get_weights())
assert preprocessor.multiplier == 1
def test_tensorflow_checkpoint_saved_model():
# The test passes if the following can run successfully.
def train_fn():
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=()),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(1),
]
)
with tempfile.TemporaryDirectory() as tempdir:
model.save(tempdir)
checkpoint = TensorflowCheckpoint.from_saved_model(tempdir)
train.report({"my_metric": 1}, checkpoint=checkpoint)
trainer = TensorflowTrainer(
train_loop_per_worker=train_fn, scaling_config=ScalingConfig(num_workers=2)
)
assert trainer.fit().checkpoint
def test_tensorflow_checkpoint_h5():
# The test passes if the following can run successfully.
def train_func():
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=()),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(1),
]
)
with tempfile.TemporaryDirectory() as tempdir:
model.save(os.path.join(tempdir, "my_model.h5"))
checkpoint = TensorflowCheckpoint.from_h5(
os.path.join(tempdir, "my_model.h5")
)
train.report({"my_metric": 1}, checkpoint=checkpoint)
trainer = TensorflowTrainer(
train_loop_per_worker=train_func, scaling_config=ScalingConfig(num_workers=2)
)
assert trainer.fit().checkpoint
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| TestFromModel |
python | scikit-learn__scikit-learn | sklearn/preprocessing/_data.py | {
"start": 80862,
"end": 88255
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
r"""Center an arbitrary kernel matrix :math:`K`.
Let define a kernel :math:`K` such that:
.. math::
K(X, Y) = \phi(X) . \phi(Y)^{T}
:math:`\phi(X)` is a function mapping of rows of :math:`X` to a
Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`.
This class allows to compute :math:`\tilde{K}(X, Y)` such that:
.. math::
\tilde{K(X, Y)} = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T}
:math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert
space.
`KernelCenterer` centers the features without explicitly computing the
mapping :math:`\phi(\cdot)`. Working with centered kernels is sometime
expected when dealing with algebra computation such as eigendecomposition
for :class:`~sklearn.decomposition.KernelPCA` for instance.
Read more in the :ref:`User Guide <kernel_centering>`.
Attributes
----------
K_fit_rows_ : ndarray of shape (n_samples,)
Average of each column of kernel matrix.
K_fit_all_ : float
Average of kernel matrix.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.kernel_approximation.Nystroem : Approximate a kernel map
using a subset of the training data.
References
----------
.. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
"Nonlinear component analysis as a kernel eigenvalue problem."
Neural computation 10.5 (1998): 1299-1319.
<https://www.mlpack.org/papers/kpca.pdf>`_
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
# X is called K in these methods.
__metadata_request__transform = {"K": metadata_routing.UNUSED}
__metadata_request__fit = {"K": metadata_routing.UNUSED}
def fit(self, K, y=None):
"""Fit KernelCenterer.
Parameters
----------
K : ndarray of shape (n_samples, n_samples)
Kernel matrix.
y : None
Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
xp, _ = get_namespace(K)
K = validate_data(self, K, dtype=_array_api.supported_float_dtypes(xp))
if K.shape[0] != K.shape[1]:
raise ValueError(
"Kernel matrix must be a square matrix."
" Input is a {}x{} matrix.".format(K.shape[0], K.shape[1])
)
n_samples = K.shape[0]
self.K_fit_rows_ = xp.sum(K, axis=0) / n_samples
self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples
return self
def transform(self, K, copy=True):
"""Center kernel matrix.
Parameters
----------
K : ndarray of shape (n_samples1, n_samples2)
Kernel matrix.
copy : bool, default=True
Set to False to perform inplace computation.
Returns
-------
K_new : ndarray of shape (n_samples1, n_samples2)
Returns the instance itself.
"""
check_is_fitted(self)
xp, _ = get_namespace(K)
K = validate_data(
self,
K,
copy=copy,
force_writeable=True,
dtype=_array_api.supported_float_dtypes(xp),
reset=False,
)
K_pred_cols = (xp.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, None]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _n_features_out(self):
"""Number of transformed output features."""
# Used by ClassNamePrefixFeaturesOutMixin. This model preserves the
# number of input features but this is not a one-to-one mapping in the
# usual sense. Hence the choice not to use OneToOneFeatureMixin to
# implement get_feature_names_out for this class.
return self.n_features_in_
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.pairwise = True
tags.array_api_support = True
return tags
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"value": [Interval(Real, None, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if X.format == "coo":
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif X.format == "csc":
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
| KernelCenterer |
python | great-expectations__great_expectations | great_expectations/data_context/cloud_constants.py | {
"start": 500,
"end": 946
} | class ____(str, Enum):
ACCOUNTS_ME = "accounts/me"
CHECKPOINT = "checkpoint"
DATASOURCE = "datasource"
DATA_ASSET = "data_asset"
DATA_CONTEXT = "data_context_configuration"
DATA_CONTEXT_VARIABLES = "data_context_variables"
EXPECTATION_SUITE = "expectation_suite"
RENDERED_DATA_DOC = "rendered_data_doc"
VALIDATION_DEFINITION = "validation_definition"
VALIDATION_RESULT = "validation_result"
| GXCloudRESTResource |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/errors.py | {
"start": 305,
"end": 505
} | class ____(DagsterDbtError, DagsterInvariantViolationError):
"""Represents an error when a dbt Cloud job is not supported by the ``dagster-dbt`` library."""
| DagsterDbtCloudJobInvariantViolationError |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 1907,
"end": 4648
} | class ____(BaseDataset):
"""
Feature: Datasets can be created from a shape only
"""
def test_create_scalar(self):
""" Create a scalar dataset """
dset = self.f.create_dataset(make_name(), ())
self.assertEqual(dset.shape, ())
def test_create_simple(self):
""" Create a size-1 dataset """
dset = self.f.create_dataset(make_name(), (1,))
self.assertEqual(dset.shape, (1,))
def test_create_integer(self):
""" Create a size-1 dataset with integer shape"""
dset = self.f.create_dataset(make_name(), 1)
self.assertEqual(dset.shape, (1,))
def test_create_extended_1d(self):
""" Create an extended dataset with tuple shape """
dset = self.f.create_dataset(make_name(), (63,))
self.assertEqual(dset.shape, (63,))
self.assertEqual(dset.size, 63)
def test_create_extended_2d(self):
""" Create an extended dataset with 2 dimensions """
dset = self.f.create_dataset(make_name(), (6, 10))
self.assertEqual(dset.shape, (6, 10))
self.assertEqual(dset.size, (60))
def test_create_integer_extended(self):
""" Create an extended dataset with integer shape """
dset = self.f.create_dataset(make_name(), 63)
self.assertEqual(dset.shape, (63,))
self.assertEqual(dset.size, 63)
def test_default_dtype(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset(make_name(), (63,))
self.assertEqual(dset.dtype, np.dtype('=f4'))
def test_missing_shape(self):
""" Missing shape raises TypeError """
with self.assertRaises(TypeError):
self.f.create_dataset(make_name())
def test_long_double(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset(make_name(), (63,), dtype=np.longdouble)
if platform.machine() in ['ppc64le']:
pytest.xfail("Storage of long double deactivated on %s" % platform.machine())
self.assertEqual(dset.dtype, np.longdouble)
@ut.skipIf(not hasattr(np, "complex256"), "No support for complex256")
def test_complex256(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset(make_name(), (63,),
dtype=np.dtype('complex256'))
self.assertEqual(dset.dtype, np.dtype('complex256'))
def test_name_bytes(self):
dset = self.f.create_dataset(make_name("foo").encode('utf-8'), (1,))
self.assertEqual(dset.shape, (1,))
dset2 = self.f.create_dataset((make_name("bar{}/baz")).encode('utf-8'), (2,))
self.assertEqual(dset2.shape, (2,))
| TestCreateShape |
python | donnemartin__interactive-coding-challenges | online_judges/longest_abs_file_path/test_length_longest_path.py | {
"start": 18,
"end": 644
} | class ____(unittest.TestCase):
def test_length_longest_path(self):
solution = Solution()
self.assertRaises(TypeError, solution.length_longest_path, None)
self.assertEqual(solution.length_longest_path(''), 0)
file_system = 'dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext'
expected = 32
self.assertEqual(solution.length_longest_path(file_system), expected)
print('Success: test_length_longest_path')
def main():
test = TestSolution()
test.test_length_longest_path()
if __name__ == '__main__':
main()
| TestSolution |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 58049,
"end": 60197
} | class ____(Response):
"""
Response of workers.get_metric_keys endpoint.
:param categories: List of unique metric categories found in the statistics of
the requested workers.
:type categories: Sequence[MetricsCategory]
"""
_service = "workers"
_action = "get_metric_keys"
_version = "2.13"
_schema = {
"definitions": {
"metrics_category": {
"properties": {
"metric_keys": {
"description": "The names of the metrics in the category.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Name of the metrics category.",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"categories": {
"description": "List of unique metric categories found in the statistics of the requested workers.",
"items": {"$ref": "#/definitions/metrics_category"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, categories: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetMetricKeysResponse, self).__init__(**kwargs)
self.categories = categories
@schema_property("categories")
def categories(self) -> Optional[List[Any]]:
return self._property_categories
@categories.setter
def categories(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_categories = None
return
self.assert_isinstance(value, "categories", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricsCategory.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "categories", MetricsCategory, is_array=True)
self._property_categories = value
| GetMetricKeysResponse |
python | kamyu104__LeetCode-Solutions | Python/number-of-unique-flavors-after-sharing-k-candies.py | {
"start": 50,
"end": 565
} | class ____(object):
def shareCandies(self, candies, k):
"""
:type candies: List[int]
:type k: int
:rtype: int
"""
cnt = collections.Counter(candies[i] for i in xrange(k, len(candies)))
result = curr = len(cnt)
for i in xrange(k, len(candies)):
cnt[candies[i]] -= 1
curr += (cnt[candies[i-k]] == 0) - (cnt[candies[i]] == 0)
cnt[candies[i-k]] += 1
result = max(result, curr)
return result
| Solution |
python | ray-project__ray | python/ray/autoscaler/_private/cluster_dump.py | {
"start": 1359,
"end": 1797
} | class ____:
"""Node (as in "machine")"""
def __init__(
self,
host: str,
ssh_user: str = "ubuntu",
ssh_key: str = "~/ray_bootstrap_key.pem",
docker_container: Optional[str] = None,
is_head: bool = False,
):
self.host = host
self.ssh_user = ssh_user
self.ssh_key = ssh_key
self.docker_container = docker_container
self.is_head = is_head
| Node |
python | doocs__leetcode | solution/2000-2099/2002.Maximum Product of the Length of Two Palindromic Subsequences/Solution.py | {
"start": 0,
"end": 855
} | class ____:
def maxProduct(self, s: str) -> int:
n = len(s)
p = [True] * (1 << n)
for k in range(1, 1 << n):
i, j = 0, n - 1
while i < j:
while i < j and (k >> i & 1) == 0:
i += 1
while i < j and (k >> j & 1) == 0:
j -= 1
if i < j and s[i] != s[j]:
p[k] = False
break
i, j = i + 1, j - 1
ans = 0
for i in range(1, 1 << n):
if p[i]:
mx = ((1 << n) - 1) ^ i
j = mx
a = i.bit_count()
while j:
if p[j]:
b = j.bit_count()
ans = max(ans, a * b)
j = (j - 1) & mx
return ans
| Solution |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 3570,
"end": 3806
} | class ____(OverrideSwitchMixin, TransactionTestCase):
"""
Run tests with Django TransactionTestCase
"""
def req():
r = RequestFactory().get('/')
r.user = AnonymousUser()
return r
| OverrideSwitchTransactionTestCase |
python | Pylons__pyramid | tests/test_scripts/test_ptweens.py | {
"start": 1834,
"end": 2092
} | class ____(unittest.TestCase):
def _callFUT(self, argv):
from pyramid.scripts.ptweens import main
return main(argv, quiet=True)
def test_it(self):
result = self._callFUT(['ptweens'])
self.assertEqual(result, 2)
| Test_main |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 6959,
"end": 7463
} | class ____(Opcode):
# Even though dis documentation says that END_ASYNC_FOR may reraise an
# exception, we do not include NO_NEXT in the flags because doing so would
# cause the return statement for an async method to be skipped, leading to
# an incorrect return type.
# See tests/test_stdlib2:StdlibTestsFeatures.test_async_iter and
# tests/test_coroutine:GeneratorFeatureTest.test_async_for_pyi for tests
# that fail if we add NO_NEXT.
_FLAGS = HAS_JUNKNOWN
__slots__ = ()
| END_ASYNC_FOR |
python | django__django | tests/admin_inlines/admin.py | {
"start": 5831,
"end": 6020
} | class ____(admin.TabularInline):
model = FootNote
form = FootNoteForm
def has_change_permission(self, request, obj=None):
return False
| FootNoteNonEditableInlineCustomForm |
python | spyder-ide__spyder | spyder/api/preferences.py | {
"start": 1961,
"end": 6406
} | class ____(SpyderConfigPage):
"""
Widget to expose the options a plugin offers for configuration as
an entry in Spyder's Preferences dialog.
"""
# TODO: Temporal attribute to handle which appy_settings method to use
# the one of the conf page or the one in the plugin, while the config
# dialog system is updated.
APPLY_CONF_PAGE_SETTINGS = False
def __init__(self, plugin, parent):
self.plugin = plugin
self.main = parent.main
if hasattr(plugin, 'CONF_SECTION'):
self.CONF_SECTION = plugin.CONF_SECTION
if hasattr(plugin, 'get_font'):
self.get_font = plugin.get_font
if not self.APPLY_CONF_PAGE_SETTINGS:
self._patch_apply_settings(plugin)
SpyderConfigPage.__init__(self, parent)
def _wrap_apply_settings(self, func):
"""
Wrap apply_settings call to ensure that a user-defined custom call
is called alongside the Spyder Plugin API configuration propagation
call.
"""
def wrapper(self, options):
opts = self.previous_apply_settings() or set({})
opts |= options
self.aggregate_sections_partials(opts)
func(opts)
return types.MethodType(wrapper, self)
def _patch_apply_settings(self, plugin):
self.previous_apply_settings = self.apply_settings
self.apply_settings = self._wrap_apply_settings(plugin.apply_conf)
self.get_option = plugin.get_conf
self.set_option = plugin.set_conf
self.remove_option = plugin.remove_conf
def aggregate_sections_partials(self, opts):
"""Aggregate options by sections in order to notify observers."""
to_update = {}
for opt in opts:
if isinstance(opt, tuple):
# This is necessary to filter tuple options that do not
# belong to a section.
if len(opt) == 2 and opt[0] is None:
opt = opt[1]
section = self.CONF_SECTION
if opt in self.cross_section_options:
section = self.cross_section_options[opt]
section_options = to_update.get(section, [])
section_options.append(opt)
to_update[section] = section_options
for section in to_update:
section_prefix = PrefixedTuple()
# Notify section observers
CONF.notify_observers(section, '__section',
recursive_notification=False)
for opt in to_update[section]:
if isinstance(opt, tuple):
opt = opt[:-1]
section_prefix.add_path(opt)
# Notify prefixed observers
for prefix in section_prefix:
try:
CONF.notify_observers(section, prefix,
recursive_notification=False)
except Exception:
# Prevent unexpected failures on tests
pass
def get_name(self):
"""
Return plugin name to use in preferences page title, and
message boxes.
Normally you do not have to reimplement it, as soon as the
plugin name in preferences page will be the same as the plugin
title.
"""
return self.plugin.get_name()
def get_icon(self):
"""
Return plugin icon to use in preferences page.
Normally you do not have to reimplement it, as soon as the
plugin icon in preferences page will be the same as the plugin
icon.
"""
return self.plugin.get_icon()
def setup_page(self):
"""
Setup configuration page widget
You should implement this method and set the layout of the
preferences page.
layout = QVBoxLayout()
layout.addWidget(...)
...
self.setLayout(layout)
"""
raise NotImplementedError
def apply_settings(self) -> OptionSet:
"""
Hook called to manually apply settings that cannot be automatically
applied.
Reimplement this if the configuration page has complex widgets that
cannot be created with any of the `self.create_*` calls.
This call should return a set containing the configuration options that
changed.
"""
return set({})
| PluginConfigPage |
python | geekcomputers__Python | CliYoutubeDownloader/CliYoutubeDownloader.py | {
"start": 41,
"end": 2527
} | class ____:
def __init__(self):
self.url = str(input("Enter the URL of video : "))
self.youtube = pytube.YouTube(
self.url, on_progress_callback=YouTubeDownloder.onProgress
)
self.showTitle()
def showTitle(self):
print("title : {0}\n".format(self.youtube.title))
self.showStreams()
def showStreams(self):
self.streamNo = 1
for stream in self.youtube.streams:
print(
"{0} => resolution:{1}/fps:{2}/type:{3}".format(
self.streamNo, stream.resolution, stream.fps, stream.type
)
)
self.streamNo += 1
self.chooseStream()
def chooseStream(self):
self.choose = int(input("Please select one : "))
self.validateChooseValue()
def validateChooseValue(self):
if self.choose in range(1, self.streamNo):
self.getStream()
else:
print("Please enter a correct option on the list.")
self.chooseStream()
def getStream(self):
self.stream = self.youtube.streams[self.choose - 1]
self.getFileSize()
def getFileSize(self):
global file_size
file_size = self.stream.filesize / 1000000
self.getPermisionToContinue()
def getPermisionToContinue(self):
print(
"\n Title : {0} \n Author : {1} \n Size : {2:.2f}MB \n Resolution : {3} \n FPS : {4} \n ".format(
self.youtube.title,
self.youtube.author,
file_size,
self.stream.resolution,
self.stream.fps,
)
)
if input("Do you want it ?(default = (y)es) or (n)o ") == "n":
self.showStreams()
else:
self.main()
def download(self):
self.stream.download()
@staticmethod
def onProgress(stream=None, chunk=None, remaining=None):
file_downloaded = file_size - (remaining / 1000000)
print(
f"Downloading ... {file_downloaded / file_size * 100:0.2f} % [{file_downloaded:.1f}MB of {file_size:.1f}MB]",
end="\r",
)
def main(self):
try:
self.download()
except KeyboardInterrupt:
print("Canceled. ")
sys.exit(0)
if __name__ == "__main__":
try:
YouTubeDownloder()
except KeyboardInterrupt:
pass
except Exception as e:
print(e)
| YouTubeDownloder |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_filters.py | {
"start": 10974,
"end": 14133
} | class ____(OrganizationFilterTestCase):
def get_filterset_for_user(self, user, organization, data=None, **kwargs):
self.client.force_login(user)
url = reverse("organization_team_list", kwargs={"slug": organization.slug})
resp = self.client.get(url, data=data)
return resp.context_data.get("filter")
@pytest.mark.parametrize(
"user,organization,teams",
[
("user_a", "org_a", ["team_a"]),
("owner_a", "org_a", ["team_a", "team_a_empty"]),
("user_b", "org_b", ["team_b"]),
("owner_b", "org_b", ["team_b"]),
],
indirect=True,
)
def test_unfiltered_queryset(self, user, organization, teams):
"""No active filters returns full queryset."""
filter = self.get_filterset_for_user(
user,
organization,
)
assertQuerySetEqual(
filter.qs,
teams,
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,team",
[
("user_a", "org_a", "team_a"),
("owner_a", "org_a", "team_a"),
("owner_a", "org_a", "team_a_empty"),
("user_b", "org_b", "team_b"),
("owner_b", "org_b", "team_b"),
],
indirect=True,
)
def test_filtered_queryset_team_choice(self, user, organization, team):
"""Valid team choice returns expected results."""
filter = self.get_filterset_for_user(
user,
organization,
data={"slug": team.slug},
)
assertQuerySetEqual(
filter.qs,
[team],
transform=lambda o: o,
ordered=False,
)
@pytest.mark.parametrize(
"user,organization,team",
[
("user_a", "org_a", "team_a"),
("owner_a", "org_a", "team_a"),
("user_b", "org_b", "team_b"),
("owner_b", "org_b", "team_b"),
],
indirect=True,
)
def test_filtered_queryset_team_invalid_choice(self, user, organization, team):
"""Invalid team choice returns the original queryset."""
wrong_team = fixture.get(Team)
filter = self.get_filterset_for_user(
user,
organization,
{"slug": wrong_team.slug},
)
assert not filter.is_valid()
@pytest.mark.parametrize(
"user,organization,teams",
[
("user_a", "org_a", ["team_a"]),
("owner_a", "org_a", ["team_a", "team_a_empty"]),
("user_b", "org_b", ["team_b"]),
("owner_b", "org_b", ["team_b"]),
],
indirect=True,
)
def test_team_filter_choices(self, user, organization, teams):
"""Team filter choices limited to organization teams."""
filter = self.get_filterset_for_user(
user,
organization,
)
choices = [team.slug for team in teams]
choices.insert(0, "")
assert list(dict(filter.filters["slug"].field.choices).keys()) == choices
| TestOrganizationTeamListFilterSet |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 36105,
"end": 36738
} | class ____(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self,
element: Any,
resolved: Any,
argname: Optional[str] = None,
*,
explicit_subquery: bool = False,
**kw: Any,
) -> Any:
if resolved._is_select_base and explicit_subquery:
return resolved.subquery()
self._raise_for_expected(element, argname, resolved)
def _post_coercion(self, element, *, deannotate=False, **kw):
if deannotate:
return element._deannotate()
else:
return element
| FromClauseImpl |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_mwaa.py | {
"start": 1163,
"end": 12629
} | class ____:
@pytest.fixture
def mock_conn(self):
with mock.patch.object(MwaaHook, "conn") as m:
yield m
def setup_method(self):
self.hook = MwaaHook()
def test_init(self):
assert self.hook.client_type == "mwaa"
@mock_aws
def test_get_conn(self):
assert self.hook.conn is not None
@pytest.mark.parametrize(
"body",
[
pytest.param(None, id="no_body"),
pytest.param(BODY, id="non_empty_body"),
],
)
def test_invoke_rest_api_success(self, body, mock_conn, example_responses):
boto_invoke_mock = mock.MagicMock(return_value=example_responses["success"])
mock_conn.invoke_rest_api = boto_invoke_mock
retval = self.hook.invoke_rest_api(
env_name=ENV_NAME, path=PATH, method=METHOD, body=body, query_params=QUERY_PARAMS
)
kwargs_to_assert = {
"Name": ENV_NAME,
"Path": PATH,
"Method": METHOD,
"Body": body if body else {},
"QueryParameters": QUERY_PARAMS,
}
boto_invoke_mock.assert_called_once_with(**kwargs_to_assert)
mock_conn.create_web_login_token.assert_not_called()
assert retval == {k: v for k, v in example_responses["success"].items() if k != "ResponseMetadata"}
def test_invoke_rest_api_failure(self, mock_conn, example_responses):
error = ClientError(error_response=example_responses["failure"], operation_name="invoke_rest_api")
mock_conn.invoke_rest_api = mock.MagicMock(side_effect=error)
mock_error_log = mock.MagicMock()
self.hook.log.error = mock_error_log
with pytest.raises(ClientError) as caught_error:
self.hook.invoke_rest_api(env_name=ENV_NAME, path=PATH, method=METHOD)
assert caught_error.value == error
mock_conn.create_web_login_token.assert_not_called()
expected_log = {k: v for k, v in example_responses["failure"].items() if k != "ResponseMetadata"}
mock_error_log.assert_called_once_with(expected_log)
@pytest.mark.parametrize("generate_local_token", [pytest.param(True), pytest.param(False)])
@mock.patch("airflow.providers.amazon.aws.hooks.mwaa.requests.Session")
def test_invoke_rest_api_local_token_parameter(
self, mock_create_session, generate_local_token, mock_conn
):
self.hook.invoke_rest_api(
env_name=ENV_NAME, path=PATH, method=METHOD, generate_local_token=generate_local_token
)
if generate_local_token:
mock_conn.invoke_rest_api.assert_not_called()
mock_conn.create_web_login_token.assert_called_once()
mock_create_session.assert_called_once()
mock_create_session.return_value.request.assert_called_once()
else:
mock_conn.invoke_rest_api.assert_called_once()
@mock.patch.object(MwaaHook, "_get_session_conn")
def test_invoke_rest_api_fallback_success_when_iam_fails_with_airflow2(
self, mock_get_session_conn, mock_conn, example_responses
):
boto_invoke_error = ClientError(
error_response=example_responses["missingIamRole"], operation_name="invoke_rest_api"
)
mock_conn.invoke_rest_api = mock.MagicMock(side_effect=boto_invoke_error)
kwargs_to_assert = {
"method": METHOD,
"url": f"https://{HOSTNAME}/api/v1{PATH}",
"params": QUERY_PARAMS,
"headers": {},
"json": BODY,
"timeout": 10,
}
mock_response = mock.MagicMock()
mock_response.status_code = example_responses["success"]["RestApiStatusCode"]
mock_response.json.return_value = example_responses["success"]["RestApiResponse"]
mock_session = mock.MagicMock()
mock_session.request.return_value = mock_response
mock_get_session_conn.return_value = (mock_session, HOSTNAME, None)
retval = self.hook.invoke_rest_api(
env_name=ENV_NAME, path=PATH, method=METHOD, body=BODY, query_params=QUERY_PARAMS
)
mock_session.request.assert_called_once_with(**kwargs_to_assert)
mock_response.raise_for_status.assert_called_once()
assert retval == {k: v for k, v in example_responses["success"].items() if k != "ResponseMetadata"}
@mock.patch.object(MwaaHook, "_get_session_conn")
def test_invoke_rest_api_fallback_success_when_iam_fails_with_airflow3(
self, mock_get_session_conn, mock_conn, example_responses
):
boto_invoke_error = ClientError(
error_response=example_responses["missingIamRole"], operation_name="invoke_rest_api"
)
mock_conn.invoke_rest_api = mock.MagicMock(side_effect=boto_invoke_error)
kwargs_to_assert = {
"method": METHOD,
"url": f"https://{HOSTNAME}/api/v2{PATH}",
"params": QUERY_PARAMS,
"headers": {
"Authorization": "Bearer token",
"Content-Type": "application/json",
},
"json": BODY,
"timeout": 10,
}
mock_response = mock.MagicMock()
mock_response.status_code = example_responses["success"]["RestApiStatusCode"]
mock_response.json.return_value = example_responses["success"]["RestApiResponse"]
mock_session = mock.MagicMock()
mock_session.request.return_value = mock_response
mock_login_response = mock.MagicMock()
mock_login_response.cookies = {"_token": "token"}
mock_get_session_conn.return_value = (mock_session, HOSTNAME, mock_login_response)
retval = self.hook.invoke_rest_api(
env_name=ENV_NAME,
path=PATH,
method=METHOD,
body=BODY,
query_params=QUERY_PARAMS,
airflow_version=3,
)
mock_session.request.assert_called_once_with(**kwargs_to_assert)
mock_response.raise_for_status.assert_called_once()
assert retval == {k: v for k, v in example_responses["success"].items() if k != "ResponseMetadata"}
@mock.patch.object(MwaaHook, "_get_session_conn")
def test_invoke_rest_api_using_local_session_token_failure(
self, mock_get_session_conn, example_responses
):
mock_response = mock.MagicMock()
mock_response.json.return_value = example_responses["failure"]["RestApiResponse"]
error = requests.HTTPError(response=mock_response)
mock_response.raise_for_status.side_effect = error
mock_session = mock.MagicMock()
mock_session.request.return_value = mock_response
mock_get_session_conn.return_value = (mock_session, HOSTNAME, None)
mock_error_log = mock.MagicMock()
self.hook.log.error = mock_error_log
with pytest.raises(requests.HTTPError) as caught_error:
self.hook.invoke_rest_api(env_name=ENV_NAME, path=PATH, method=METHOD, generate_local_token=True)
assert caught_error.value == error
mock_error_log.assert_called_once_with(example_responses["failure"]["RestApiResponse"])
@mock.patch("airflow.providers.amazon.aws.hooks.mwaa.requests.Session")
def test_get_session_conn_airflow2(self, mock_create_session, mock_conn):
token = "token"
mock_conn.create_web_login_token.return_value = {"WebServerHostname": HOSTNAME, "WebToken": token}
login_url = f"https://{HOSTNAME}/aws_mwaa/login"
login_payload = {"token": token}
mock_session = mock.MagicMock()
mock_login_response = mock.MagicMock()
mock_session.post.return_value = mock_login_response
mock_create_session.return_value = mock_session
retval = self.hook._get_session_conn(env_name=ENV_NAME, airflow_version=2)
mock_conn.create_web_login_token.assert_called_once_with(Name=ENV_NAME)
mock_create_session.assert_called_once_with()
mock_session.post.assert_called_once_with(login_url, data=login_payload, timeout=10)
mock_session.post.return_value.raise_for_status.assert_called_once()
assert retval == (mock_session, HOSTNAME, mock_login_response)
@mock.patch("airflow.providers.amazon.aws.hooks.mwaa.requests.Session")
def test_get_session_conn_airflow3(self, mock_create_session, mock_conn):
token = "token"
mock_conn.create_web_login_token.return_value = {"WebServerHostname": HOSTNAME, "WebToken": token}
login_url = f"https://{HOSTNAME}/pluginsv2/aws_mwaa/login"
login_payload = {"token": token}
mock_session = mock.MagicMock()
mock_login_response = mock.MagicMock()
mock_session.post.return_value = mock_login_response
mock_create_session.return_value = mock_session
retval = self.hook._get_session_conn(env_name=ENV_NAME, airflow_version=3)
mock_conn.create_web_login_token.assert_called_once_with(Name=ENV_NAME)
mock_create_session.assert_called_once_with()
mock_session.post.assert_called_once_with(login_url, data=login_payload, timeout=10)
mock_session.post.return_value.raise_for_status.assert_called_once()
assert retval == (mock_session, HOSTNAME, mock_login_response)
@pytest.fixture
def example_responses(self):
"""Fixture for test responses to avoid mutation between tests."""
return {
"success": {
"ResponseMetadata": {
"RequestId": "some ID",
"HTTPStatusCode": 200,
"HTTPHeaders": {"header1": "value1"},
"RetryAttempts": 0,
},
"RestApiStatusCode": 200,
"RestApiResponse": {
"conf": {},
"dag_id": "hello_world",
"dag_run_id": "manual__2025-02-08T00:33:09.457198+00:00",
"data_interval_end": "2025-02-08T00:33:09.457198+00:00",
"data_interval_start": "2025-02-08T00:33:09.457198+00:00",
"execution_date": "2025-02-08T00:33:09.457198+00:00",
"logical_date": "2025-02-08T00:33:09.457198+00:00",
"run_type": "manual",
"state": "queued",
},
},
"failure": {
"Error": {"Message": "", "Code": "RestApiClientException"},
"ResponseMetadata": {
"RequestId": "some ID",
"HTTPStatusCode": 400,
"HTTPHeaders": {"header1": "value1"},
"RetryAttempts": 0,
},
"RestApiStatusCode": 404,
"RestApiResponse": {
"detail": "DAG with dag_id: 'hello_world1' not found",
"status": 404,
"title": "DAG not found",
"type": "https://airflow.apache.org/docs/apache-airflow/2.10.3/stable-rest-api-ref.html#section/Errors/NotFound",
},
},
"missingIamRole": {
"Error": {"Message": "No Airflow role granted in IAM.", "Code": "AccessDeniedException"},
"ResponseMetadata": {
"RequestId": "some ID",
"HTTPStatusCode": 403,
"HTTPHeaders": {"header1": "value1"},
"RetryAttempts": 0,
},
},
}
| TestMwaaHook |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 25802,
"end": 26466
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer
| Blip2QFormerIntermediate |
python | numba__numba | numba/core/typed_passes.py | {
"start": 10611,
"end": 12867
} | class ____(FunctionPass):
_name = "parfor_pass"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
"""
Convert data-parallel computations into Parfor nodes
"""
# Ensure we have an IR and type information.
assert state.func_ir
parfor_pass = _parfor_ParforPass(state.func_ir,
state.typemap,
state.calltypes,
state.return_type,
state.typingctx,
state.targetctx,
state.flags.auto_parallel,
state.flags,
state.metadata,
state.parfor_diagnostics)
parfor_pass.run()
# check the parfor pass worked and warn if it didn't
has_parfor = False
for blk in state.func_ir.blocks.values():
for stmnt in blk.body:
if isinstance(stmnt, Parfor):
has_parfor = True
break
else:
continue
break
if not has_parfor:
# parfor calls the compiler chain again with a string
if not (config.DISABLE_PERFORMANCE_WARNINGS or
state.func_ir.loc.filename == '<string>'):
url = ("https://numba.readthedocs.io/en/stable/user/"
"parallel.html#diagnostics")
msg = ("\nThe keyword argument 'parallel=True' was specified "
"but no transformation for parallel execution was "
"possible.\n\nTo find out why, try turning on parallel "
"diagnostics, see %s for help." % url)
warnings.warn(errors.NumbaPerformanceWarning(msg,
state.func_ir.loc))
# Add reload function to initialize the parallel backend.
state.reload_init.append(_reload_parfors)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| ParforPass |
python | pytorch__pytorch | torch/_dynamo/types.py | {
"start": 1402,
"end": 1624
} | class ____(NamedTuple):
# A string repr of the piece of failed guard code we eval-ed
reason: str
# A code object where we failed a guard
orig_code: types.CodeType
@dataclasses.dataclass(frozen=True)
| GuardFail |
python | google__flatbuffers | tests/namespace_test/NamespaceA/NamespaceB/StructInNestedNS.py | {
"start": 181,
"end": 908
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 8
# StructInNestedNS
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StructInNestedNS
def A(self):
return self._tab.Get(
flatbuffers.number_types.Int32Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0),
)
# StructInNestedNS
def B(self):
return self._tab.Get(
flatbuffers.number_types.Int32Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4),
)
def CreateStructInNestedNS(builder, a, b):
builder.Prep(4, 8)
builder.PrependInt32(b)
builder.PrependInt32(a)
return builder.Offset()
| StructInNestedNS |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 43539,
"end": 43918
} | class ____:
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
def test_cbrt(self):
x = np.array([1., 2., -3., np.inf, -np.inf])
assert_almost_equal(np.cbrt(x**3), x)
assert_(np.isnan(np.cbrt(np.nan)))
assert_equal(np.cbrt(np.inf), np.inf)
assert_equal(np.cbrt(-np.inf), -np.inf)
| TestCbrt |
python | sympy__sympy | sympy/polys/domains/gmpyintegerring.py | {
"start": 418,
"end": 3037
} | class ____(IntegerRing):
"""Integer ring based on GMPY's ``mpz`` type.
This will be the implementation of :ref:`ZZ` if ``gmpy`` or ``gmpy2`` is
installed. Elements will be of type ``gmpy.mpz``.
"""
dtype = GMPYInteger
zero = dtype(0)
one = dtype(1)
tp = type(one)
alias = 'ZZ_gmpy'
def __init__(self):
"""Allow instantiation of this domain. """
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(int(a))
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Integer:
return GMPYInteger(a.p)
elif int_valued(a):
return GMPYInteger(int(a))
else:
raise CoercionFailed("expected an integer, got %s" % a)
def from_FF_python(K1, a, K0):
"""Convert ``ModularInteger(int)`` to GMPY's ``mpz``. """
return K0.to_int(a)
def from_ZZ_python(K1, a, K0):
"""Convert Python's ``int`` to GMPY's ``mpz``. """
return GMPYInteger(a)
def from_QQ(K1, a, K0):
"""Convert Python's ``Fraction`` to GMPY's ``mpz``. """
if a.denominator == 1:
return GMPYInteger(a.numerator)
def from_QQ_python(K1, a, K0):
"""Convert Python's ``Fraction`` to GMPY's ``mpz``. """
if a.denominator == 1:
return GMPYInteger(a.numerator)
def from_FF_gmpy(K1, a, K0):
"""Convert ``ModularInteger(mpz)`` to GMPY's ``mpz``. """
return K0.to_int(a)
def from_ZZ_gmpy(K1, a, K0):
"""Convert GMPY's ``mpz`` to GMPY's ``mpz``. """
return a
def from_QQ_gmpy(K1, a, K0):
"""Convert GMPY ``mpq`` to GMPY's ``mpz``. """
if a.denominator == 1:
return a.numerator
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to GMPY's ``mpz``. """
p, q = K0.to_rational(a)
if q == 1:
return GMPYInteger(p)
def from_GaussianIntegerRing(K1, a, K0):
if a.y == 0:
return a.x
def gcdex(self, a, b):
"""Compute extended GCD of ``a`` and ``b``. """
h, s, t = gmpy_gcdex(a, b)
return s, t, h
def gcd(self, a, b):
"""Compute GCD of ``a`` and ``b``. """
return gmpy_gcd(a, b)
def lcm(self, a, b):
"""Compute LCM of ``a`` and ``b``. """
return gmpy_lcm(a, b)
def sqrt(self, a):
"""Compute square root of ``a``. """
return gmpy_sqrt(a)
def factorial(self, a):
"""Compute factorial of ``a``. """
return gmpy_factorial(a)
| GMPYIntegerRing |
python | cherrypy__cherrypy | cherrypy/lib/httputil.py | {
"start": 17505,
"end": 18160
} | class ____(object):
"""An internet address.
name
Should be the client's host name. If not available (because no DNS
lookup is performed), the IP address should be used instead.
"""
ip = '0.0.0.0'
port = 80
name = 'unknown.tld'
def __init__(self, ip, port, name=None):
"""Initialize a TCP service representation."""
self.ip = ip
self.port = port
if name is None:
name = ip
self.name = name
def __repr__(self):
"""Render a :class:`Host` instance representation."""
return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)
| Host |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 103278,
"end": 104367
} | class ____(torch.nn.Module):
def forward(self, primals_5: "Sym(3)", primals_6: "Sym(4)", tangents_1: "f32[12]", tangents_2: "f32[12]"):
view_2: "f32[3, 4]" = torch.ops.aten.view.default(tangents_1, [primals_5, primals_6]); tangents_1 = None
view_3: "f32[3, 4]" = torch.ops.aten.view.default(tangents_2, [primals_5, primals_6]); tangents_2 = primals_5 = primals_6 = None
return [view_2, view_3, None, None]
""", # noqa: B950
)
def test_tensor_subclass_TwoTensor_automatic_dynamic_shapes(self):
def f(tt):
y = tt.clone()
return y.a, y.view(-1), y.b
a = torch.ones(3, 4, requires_grad=True)
b = a.clone()
tt1 = TwoTensor(a, b)
a = torch.ones(3, 5, requires_grad=True)
b = a.clone()
tt2 = TwoTensor(a, b)
fw, bw = self._compile_check(
f, [(tt1,), (tt2,)], dynamic=None, call_backward=True
)
self.assertExpectedInline(
normalize_gm(fw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF053.py | {
"start": 1717,
"end": 1799
} | class ____[T, _C: (str, bytes)](Generic[_D]): ... # TODO: Type parameter defaults
| C |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/styled.py | {
"start": 225,
"end": 1288
} | class ____:
"""Apply a style to a renderable.
Args:
renderable (RenderableType): Any renderable.
style (StyleType): A style to apply across the entire renderable.
"""
def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
self.renderable = renderable
self.style = style
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style)
rendered_segments = console.render(self.renderable, options)
segments = Segment.apply_style(rendered_segments, style)
return segments
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
return Measurement.get(console, options, self.renderable)
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich import print
from pipenv.patched.pip._vendor.rich.panel import Panel
panel = Styled(Panel("hello"), "on blue")
print(panel)
| Styled |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault3.py | {
"start": 525,
"end": 752
} | class ____(dict[T2, T1], Generic[T1, T2]): ...
# This should generate an error because T1 is after T2.
def funcA(a: T2, b: T1) -> T1 | T2: ...
# This should generate an error because T1 is after T2.
TA_A = dict[T2, T1]
| ClassC |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 67848,
"end": 67924
} | class ____(_WriteOnlyFixture, DynamicHistoryTest):
pass
| WriteOnlyHistoryTest |
python | getsentry__sentry | src/sentry/relocation/api/endpoints/recover.py | {
"start": 1245,
"end": 5462
} | class ____(Endpoint):
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
# TODO(getsentry/team-ospo#214): Stabilize before GA.
"PUT": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (SuperuserOrStaffFeatureFlaggedPermission,)
def _recover(self, request: Request, relocation: Relocation) -> Response | None:
"""
Helper function to do just... one... more... attempt of a the last task that the relocation
failed at. Useful to try to recover a relocation after a fix has been pushed.
"""
until_step = request.data.get("untilStep", None)
if until_step is not None:
try:
step = Relocation.Step[until_step.upper()]
except KeyError:
return Response(
{"detail": ERR_UNKNOWN_RELOCATION_STEP.substitute(step=until_step)},
status=400,
)
if step in {
Relocation.Step.UNKNOWN,
Relocation.Step.UPLOADING,
Relocation.Step.COMPLETED,
}:
return Response(
{"detail": ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(step=step.name)},
status=400,
)
if step.value <= relocation.step:
return Response(
{"detail": ERR_COULD_NOT_PAUSE_RELOCATION_AT_STEP.substitute(step=step.name)},
status=400,
)
relocation.scheduled_pause_at_step = step.value
if relocation.status != Relocation.Status.FAILURE.value:
return Response(
{
"detail": ERR_NOT_RECOVERABLE_STATUS.substitute(
status=Relocation.Status(relocation.status).name
)
},
status=400,
)
ordered_task = OrderedTask[relocation.latest_task]
task = TASK_MAP[ordered_task]
if ordered_task in {OrderedTask.VALIDATING_POLL, OrderedTask.VALIDATING_COMPLETE}:
return Response(
{"detail": ERR_NOT_RECOVERABLE_STEP},
status=400,
)
relocation.status = Relocation.Status.IN_PROGRESS.value
relocation.latest_task_attempts -= 1
try:
relocation.save()
except DatabaseError:
return Response(
{"detail": ERR_COULD_NOT_RECOVER_RELOCATION},
status=400,
)
task.delay(str(relocation.uuid))
return None
def put(self, request: Request, relocation_uuid: str) -> Response:
"""
Recover a failed relocation, perhaps after a bug fix, by running the last attempted task.
``````````````````````````````````````````````````
This command accepts a single optional parameter, which specifies the step BEFORE which the
next pause should occur. If no such parameter is specified, no future pauses are scheduled.
:pparam string relocation_uuid: a UUID identifying the relocation.
:param string untilStep: an optional string identifying the next step to pause before; must
be greater than the currently active step, and one of:
`PREPROCESSING`, `VALIDATING`, `IMPORTING`, `POSTPROCESSING`,
`NOTIFYING`.
:auth: required
"""
logger.info("relocations.recover.put.start", extra={"caller": request.user.id})
# Use a `select_for_update` transaction to prevent duplicate tasks from being started by
# racing recover calls.
with transaction.atomic(using=router.db_for_write(Relocation)):
try:
relocation: Relocation = Relocation.objects.select_for_update().get(
uuid=relocation_uuid
)
except Relocation.DoesNotExist:
raise ResourceDoesNotExist
failed = self._recover(request, relocation)
if failed is not None:
return failed
return self.respond(serialize(relocation))
| RelocationRecoverEndpoint |
python | pytorch__pytorch | tools/test/test_codegen.py | {
"start": 17699,
"end": 20145
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.backend_indices: dict[DispatchKey, dict[OperatorName, BackendMetadata]] = (
defaultdict(dict)
)
yaml_entry = """
- func: op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
CompositeExplicitAutograd: op
"""
es = yaml.load(yaml_entry, Loader=LineLoader)
self.one_return_func, m = NativeFunction.from_yaml(
es[0], loc=Location(__file__, 1), valid_tags=set()
)
BackendIndex.grow_index(self.backend_indices, m)
dispatch_key = DispatchKey.CompositeExplicitAutograd
self.assertTrue(dispatch_key in self.backend_indices)
self.indices = [
BackendIndex(
dispatch_key=dispatch_key,
use_out_as_primary=True,
external=False,
device_guard=False,
index=self.backend_indices[dispatch_key],
)
]
def test_op_with_1_backend_generates_static_dispatch(self) -> None:
disp_sig = DispatcherSignature.from_schema(self.one_return_func.func)
with native_function_manager(self.one_return_func):
out = static_dispatch(
sig=disp_sig,
f=self.one_return_func,
backend_indices=self.indices,
)
self.assertEqual(
out, "return at::compositeexplicitautograd::op_out(out, self);"
)
def test_op_with_cpp_sig_generates_static_dispatch(self) -> None:
sig_group = CppSignatureGroup.from_native_function(
self.one_return_func,
method=False,
fallback_binding=self.one_return_func.manual_cpp_binding,
)
# cpp signature puts out at the front
with native_function_manager(self.one_return_func):
out = static_dispatch(
sig=sig_group.signature,
f=self.one_return_func,
backend_indices=self.indices,
)
self.assertEqual(
out, "return at::compositeexplicitautograd::op_out(out, self);"
)
# Represents the most basic NativeFunction. Use dataclasses.replace()
# to edit for use.
DEFAULT_NATIVE_FUNCTION, _ = NativeFunction.from_yaml(
{"func": "func() -> bool"},
loc=Location(__file__, 1),
valid_tags=set(),
)
if __name__ == "__main__":
unittest.main()
| TestStaticDispatchGeneratrion |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 3144,
"end": 3260
} | class ____(TestRss2Feed):
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
| TestAtomFeed |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/triggers/test_wasb.py | {
"start": 5230,
"end": 9002
} | class ____:
TRIGGER = WasbPrefixSensorTrigger(
container_name=TEST_DATA_STORAGE_CONTAINER_NAME,
prefix=TEST_DATA_STORAGE_BLOB_PREFIX,
wasb_conn_id=TEST_WASB_CONN_ID,
poke_interval=POKE_INTERVAL,
check_options={"delimiter": "/", "include": None},
)
def test_serialization(self):
"""
Asserts that the WasbPrefixSensorTrigger correctly serializes its arguments and classpath."""
classpath, kwargs = self.TRIGGER.serialize()
assert classpath == "airflow.providers.microsoft.azure.triggers.wasb.WasbPrefixSensorTrigger"
assert kwargs == {
"container_name": TEST_DATA_STORAGE_CONTAINER_NAME,
"prefix": TEST_DATA_STORAGE_BLOB_PREFIX,
"wasb_conn_id": TEST_WASB_CONN_ID,
"public_read": False,
"check_options": {
"delimiter": "/",
"include": None,
},
"poke_interval": POKE_INTERVAL,
}
@pytest.mark.asyncio
@pytest.mark.parametrize(
"prefix_exists",
[True, False],
)
@mock.patch("airflow.providers.microsoft.azure.hooks.wasb.WasbAsyncHook.check_for_prefix_async")
async def test_running(self, mock_check_for_prefix, prefix_exists):
"""Test if the task is run in trigger successfully."""
mock_check_for_prefix.return_value = prefix_exists
task = asyncio.create_task(self.TRIGGER.run().__anext__())
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.microsoft.azure.hooks.wasb.WasbAsyncHook.check_for_prefix_async")
async def test_success(self, mock_check_for_prefix):
"""Tests the success state for that the WasbPrefixSensorTrigger."""
mock_check_for_prefix.return_value = True
task = asyncio.create_task(self.TRIGGER.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was returned
assert task.done() is True
asyncio.get_event_loop().stop()
message = (
f"Prefix {TEST_DATA_STORAGE_BLOB_PREFIX} found in container {TEST_DATA_STORAGE_CONTAINER_NAME}."
)
assert task.result() == TriggerEvent({"status": "success", "message": message})
@pytest.mark.asyncio
@mock.patch("airflow.providers.microsoft.azure.hooks.wasb.WasbAsyncHook.check_for_prefix_async")
async def test_waiting_for_blob(self, mock_check_for_prefix):
"""Tests the WasbPrefixSensorTrigger sleeps waiting for the blob to arrive."""
mock_check_for_prefix.side_effect = [False, True]
with mock.patch.object(self.TRIGGER.log, "info") as mock_log_info:
task = asyncio.create_task(self.TRIGGER.run().__anext__())
await asyncio.sleep(POKE_INTERVAL + 0.5)
if not task.done():
message = (
f"Prefix {TEST_DATA_STORAGE_BLOB_PREFIX} not available yet in container "
f"{TEST_DATA_STORAGE_CONTAINER_NAME}. Sleeping for {POKE_INTERVAL} seconds"
)
mock_log_info.assert_called_once_with(message)
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.microsoft.azure.hooks.wasb.WasbAsyncHook.check_for_prefix_async")
async def test_trigger_exception(self, mock_check_for_prefix):
"""Tests the WasbPrefixSensorTrigger yields an error event if there is an exception."""
mock_check_for_prefix.side_effect = Exception("Test exception")
task = [i async for i in self.TRIGGER.run()]
assert len(task) == 1
assert TriggerEvent({"status": "error", "message": "Test exception"}) in task
| TestWasbPrefixSensorTrigger |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 19633,
"end": 19820
} | class ____(Markdown):
def __init__(self, proto: MarkdownProto, root: ElementTree) -> None:
super().__init__(proto, root)
self.type = "latex"
@dataclass(repr=False)
| Latex |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/agent.py | {
"start": 841,
"end": 954
} | class ____(BaseModel):
"""GET /api/agents response."""
items: list[DgApiAgent]
total: int
| DgApiAgentList |
python | ansible__ansible | test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/filter/filter_subdir/in_subdir.py | {
"start": 356,
"end": 537
} | class ____(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'noop': nochange,
'nested': nochange,
}
| FilterModule |
python | doocs__leetcode | solution/2700-2799/2764.Is Array a Preorder of Some Binary Tree/Solution.py | {
"start": 0,
"end": 406
} | class ____:
def isPreorder(self, nodes: List[List[int]]) -> bool:
def dfs(i: int) -> int:
nonlocal k
if i != nodes[k][0]:
return False
k += 1
return all(dfs(j) for j in g[i])
g = defaultdict(list)
for i, p in nodes:
g[p].append(i)
k = 0
return dfs(nodes[0][0]) and k == len(nodes)
| Solution |
python | getsentry__sentry-python | tests/test_monitor.py | {
"start": 258,
"end": 2806
} | class ____(HealthyTestTransport):
def is_healthy(self):
return False
def test_no_monitor_if_disabled(sentry_init):
sentry_init(
transport=HealthyTestTransport(),
enable_backpressure_handling=False,
)
assert sentry_sdk.get_client().monitor is None
def test_monitor_if_enabled(sentry_init):
sentry_init(transport=HealthyTestTransport())
monitor = sentry_sdk.get_client().monitor
assert monitor is not None
assert monitor._thread is None
assert monitor.is_healthy() is True
assert monitor.downsample_factor == 0
assert monitor._thread is not None
assert monitor._thread.name == "sentry.monitor"
def test_monitor_unhealthy(sentry_init):
sentry_init(transport=UnhealthyTestTransport())
monitor = sentry_sdk.get_client().monitor
monitor.interval = 0.1
assert monitor.is_healthy() is True
for i in range(15):
monitor.run()
assert monitor.is_healthy() is False
assert monitor.downsample_factor == (i + 1 if i < 10 else 10)
def test_transaction_uses_downsampled_rate(
sentry_init, capture_record_lost_event_calls, monkeypatch
):
sentry_init(
traces_sample_rate=1.0,
transport=UnhealthyTestTransport(),
)
record_lost_event_calls = capture_record_lost_event_calls()
monitor = sentry_sdk.get_client().monitor
monitor.interval = 0.1
assert monitor.is_healthy() is True
monitor.run()
assert monitor.is_healthy() is False
assert monitor.downsample_factor == 1
# make sure we don't sample the transaction
with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=750000):
with sentry_sdk.start_transaction(name="foobar") as transaction:
assert transaction.sampled is False
assert transaction.sample_rate == 0.5
assert Counter(record_lost_event_calls) == Counter(
[
("backpressure", "transaction", None, 1),
("backpressure", "span", None, 1),
]
)
def test_monitor_no_thread_on_shutdown_no_errors(sentry_init):
sentry_init(transport=HealthyTestTransport())
# make it seem like the interpreter is shutting down
with mock.patch(
"threading.Thread.start",
side_effect=RuntimeError("can't create new thread at interpreter shutdown"),
):
monitor = sentry_sdk.get_client().monitor
assert monitor is not None
assert monitor._thread is None
monitor.run()
assert monitor._thread is None
| UnhealthyTestTransport |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 42156,
"end": 45997
} | class ____(Request):
"""
Returns metrics of the company queues. The metrics are avaraged in the specified interval.
:param from_date: Starting time (in seconds from epoch) for collecting metrics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting metrics
:type to_date: float
:param interval: Time interval in seconds for a single metrics point. The
minimal value is 1
:type interval: int
:param queue_ids: List of queue ids to collect metrics for. If not provided or
empty then all then average metrics across all the company queues will be
returned.
:type queue_ids: Sequence[str]
"""
_service = "queues"
_action = "get_queue_metrics"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting metrics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single metrics point. The minimal value is 1",
"type": "integer",
},
"queue_ids": {
"description": "List of queue ids to collect metrics for. If not provided or empty then all then average metrics across all the company queues will be returned.",
"items": {"type": "string"},
"type": "array",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting metrics",
"type": "number",
},
},
"required": ["from_date", "to_date", "interval"],
"type": "object",
}
def __init__(
self, from_date: float, to_date: float, interval: int, queue_ids: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetQueueMetricsRequest, self).__init__(**kwargs)
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.queue_ids = queue_ids
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property("queue_ids")
def queue_ids(self) -> Optional[List[str]]:
return self._property_queue_ids
@queue_ids.setter
def queue_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_queue_ids = None
return
self.assert_isinstance(value, "queue_ids", (list, tuple))
self.assert_isinstance(value, "queue_ids", six.string_types, is_array=True)
self._property_queue_ids = value
| GetQueueMetricsRequest |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 47774,
"end": 53126
} | class ____(
AssertsCompiledSQL, fixtures.TestBase, AssertsExecutionResults
):
__sparse_driver_backend__ = True
__only_on__ = "postgresql > 8.3"
@testing.requires.postgresql_working_nullable_domains
def test_domain_type_reflection(self, metadata, connection):
positive_int = DOMAIN(
"positive_int", Integer(), check="value > 0", not_null=True
)
my_str = DOMAIN("my_string", Text(), collation="C", default="~~")
Table(
"table",
metadata,
Column("value", positive_int),
Column("str", my_str),
)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("table", m2, autoload_with=connection)
vt = t2.c.value.type
is_true(isinstance(vt, DOMAIN))
is_true(isinstance(vt.data_type, Integer))
eq_(vt.name, "positive_int")
eq_(str(vt.check), "VALUE > 0")
is_(vt.default, None)
is_(vt.collation, None)
is_true(vt.constraint_name is not None)
is_true(vt.not_null)
is_false(vt.create_type)
st = t2.c.str.type
is_true(isinstance(st, DOMAIN))
is_true(isinstance(st.data_type, Text))
eq_(st.name, "my_string")
is_(st.check, None)
is_true("~~" in st.default)
eq_(st.collation, "C")
is_(st.constraint_name, None)
is_false(st.not_null)
is_false(st.create_type)
def test_domain_create_table(self, metadata, connection):
metadata = self.metadata
Email = DOMAIN(
name="email",
data_type=Text,
check=r"VALUE ~ '[^@]+@[^@]+\.[^@]+'",
)
PosInt = DOMAIN(
name="pos_int",
data_type=Integer,
not_null=True,
check=r"VALUE > 0",
)
t1 = Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column("email", Email),
Column("number", PosInt),
)
t1.create(connection)
t1.create(connection, checkfirst=True) # check the create
connection.execute(
t1.insert(), {"email": "test@example.com", "number": 42}
)
connection.execute(t1.insert(), {"email": "a@b.c", "number": 1})
connection.execute(
t1.insert(), {"email": "example@gmail.co.uk", "number": 99}
)
eq_(
connection.execute(t1.select().order_by(t1.c.id)).fetchall(),
[
(1, "test@example.com", 42),
(2, "a@b.c", 1),
(3, "example@gmail.co.uk", 99),
],
)
@testing.combinations(
(Integer, "value > 0", 4),
(String, "value != ''", "hello world"),
(
UUID,
"value != '{00000000-0000-0000-0000-000000000000}'",
uuid.uuid4(),
),
(
DateTime,
"value >= '2020-01-01T00:00:00'",
datetime.datetime.fromisoformat("2021-01-01T00:00:00.000"),
),
argnames="domain_datatype, domain_check, value",
)
def test_domain_roundtrip(
self, metadata, connection, domain_datatype, domain_check, value
):
table = Table(
"domain_roundtrip_test",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value",
DOMAIN("valuedomain", domain_datatype, check=domain_check),
),
)
table.create(connection)
connection.execute(table.insert(), {"value": value})
results = connection.execute(
table.select().order_by(table.c.id)
).fetchall()
eq_(results, [(1, value)])
@testing.combinations(
(DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True), 4, -4),
(
DOMAIN("email", String, check=r"VALUE ~ '[^@]+@[^@]+\.[^@]+'"),
"e@xample.com",
"fred",
),
argnames="domain,pass_value,fail_value",
)
def test_check_constraint(
self, metadata, connection, domain, pass_value, fail_value
):
table = Table("table", metadata, Column("value", domain))
table.create(connection)
connection.execute(table.insert(), {"value": pass_value})
# psycopg/psycopg2 raise IntegrityError, while pg8000 raises
# ProgrammingError
with expect_raises(exc.DatabaseError):
connection.execute(table.insert(), {"value": fail_value})
@testing.combinations(
(DOMAIN("nullable_domain", Integer, not_null=True), 1),
(DOMAIN("non_nullable_domain", Integer, not_null=False), 1),
argnames="domain,pass_value",
)
def test_domain_nullable(self, metadata, connection, domain, pass_value):
table = Table("table", metadata, Column("value", domain))
table.create(connection)
connection.execute(table.insert(), {"value": pass_value})
if domain.not_null:
# psycopg/psycopg2 raise IntegrityError, while pg8000 raises
# ProgrammingError
with expect_raises(exc.DatabaseError):
connection.execute(table.insert(), {"value": None})
else:
connection.execute(table.insert(), {"value": None})
| DomainTest |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 36449,
"end": 40040
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
_SUPPORTED_DTYPES = [dtypes.int8, dtypes.uint8,
dtypes.int16, dtypes.uint16,
dtypes.int32, dtypes.uint32,
dtypes.int64, dtypes.uint64,
dtypes.bfloat16, dtypes.float16,
dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128]
@parameterized.parameters(*_SUPPORTED_DTYPES)
def testBasic(self, dtype):
if dtype.is_unsigned:
nums = np.arange(0, 120, 3).reshape(40, 1)
divs = np.arange(0, 48, 4).reshape(1, 12)
elif dtype.is_integer:
nums = np.arange(-120, 120, 3).reshape(80, 1)
divs = np.arange(-48, 48, 4).reshape(1, 24)
else:
nums = np.arange(-10, 10, .25).reshape(80, 1)
divs = np.arange(-3, 3, .25).reshape(1, 24)
assert 0 in divs, "Bad test set-up"
tf_nums = constant_op.constant(nums, dtype=dtype)
tf_divs = constant_op.constant(divs, dtype=dtype)
# Use tf versions for expected value to ensure inputs are identical
# (e.g. in the case of bfloat16).
np_nums = self.evaluate(tf_nums)
np_divs = self.evaluate(tf_divs)
np_result = np.true_divide(np_nums, np_divs)
np_result[:, np_divs[0] == 0] = 0
with test_util.use_gpu():
tf_result = math_ops.div_no_nan(tf_nums, tf_divs)
self.assertAllCloseAccordingToType(tf_result, np_result)
@parameterized.product(
type_x=_SUPPORTED_DTYPES + [float, int],
type_y=_SUPPORTED_DTYPES + [float, int])
def testSameSupportedTypesAsDivide(self, type_x, type_y):
def one(type_):
if type_ is int:
return 1
elif type_ is float:
return 1.0
else:
return constant_op.constant(1, dtype=type_)
x = one(type_x)
y = one(type_y)
divide_raises = False
try:
divide_result = math_ops.divide(x, y)
except TypeError:
divide_raises = True
if divide_raises:
with self.assertRaises(TypeError):
_ = math_ops.div_no_nan(x, y)
else:
divide_no_nan_result = math_ops.div_no_nan(x, y)
self.assertEqual(divide_no_nan_result.dtype, divide_result.dtype)
self.assertAllEqual(divide_no_nan_result, divide_result)
@parameterized.parameters((dtypes.bfloat16), (dtypes.float16),
(dtypes.float32), (dtypes.float64),
(dtypes.complex64), (dtypes.complex128))
def testSmall(self, dtype):
# Choose values whose squared magnitude underflows to zero/subnormal.
zero = constant_op.constant([0, 0, 0, 0], dtype=dtype)
divs = constant_op.constant([1e-25, -1e-20, 1e-165, -1e-160], dtype=dtype)
tf_result = math_ops.div_no_nan(zero, divs)
# Results should always be exactly zero.
self.assertAllEqual(tf_result, zero)
@parameterized.parameters((dtypes.bfloat16), (dtypes.float16),
(dtypes.float32), (dtypes.float64),
(dtypes.complex64), (dtypes.complex128))
def testNonFiniteInNumerator(self, dtype):
nums = constant_op.constant([np.nan, np.inf, -np.inf], dtype=dtype)
zeros = constant_op.constant([0, 0, 0], dtype=dtype)
ones = constant_op.constant([1, 1, 1], dtype=dtype)
with test_util.use_gpu():
tf_result_zeros = math_ops.div_no_nan(nums, zeros)
self.assertAllEqual([0, 0, 0], tf_result_zeros)
tf_result_ones = math_ops.div_no_nan(nums, ones)
self.assertAllEqual(nums / ones, tf_result_ones)
@test_util.run_all_in_graph_and_eager_modes
| DivNoNanTest |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 21797,
"end": 25290
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": 9000, "dynamic": ["license"]}})
with pytest.raises(
ValueError,
match="Metadata field `license` cannot be both statically defined and listed in field `project.dynamic`",
):
_ = metadata.core.license
def test_invalid_type(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": 9000}})
with pytest.raises(TypeError, match="Field `project.license` must be a string or a table"):
_ = metadata.core.license
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.license == metadata.core.license == ""
assert metadata.core.license_expression == metadata.core.license_expression == ""
def test_normalization(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": "mit or apache-2.0"}})
assert metadata.core.license_expression == "MIT OR Apache-2.0"
def test_invalid_expression(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": "mit or foo"}})
with pytest.raises(ValueError, match="Error parsing field `project.license` - Unknown license: 'foo'"):
_ = metadata.core.license_expression
def test_multiple_options(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {"file": "", "text": ""}}})
with pytest.raises(ValueError, match="Cannot specify both `file` and `text` in the `project.license` table"):
_ = metadata.core.license
def test_no_option(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {}}})
with pytest.raises(ValueError, match="Must specify either `file` or `text` in the `project.license` table"):
_ = metadata.core.license
def test_file_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {"file": 4}}})
with pytest.raises(TypeError, match="Field `file` in the `project.license` table must be a string"):
_ = metadata.core.license
def test_file_nonexistent(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {"file": "foo/bar.md"}}})
with pytest.raises(OSError, match="License file does not exist: foo/bar\\.md"):
_ = metadata.core.license
def test_file_correct(self, temp_dir):
metadata = ProjectMetadata(str(temp_dir), None, {"project": {"license": {"file": "foo/bar.md"}}})
file_path = temp_dir / "foo" / "bar.md"
file_path.ensure_parent_dir_exists()
file_path.write_text("test content")
assert metadata.core.license == "test content"
def test_text_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {"text": 4}}})
with pytest.raises(TypeError, match="Field `text` in the `project.license` table must be a string"):
_ = metadata.core.license
def test_text_correct(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license": {"text": "test content"}}})
assert metadata.core.license == "test content"
| TestLicense |
python | getsentry__responses | responses/__init__.py | {
"start": 21602,
"end": 43817
} | class ____:
DELETE: Literal["DELETE"] = "DELETE"
GET: Literal["GET"] = "GET"
HEAD: Literal["HEAD"] = "HEAD"
OPTIONS: Literal["OPTIONS"] = "OPTIONS"
PATCH: Literal["PATCH"] = "PATCH"
POST: Literal["POST"] = "POST"
PUT: Literal["PUT"] = "PUT"
Response: Type[Response] = Response
# Make the `matchers` name available under a RequestsMock instance
from responses import matchers
response_callback: Optional[Callable[[Any], Any]] = None
def __init__(
self,
assert_all_requests_are_fired: bool = True,
response_callback: Optional[Callable[[Any], Any]] = None,
passthru_prefixes: Tuple[str, ...] = (),
target: str = "requests.adapters.HTTPAdapter.send",
registry: Type[FirstMatchRegistry] = FirstMatchRegistry,
*,
real_adapter_send: "_HTTPAdapterSend" = _real_send,
) -> None:
self._calls: CallList = CallList()
self.reset()
self._registry: FirstMatchRegistry = registry() # call only after reset
self.assert_all_requests_are_fired: bool = assert_all_requests_are_fired
self.response_callback: Optional[Callable[[Any], Response]] = response_callback
self.passthru_prefixes: Tuple[_URLPatternType, ...] = tuple(passthru_prefixes)
self.target: str = target
self._patcher: Optional["_mock_patcher[Any]"] = None
self._thread_lock = _ThreadingLock()
self._real_send = real_adapter_send
def get_registry(self) -> FirstMatchRegistry:
"""Returns current registry instance with responses.
Returns
-------
FirstMatchRegistry
Current registry instance with responses.
"""
return self._registry
def _set_registry(self, new_registry: Type[FirstMatchRegistry]) -> None:
"""Replaces current registry with `new_registry`.
Parameters
----------
new_registry : Type[FirstMatchRegistry]
Class reference of the registry that should be set, eg OrderedRegistry
"""
if self.registered():
err_msg = (
"Cannot replace Registry, current registry has responses.\n"
"Run 'responses.registry.reset()' first"
)
raise AttributeError(err_msg)
self._registry = new_registry()
def reset(self) -> None:
"""Resets registry (including type), calls, passthru_prefixes to default values."""
self._registry = FirstMatchRegistry()
self._calls.reset()
self.passthru_prefixes = ()
def add(
self,
method: "_HTTPMethodOrResponse" = None,
url: "Optional[_URLPatternType]" = None,
body: "_Body" = "",
adding_headers: "_HeaderSet" = None,
*args: Any,
**kwargs: Any,
) -> BaseResponse:
"""
>>> import responses
A basic request:
>>> responses.add(responses.GET, 'http://example.com')
You can also directly pass an object which implements the
``BaseResponse`` interface:
>>> responses.add(Response(...))
A JSON payload:
>>> responses.add(
>>> method='GET',
>>> url='http://example.com',
>>> json={'foo': 'bar'},
>>> )
Custom headers:
>>> responses.add(
>>> method='GET',
>>> url='http://example.com',
>>> headers={'X-Header': 'foo'},
>>> )
"""
if isinstance(method, BaseResponse):
return self._registry.add(method)
if adding_headers is not None:
kwargs.setdefault("headers", adding_headers)
if (
"content_type" in kwargs
and "headers" in kwargs
and kwargs["headers"] is not None
):
header_keys = [header.lower() for header in kwargs["headers"]]
if "content-type" in header_keys:
raise RuntimeError(
"You cannot define both `content_type` and `headers[Content-Type]`."
" Using the `content_type` kwarg is recommended."
)
assert url is not None
assert isinstance(method, str)
response = Response(method=method, url=url, body=body, **kwargs)
return self._registry.add(response)
delete = partialmethod(add, DELETE)
get = partialmethod(add, GET)
head = partialmethod(add, HEAD)
options = partialmethod(add, OPTIONS)
patch = partialmethod(add, PATCH)
post = partialmethod(add, POST)
put = partialmethod(add, PUT)
def _parse_response_file(
self, file_path: "Union[str, bytes, os.PathLike[Any]]"
) -> "Dict[str, Any]":
with open(file_path) as file:
data = yaml.safe_load(file)
return data
def _add_from_file(self, file_path: "Union[str, bytes, os.PathLike[Any]]") -> None:
data = self._parse_response_file(file_path)
for rsp in data["responses"]:
rsp = rsp["response"]
self.add(
method=rsp["method"],
url=rsp["url"],
body=rsp["body"],
status=rsp["status"],
headers=rsp["headers"] if "headers" in rsp else None,
content_type=rsp["content_type"],
auto_calculate_content_length=rsp["auto_calculate_content_length"],
)
def add_passthru(self, prefix: "_URLPatternType") -> None:
"""
Register a URL prefix or regex to passthru any non-matching mock requests to.
For example, to allow any request to 'https://example.com', but require
mocks for the remainder, you would add the prefix as so:
>>> import responses
>>> responses.add_passthru('https://example.com')
Regex can be used like:
>>> import re
>>> responses.add_passthru(re.compile('https://example.com/\\w+'))
"""
if not isinstance(prefix, Pattern) and _has_unicode(prefix):
prefix = _clean_unicode(prefix)
self.passthru_prefixes += (prefix,)
def remove(
self,
method_or_response: "_HTTPMethodOrResponse" = None,
url: "Optional[_URLPatternType]" = None,
) -> List[BaseResponse]:
"""
Removes a response previously added using ``add()``, identified
either by a response object inheriting ``BaseResponse`` or
``method`` and ``url``. Removes all matching responses.
>>> import responses
>>> responses.add(responses.GET, 'http://example.org')
>>> responses.remove(responses.GET, 'http://example.org')
"""
if isinstance(method_or_response, BaseResponse):
response = method_or_response
else:
assert url is not None
assert isinstance(method_or_response, str)
response = BaseResponse(method=method_or_response, url=url)
return self._registry.remove(response)
def replace(
self,
method_or_response: "_HTTPMethodOrResponse" = None,
url: "Optional[_URLPatternType]" = None,
body: "_Body" = "",
*args: Any,
**kwargs: Any,
) -> BaseResponse:
"""
Replaces a response previously added using ``add()``. The signature
is identical to ``add()``. The response is identified using ``method``
and ``url``, and the first matching response is replaced.
>>> import responses
>>> responses.add(responses.GET, 'http://example.org', json={'data': 1})
>>> responses.replace(responses.GET, 'http://example.org', json={'data': 2})
"""
if isinstance(method_or_response, BaseResponse):
response = method_or_response
else:
assert url is not None
assert isinstance(method_or_response, str)
response = Response(method=method_or_response, url=url, body=body, **kwargs)
return self._registry.replace(response)
def upsert(
self,
method_or_response: "_HTTPMethodOrResponse" = None,
url: "Optional[_URLPatternType]" = None,
body: "_Body" = "",
*args: Any,
**kwargs: Any,
) -> BaseResponse:
"""
Replaces a response previously added using ``add()``, or adds the response
if no response exists. Responses are matched using ``method``and ``url``.
The first matching response is replaced.
>>> import responses
>>> responses.add(responses.GET, 'http://example.org', json={'data': 1})
>>> responses.upsert(responses.GET, 'http://example.org', json={'data': 2})
"""
try:
return self.replace(method_or_response, url, body, *args, **kwargs)
except ValueError:
return self.add(method_or_response, url, body, *args, **kwargs)
def add_callback(
self,
method: str,
url: "_URLPatternType",
callback: Callable[
["PreparedRequest"],
Union[Exception, Tuple[int, Mapping[str, str], "_Body"]],
],
match_querystring: Union[bool, FalseBool] = FalseBool(),
content_type: Optional[str] = "text/plain",
match: "_MatcherIterable" = (),
) -> BaseResponse:
return self._registry.add(
CallbackResponse(
url=url,
method=method,
callback=callback,
content_type=content_type,
match_querystring=match_querystring,
match=match,
)
)
def registered(self) -> List["BaseResponse"]:
return self._registry.registered
@property
def calls(self) -> CallList:
return self._calls
def __enter__(self) -> "RequestsMock":
self.start()
return self
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
success = type is None
try:
self.stop(allow_assert=success)
finally:
self.reset()
@overload
def activate(self, func: "_F" = ...) -> "_F":
"""Overload for scenario when 'responses.activate' is used."""
@overload
def activate(
self,
*,
registry: Type[Any] = ...,
assert_all_requests_are_fired: bool = ...,
) -> Callable[["_F"], "_F"]:
"""Overload for scenario when
'responses.activate(registry=, assert_all_requests_are_fired=True)' is used.
See https://github.com/getsentry/responses/pull/469 for more details
"""
def activate(
self,
func: Optional["_F"] = None,
*,
registry: Optional[Type[Any]] = None,
assert_all_requests_are_fired: bool = False,
) -> Union[Callable[["_F"], "_F"], "_F"]:
if func is not None:
return get_wrapped(func, self)
def deco_activate(function: "_F") -> Callable[..., Any]:
return get_wrapped(
function,
self,
registry=registry,
assert_all_requests_are_fired=assert_all_requests_are_fired,
)
return deco_activate
def _find_match(
self, request: "PreparedRequest"
) -> Tuple[Optional["BaseResponse"], List[str]]:
"""
Iterates through all available matches and validates if any of them matches the request
:param request: (PreparedRequest), request object
:return:
(Response) found match. If multiple found, then remove & return the first match.
(list) list with reasons why other matches don't match
"""
with self._thread_lock:
return self._registry.find(request)
def _parse_request_params(
self, url: str
) -> Dict[str, Union[str, int, float, List[Optional[Union[str, int, float]]]]]:
params: Dict[str, Union[str, int, float, List[Any]]] = {}
for key, val in groupby(parse_qsl(urlsplit(url).query), lambda kv: kv[0]):
values = list(map(lambda x: x[1], val))
if len(values) == 1:
values = values[0] # type: ignore[assignment]
params[key] = values
return params
def _read_filelike_body(
self, body: Union[str, bytes, BufferedReader, None]
) -> Union[str, bytes, None]:
# Requests/urllib support multiple types of body, including file-like objects.
# Read from the file if it's a file-like object to avoid storing a closed file
# in the call list and allow the user to compare against the data that was in the
# request.
# See GH #719
if isinstance(body, str) or isinstance(body, bytes) or body is None:
return body
# Based on
# https://github.com/urllib3/urllib3/blob/abbfbcb1dd274fc54b4f0a7785fd04d59b634195/src/urllib3/util/request.py#L220
if hasattr(body, "read") or isinstance(body, BufferedReader):
return body.read()
return body
def _on_request(
    self,
    adapter: "HTTPAdapter",
    request: "PreparedRequest",
    *,
    retries: Optional["_Retry"] = None,
    **kwargs: Any,
) -> "models.Response":
    """Core interception point: resolve *request* against the registry.

    Returns the mocked response (or the real response for passthrough
    stubs/prefixes), records the call on both the mock and the matched
    stub, and re-dispatches recursively while urllib3 retries apply.
    Raises ConnectionError when nothing matches and the URL is not
    allowed to pass through.
    """
    # add attributes params and req_kwargs to 'request' object for further match comparison
    # original request object does not have these attributes
    request.params = self._parse_request_params(request.path_url)  # type: ignore[attr-defined]
    request.req_kwargs = kwargs  # type: ignore[attr-defined]
    request_url = str(request.url)
    # Drain file-like bodies so the recorded call holds the actual data.
    request.body = self._read_filelike_body(request.body)
    match, match_failed_reasons = self._find_match(request)
    resp_callback = self.response_callback

    if match is None:
        # No stub matched: either let the request through (URL covered by a
        # passthru prefix/pattern) or refuse it with a diagnostic error.
        if any(
            [
                p.match(request_url)
                if isinstance(p, Pattern)
                else request_url.startswith(p)
                for p in self.passthru_prefixes
            ]
        ):
            logger.info("request.allowed-passthru", extra={"url": request_url})
            return self._real_send(adapter, request, **kwargs)  # type: ignore

        error_msg = (
            "Connection refused by Responses - the call doesn't "
            "match any registered mock.\n\n"
            "Request: \n"
            f"- {request.method} {request_url}\n\n"
            "Available matches:\n"
        )
        for i, m in enumerate(self.registered()):
            error_msg += "- {} {} {}\n".format(
                m.method, m.url, match_failed_reasons[i]
            )
        if self.passthru_prefixes:
            error_msg += "Passthru prefixes:\n"
            for p in self.passthru_prefixes:
                error_msg += f"- {p}\n"

        # The failed call is still recorded before raising.
        response = ConnectionError(error_msg)
        response.request = request

        self._calls.add(request, response)
        raise response

    if match.passthrough:
        logger.info("request.passthrough-response", extra={"url": request_url})
        response = self._real_send(adapter, request, **kwargs)  # type: ignore
    else:
        try:
            response = adapter.build_response(  # type: ignore[assignment]
                request, match.get_response(request)
            )
        except BaseException as response:
            # Record the failed call on both call lists, then propagate.
            call = Call(request, response)
            self._calls.add_call(call)
            match.calls.add_call(call)
            raise

    if resp_callback:
        response = resp_callback(response)  # type: ignore[misc]
    call = Call(request, response)  # type: ignore[misc]
    self._calls.add_call(call)
    match.calls.add_call(call)

    retries = retries or adapter.max_retries
    # first validate that current request is eligible to be retried.
    # See ``urllib3.util.retry.Retry`` documentation.
    if retries.is_retry(
        method=response.request.method, status_code=response.status_code  # type: ignore[misc]
    ):
        try:
            retries = retries.increment(
                method=response.request.method,  # type: ignore[misc]
                url=response.url,  # type: ignore[misc]
                response=response.raw,  # type: ignore[misc]
            )
            # Recurse with the decremented Retry so subsequent stubs are
            # consumed until retries are exhausted.
            return self._on_request(adapter, request, retries=retries, **kwargs)
        except MaxRetryError as e:
            if retries.raise_on_status:
                """Since we call 'retries.increment()' by ourselves, we always set "error"
                argument equal to None, thus, MaxRetryError exception will be raised with
                ResponseError as a 'reason'.

                Here we're emulating the `if isinstance(e.reason, ResponseError):`
                branch found at:
                https://github.com/psf/requests/blob/
                177dd90f18a8f4dc79a7d2049f0a3f4fcc5932a0/requests/adapters.py#L549
                """
                raise RetryError(e, request=request)

            return response
    return response
def unbound_on_send(self) -> "UnboundSend":
    """Build the function that replaces ``HTTPAdapter.send`` while patched.

    The returned ``send`` normalizes positional adapter arguments into
    keyword arguments and forwards everything to :meth:`_on_request`.
    """

    def send(
        adapter: "HTTPAdapter",
        request: "PreparedRequest",
        *args: Any,
        **kwargs: Any,
    ) -> "models.Response":
        if args:
            # that probably means that the request was sent from the custom adapter
            # It is fully legit to send positional args from adapter, although,
            # `requests` implementation does it always with kwargs
            # See for more info: https://github.com/getsentry/responses/issues/642
            try:
                kwargs["stream"] = args[0]
                kwargs["timeout"] = args[1]
                kwargs["verify"] = args[2]
                kwargs["cert"] = args[3]
                kwargs["proxies"] = args[4]
            except IndexError:
                # not all kwargs are required
                pass

        return self._on_request(adapter, request, **kwargs)

    return send
def start(self) -> None:
    """Begin patching ``self.target`` so outgoing requests hit this mock."""
    if self._patcher:
        # we must not override value of the _patcher if already applied
        # this prevents issues when one decorated function is called from
        # another decorated function
        return

    self._patcher = std_mock.patch(target=self.target, new=self.unbound_on_send())
    self._patcher.start()
def stop(self, allow_assert: bool = True) -> None:
    """Undo the patch; optionally verify every stub was exercised.

    With ``assert_all_requests_are_fired`` enabled and *allow_assert*
    true, raises AssertionError listing stubs that were never called.
    """
    if self._patcher:
        # prevent stopping unstarted patchers
        self._patcher.stop()

        # once patcher is stopped, clean it. This is required to create a new
        # fresh patcher on self.start()
        self._patcher = None

    if not self.assert_all_requests_are_fired:
        return

    if not allow_assert:
        return

    not_called = [m for m in self.registered() if m.call_count == 0]
    if not_called:
        raise AssertionError(
            "Not all requests have been executed {!r}".format(
                [(match.method, match.url) for match in not_called]
            )
        )
def assert_call_count(self, url: str, count: int) -> bool:
    """Assert that *url* was requested exactly *count* times.

    Returns True on success, raises AssertionError otherwise.  The URL is
    normalized (default path added) before comparison.
    """
    normalized_url = _ensure_url_default_path(url)
    call_count = sum(
        1 for call in self.calls if call.request.url == normalized_url
    )
    if call_count != count:
        raise AssertionError(
            f"Expected URL '{url}' to be called {count} times. Called {call_count} times."
        )
    return True
# expose default mock namespace
mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False)


__all__ = [
    "CallbackResponse",
    "Response",
    "RequestsMock",
    # Exposed by the RequestsMock class:
    "activate",
    "add",
    "_add_from_file",
    "add_callback",
    "add_passthru",
    "_deprecated_assert_all_requests_are_fired",
    "assert_call_count",
    "calls",
    "delete",
    "DELETE",
    "get",
    "GET",
    "head",
    "HEAD",
    "options",
    "OPTIONS",
    "_deprecated_passthru_prefixes",
    "patch",
    "PATCH",
    "post",
    "POST",
    "put",
    "PUT",
    "registered",
    "remove",
    "replace",
    "reset",
    "response_callback",
    "start",
    "stop",
    "_deprecated_target",
    "upsert",
]

# expose only methods and/or read-only methods
# Module-level aliases delegate to the shared default mock instance so
# callers can use e.g. ``responses.get(...)`` directly.
activate = _default_mock.activate
add = _default_mock.add
_add_from_file = _default_mock._add_from_file
add_callback = _default_mock.add_callback
add_passthru = _default_mock.add_passthru
_deprecated_assert_all_requests_are_fired = _default_mock.assert_all_requests_are_fired
assert_call_count = _default_mock.assert_call_count
calls = _default_mock.calls
delete = _default_mock.delete
DELETE = _default_mock.DELETE
get = _default_mock.get
GET = _default_mock.GET
head = _default_mock.head
HEAD = _default_mock.HEAD
options = _default_mock.options
OPTIONS = _default_mock.OPTIONS
_deprecated_passthru_prefixes = _default_mock.passthru_prefixes
patch = _default_mock.patch
PATCH = _default_mock.PATCH
post = _default_mock.post
POST = _default_mock.POST
put = _default_mock.put
PUT = _default_mock.PUT
registered = _default_mock.registered
remove = _default_mock.remove
replace = _default_mock.replace
reset = _default_mock.reset
response_callback = _default_mock.response_callback
start = _default_mock.start
stop = _default_mock.stop
_deprecated_target = _default_mock.target
upsert = _default_mock.upsert

# Attribute names served through the deprecation shim in __getattr__ below.
deprecated_names = ["assert_all_requests_are_fired", "passthru_prefixes", "target"]
def __getattr__(name: str) -> Any:
    """Module-level fallback serving deprecated attribute aliases.

    Deprecated names emit a DeprecationWarning and resolve to their
    ``_deprecated_``-prefixed module globals; anything else raises
    AttributeError as usual.
    """
    if name not in deprecated_names:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    warn(
        f"{name} is deprecated. Please use 'responses.mock.{name}",
        DeprecationWarning,
    )
    return globals()[f"_deprecated_{name}"]
| RequestsMock |
python | getsentry__sentry | src/sentry/issues/endpoints/group_notes_details.py | {
"start": 863,
"end": 4372
class ____(GroupEndpoint):
    """Edit or delete a single note (comment) on an issue group.

    Only the note's author, using a user-bound credential, may modify it
    (see the comment below about ApiKeys).
    """

    publish_status = {
        "DELETE": ApiPublishStatus.PRIVATE,
        "PUT": ApiPublishStatus.PRIVATE,
    }

    # We explicitly don't allow a request with an ApiKey
    # since an ApiKey is bound to the Organization, not
    # an individual. Not sure if we'd want to allow an ApiKey
    # to delete/update other users' comments
    def delete(self, request: Request, group: Group, note_id: str) -> Response:
        """Delete the requesting user's note *note_id* on *group*.

        Returns 204 on success; raises ResourceDoesNotExist when the note
        is missing or not owned by the requester.
        """
        if not request.user.is_authenticated:
            raise PermissionDenied(detail="Key doesn't have permission to delete Note")

        notes_by_user = Activity.objects.filter(
            group=group, type=ActivityType.NOTE.value, user_id=request.user.id
        )
        if not len(notes_by_user):
            raise ResourceDoesNotExist

        user_note = [n for n in notes_by_user if n.id == int(note_id)]
        if not user_note or len(user_note) > 1:
            raise ResourceDoesNotExist
        note = user_note[0]

        # Capture payload before deletion; the webhook signal needs it.
        webhook_data = {
            "comment_id": note.id,
            "timestamp": note.datetime,
            "comment": note.data.get("text"),
            "project_slug": note.project.slug,
        }

        note.delete()

        comment_deleted.send_robust(
            project=group.project,
            user=request.user,
            group=group,
            data=webhook_data,
            sender="delete",
        )

        # if the user left more than one comment, we want to keep the subscription
        if len(notes_by_user) == 1:
            GroupSubscription.objects.filter(
                user_id=request.user.id,
                group=group,
                project=group.project,
                reason=GroupSubscriptionReason.comment,
            ).delete()

        return Response(status=204)

    def put(self, request: Request, group: Group, note_id: str) -> Response:
        """Update the requesting user's note *note_id* on *group*.

        Returns the serialized note with 200 on success, or the serializer
        errors with 400 when validation fails.
        """
        if not request.user.is_authenticated:
            raise PermissionDenied(detail="Key doesn't have permission to edit Note")

        try:
            note = Activity.objects.get(
                group=group, type=ActivityType.NOTE.value, user_id=request.user.id, id=note_id
            )
        except Activity.DoesNotExist:
            raise ResourceDoesNotExist

        serializer = NoteSerializer(data=request.data, context={"organization": group.organization})

        if serializer.is_valid():
            payload = serializer.validated_data
            # TODO: adding mentions to a note doesn't send notifications. Should it?
            # Remove mentions as they shouldn't go into the database
            payload.pop("mentions", [])

            # Would be nice to have a last_modified timestamp we could bump here
            note.data.update(dict(payload))
            note.save()

            if note.data.get("external_id"):
                self.update_external_comment(request, group, note)

            webhook_data = {
                "comment_id": note.id,
                "timestamp": note.datetime,
                "comment": note.data.get("text"),
                "project_slug": note.project.slug,
            }

            comment_updated.send_robust(
                project=group.project,
                user=request.user,
                group=group,
                data=webhook_data,
                sender="put",
            )
            return Response(serialize(note, request.user), status=200)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| GroupNotesDetailsEndpoint |
python | django__django | tests/migrations/test_migrations_squashed_complex/3_auto.py | {
"start": 35,
"end": 188
class ____(migrations.Migration):
    """No-op migration that depends on the earlier ``2_auto`` migration."""

    dependencies = [("migrations", "2_auto")]

    # RunPython.noop on both directions: the migration exists only to
    # extend the dependency chain.
    operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | pytorch__pytorch | test/xpu/test_gemm.py | {
"start": 4011,
"end": 60324
} | class ____(TestCase):
def _test_addmm_addmv(
    self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False, activation=None
):
    """Check ``f(t, m, v)`` (addmm/addmv-style op) against a numpy reference.

    Runs the op three ways — plain, into a preallocated (optionally
    transposed) ``out`` tensor, and as ``alpha*(m @ v) + beta*t`` in numpy —
    and asserts all agree.  ``activation`` may be None, "relu" or "gelu".
    """
    dtype = t.dtype
    numpy_dtype = dtype
    # Low-precision inputs are compared against a float32 reference.
    if dtype in {torch.bfloat16, torch.half}:
        numpy_dtype = torch.float
    if dtype.is_complex:
        alpha = 0.9 + 0.3j if alpha is None else alpha
        beta = 0.5 + 0.6j if beta is None else beta
    else:
        alpha = 1.2 if alpha is None else alpha
        beta = 0.8 if beta is None else beta
    if activation == "gelu":
        res1 = f(t, m, v, alpha=alpha, beta=beta, use_gelu=True)
    else:
        res1 = f(t, m, v, alpha=alpha, beta=beta)
    res2 = torch.full_like(res1, math.nan)
    if transpose_out:
        res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
    if activation == "gelu":
        f(t, m, v, alpha=alpha, beta=beta, out=res2, use_gelu=True)
    else:
        f(t, m, v, alpha=alpha, beta=beta, out=res2)
    # NOTE(review): the next two expressions are dead — their results are
    # discarded; presumably leftovers from a refactor.
    m.to(numpy_dtype).cpu().numpy()
    v.to(numpy_dtype).cpu().numpy()
    res3 = alpha * (
        m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy()
    )
    if beta != 0:
        res3 += (beta * t).to(numpy_dtype).cpu().numpy()
    if activation == "relu":
        res3 = res3 * (res3 > 0)
    elif activation == "gelu":
        res3_t = torch.from_numpy(res3).to(dtype)
        approximate = "tanh" if t.is_cuda else "none"
        res3_t = torch.nn.functional.gelu(res3_t, approximate=approximate)
        res3 = res3_t.to(numpy_dtype).cpu().numpy()
    else:
        assert activation is None, f"unsupported activation {activation}"
    res3 = torch.from_numpy(res3).to(dtype)
    self.assertEqual(res1, res2)
    self.assertEqual(res1, res3)
def _test_addmm_impl(self, func, activation, device, dtype):
    """Drive ``_test_addmm_addmv`` for *func* over contiguous, 0-strided,
    beta=0/NaN and all operand-transpose combinations."""
    M = torch.randn(10, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
    m1 = torch.randn(10, 50, device="cpu", dtype=torch.float32).to(dtype).to(device)
    m2 = torch.randn(50, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
    self._test_addmm_addmv(func, M, m1, m2, activation=activation)

    # vector-shaped bias and beta=1 result in epilogue fusion in CUDA
    V = torch.randn(25, device="cpu", dtype=torch.float32).to(dtype).to(device)
    self._test_addmm_addmv(func, V, m1, m2, beta=1, activation=activation)

    # Test 0-strided
    M = (
        torch.randn(10, 1, device="cpu", dtype=torch.float32)
        .to(dtype)
        .expand(10, 25)
        .to(device)
    )
    m1 = (
        torch.randn(10, 1, device="cpu", dtype=torch.float32)
        .to(dtype)
        .expand(10, 50)
        .to(device)
    )
    m2 = torch.randn(50, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
    self._test_addmm_addmv(func, M, m1, m2, activation=activation)

    # Test beta=0, M=nan
    M = (
        torch.full((10, 25), math.nan, device="cpu", dtype=torch.float32)
        .to(dtype)
        .to(device)
    )
    m1 = torch.randn(10, 50, device="cpu", dtype=torch.float32).to(dtype).to(device)
    m2 = torch.randn(50, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
    self._test_addmm_addmv(func, M, m1, m2, beta=0, activation=activation)

    # Test transpose
    for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):

        def maybe_transpose(cond, m):
            if not cond:
                return m
            return m.t().clone(memory_format=torch.contiguous_format).t()

        M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
        m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
        m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
        self._test_addmm_addmv(
            func, M, m1, m2, transpose_out=t4, activation=activation
        )

        if t1:
            # use vector V instead of matrix M for epilogue fusion in CUDA (doesn't depend on t1)
            self._test_addmm_addmv(
                func,
                V,
                m1,
                m2,
                beta=1,
                transpose_out=t4,
                activation=activation,
            )
@precisionOverride({torch.float: 1e-4, torch.double: 1e-6, torch.half: 1e-1})
@dtypes(torch.float32, torch.half, torch.double, torch.complex64)
@tf32_on_and_off(0.05)
def test_addmm(self, device, dtype):
    """torch.addmm without any fused activation."""
    self._test_addmm_impl(torch.addmm, None, device, dtype)
@precisionOverride({torch.float: 1e-4, torch.double: 1e-6, torch.half: 1e-1})
@dtypes(torch.float, torch.half, torch.double)
def test_addmm_badmm_scalar_tnesor_input(self, device, dtype):
    """addmm/baddbmm with a 0-dim scalar tensor as the bias input.

    NOTE(review): the name contains typos ("badmm", "tnesor") — renaming
    to test_addmm_baddbmm_scalar_tensor_input would be clearer, but may
    affect external test selection lists; left unchanged here.
    """
    input = torch.tensor(1).to(device=device, dtype=dtype)
    # test addmm
    mat1 = torch.randn(10, 25, device=device).to(dtype)
    mat2 = torch.randn(25, 10, device=device).to(dtype)
    result = torch.addmm(input, mat1, mat2)
    ref = mat1.cpu().numpy() @ mat2.cpu().numpy() + 1
    self.assertEqual(result, ref)
    # test baddbmm
    mat1 = torch.randn(3, 10, 25, device=device).to(dtype)
    mat2 = torch.randn(3, 25, 10, device=device).to(dtype)
    result = torch.baddbmm(input, mat1, mat2)
    ref = mat1.cpu().numpy() @ mat2.cpu().numpy() + 1
    self.assertEqual(result, ref)
@precisionOverride({torch.bfloat16: 1e-0, torch.half: 1e-3, torch.float: 1e-4})
@dtypes(torch.bfloat16, torch.half, torch.float, torch.double)
@tf32_on_and_off(0.005)
def test_addmv(self, device, dtype):
    """torch.addmv across broadcast/expanded/transposed operand layouts."""
    # have to use torch.randn(...).to(bfloat16) instead of
    # torch.randn(..., dtype=bfloat16). randn does not support
    # bfloat16 yet.
    # "*0.2" to reduce errors for low precision
    ts = [
        0.2 * torch.randn(50, device=device).to(dtype),
        0.2 * torch.randn(1, device=device).to(dtype).expand(50),
    ]
    vs = [
        0.2 * torch.randn(100, device=device).to(dtype),
        0.2
        * torch.ones(1, device=device)
        .to(dtype)
        .expand(100),  # to reduce errors for low precision
    ]
    ms = [
        # 0d
        0.2
        * torch.ones((), device=device)
        .to(dtype)
        .expand(50, 100),  # to reduce errors for low precision
        # 1d
        0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
        # this initialization reduces errors for low precision for broadcasted matrices
        # by making sure that intermediate and result values are exactly representable
        # in low precision type
        0.2
        * torch.randint(3, (50, 1), dtype=torch.float, device=device)
        .to(dtype)
        .expand(50, 100),
        # 2d
        0.2 * torch.randn((50, 100), device=device).to(dtype),
        0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
    ]
    for m, v, t in itertools.product(ms, vs, ts):
        self._test_addmm_addmv(torch.addmv, t, m, v)
    # Test beta=0, t=nan
    t = torch.full((50,), math.nan, device=device).to(dtype)
    for m, v in itertools.product(ms, vs):
        self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypes(
    torch.half,
    torch.float32,
    torch.float64,
    torch.complex64,
)
@tf32_on_and_off(0.05)
def test_mm(self, device, dtype):
    """torch.mm against a naive triple-loop reference, over contiguous,
    transposed, zero-strided and explicit-``out`` variants."""

    def _test_mm(n, m, p, dtype, genf):
        # helper function
        def matrixmultiply(mat1, mat2):
            n = mat1.size(0)
            m = mat1.size(1)
            p = mat2.size(1)
            dtype_ = torch.float if dtype == torch.half else dtype
            if dtype == torch.half:
                mat1 = mat1.float()
                mat2 = mat2.float()
            res = torch.zeros(n, p, dtype=dtype_, device=device)
            for i, j in iter_indices(res):
                res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
            return res.half() if dtype == torch.half else res

        # contiguous case
        mat1 = genf(n, m)
        mat2 = genf(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # non contiguous case 1
        mat1 = genf(n, m)
        mat2 = genf(p, m).t()
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # non contiguous case 2
        mat1 = genf(m, n).t()
        mat2 = genf(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # non contiguous case 3
        mat1 = genf(m, n).t()
        mat2 = genf(p, m).t()
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # test with zero stride
        mat1 = genf(n, m)
        mat2 = genf(m, 1).expand(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # explicitly exercise the _out variant in torch.mm().
        # contiguous case
        mat1 = genf(n, m)
        mat2 = genf(m, p)
        res = genf(n, p)
        torch.mm(mat1, mat2, out=res)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

        # explicitly exercise the _out variant in torch.mm().
        # non contiguous case 3
        mat1 = genf(m, n).t()
        mat2 = genf(p, m).t()
        res = genf(n, p)
        torch.mm(mat1, mat2, out=res)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)

    def genf_int(x, y):
        return torch.randint(0, 100, (x, y), dtype=dtype, device=device)

    def genf_bfloat(x, y):
        return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1

    def genf_float(x, y):
        return torch.randn(x, y, dtype=dtype, device=device)

    def genf_Half(x, y):
        return torch.randn(x, y, dtype=dtype, device=device)

    for n, m, p in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
        # NOTE(review): the int branches are unreachable with the current
        # @dtypes list (no integer dtypes) — presumably kept for parity
        # with other test files.
        if (dtype == torch.int32) or (dtype == torch.int64):
            genf = genf_int
        elif dtype == torch.bfloat16:
            genf = genf_bfloat
        elif dtype == torch.half:
            genf = genf_Half
        else:
            genf = genf_float

        _test_mm(n, m, p, dtype, genf)
@precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
@dtypes(torch.float32, torch.bfloat16, torch.half, torch.float64, torch.complex64)
@tf32_on_and_off(0.05)
def test_bmm(self, device, dtype):
    """torch.bmm over permuted, broadcast-expanded and zero-sized batches,
    with and without a permuted ``out`` tensor."""
    batch_sizes = [1, 10]
    M, N, O = 23, 15, 12
    numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32

    def invert_perm(p):
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_inputs(num_batches):
        # transposed tensors
        for perm1, perm2 in itertools.product(
            itertools.permutations((0, 1, 2)), repeat=2
        ):
            b1 = make_tensor(
                (num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1
            )
            b2 = make_tensor(
                (num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1
            )
            b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
            b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
            yield b1, b2
        # broadcasting tensors
        for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
            shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
            b1 = make_tensor(
                shape1, dtype=dtype, device=device, low=-0.1, high=0.1
            ).expand(num_batches, M, N)
            b2 = make_tensor(
                shape2, dtype=dtype, device=device, low=-0.1, high=0.1
            ).expand(num_batches, N, O)
            yield b1, b2
        # zero-sized tensors
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = torch.randn(shape1, dtype=dtype, device=device)
            b2 = torch.randn(shape2, dtype=dtype, device=device)
            yield b1, b2

    for num_batches in batch_sizes:
        for (b1, b2), perm3 in itertools.product(
            generate_inputs(num_batches), itertools.permutations((0, 1, 2))
        ):
            res1 = torch.bmm(b1, b2)
            res2 = (
                torch.full(
                    (num_batches, M, O), math.nan, dtype=dtype, device=device
                )
                .permute(perm3)
                .contiguous()
                .permute(invert_perm(perm3))
            )
            torch.bmm(b1, b2, out=res2)
            expect = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype)
            self.assertEqual(expect, res1)
            self.assertEqual(expect, res2)

            if self.device_type == "cuda":
                # check that mixed arguments are rejected
                self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
                self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
                self.assertRaises(
                    RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu())
                )
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
    """Exercise the in-place, deprecated-overload and functional forms of
    *func* ("addbmm"/"baddbmm") against the precomputed reference *ref*."""
    getattr(out_tensor, func + "_")(b1, b2)
    self.assertEqual(out_tensor, ref)
    res3 = out_tensor.clone()

    with self.assertWarnsOnceRegex(
        UserWarning, f"This overload of {func}_ is deprecated"
    ):
        getattr(out_tensor, func + "_")(1, b1, b2)
    self.assertEqual(out_tensor, ref * 2)

    getattr(res3, func + "_")(b1, b2, beta=1)
    self.assertEqual(out_tensor, res3)

    with self.assertWarnsOnceRegex(
        UserWarning, f"This overload of {func}_ is deprecated"
    ):
        getattr(out_tensor, func + "_")(1.0, 0.5, b1, b2)
    self.assertEqual(out_tensor, ref * 2.5)

    getattr(res3, func + "_")(b1, b2, beta=1.0, alpha=0.5)
    self.assertEqual(out_tensor, res3)

    with self.assertWarnsOnceRegex(
        UserWarning, f"This overload of {func} is deprecated"
    ):
        self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))

    res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=0.5)
    self.assertEqual(res4, ref * 3)

    # beta=0 must ignore (not propagate) NaNs in the input tensor.
    nan = torch.full_like(out_tensor, math.nan)
    res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
    self.assertEqual(res5, ref)

    if b1.is_complex():
        res6 = getattr(torch, func)(out_tensor, b1, b2, beta=0.1j, alpha=0.5j)
        self.assertEqual(res6, out_tensor * 0.1j + 0.5j * ref)
    else:
        res6 = getattr(torch, func)(out_tensor, b1, b2, beta=0.1, alpha=0.5)
        self.assertEqual(res6, out_tensor * 0.1 + 0.5 * ref)

    res7 = torch.full_like(out_tensor, math.nan)
    getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
    self.assertEqual(res7, ref)
@precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
@dtypes(torch.float64, torch.float32, torch.bfloat16, torch.half, torch.complex64)
@tf32_on_and_off(0.005)
def test_addbmm(self, device, dtype):
    """torch.addbmm over transposed, broadcast and zero-sized operands."""
    num_batches = 2
    M, N, O = 16, 17, 18

    def invert_perm(p):
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_tensor():
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        # transposed tensors
        for perm1, perm2 in itertools.product(
            itertools.permutations((0, 1, 2)), repeat=2
        ):
            for perm3 in itertools.permutations((0, 1)):
                b1 = (
                    make_tensor(
                        (num_batches, M, N),
                        dtype=dtype,
                        device=device,
                        low=-1,
                        high=1,
                    )
                    * 0.1
                )
                b2 = (
                    make_tensor(
                        (num_batches, N, O),
                        dtype=dtype,
                        device=device,
                        low=-1,
                        high=1,
                    )
                    * 0.1
                )
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = (
                    torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy()
                        @ b2.to(numpy_dtype).cpu().numpy()
                    )
                    .to(device=device, dtype=dtype)
                    .sum(0)
                )
                out_tensor = (
                    torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                )
                yield b1, b2, ref, out_tensor
        # broadcasting tensors
        for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
            shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
            b1 = (
                make_tensor(
                    shape1, dtype=dtype, device=device, low=-1, high=1
                ).expand(num_batches, M, N)
                * 0.1
            )
            b2 = (
                make_tensor(
                    shape2, dtype=dtype, device=device, low=-1, high=1
                ).expand(num_batches, N, O)
                * 0.1
            )
            ref = (
                torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy()
                    @ b2.to(numpy_dtype).cpu().numpy()
                )
                .to(device=device, dtype=dtype)
                .sum(0)
            )
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor
        # zero-sized tensors
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = (
                make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1)
                * 0.1
            )
            b2 = (
                make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1)
                * 0.1
            )
            ref = (
                torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy()
                    @ b2.to(numpy_dtype).cpu().numpy()
                )
                .to(device=device, dtype=dtype)
                .sum(0)
            )
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor

    for b1, b2, ref, out_tensor in generate_tensor():
        self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5, torch.float64: 1e-6})
@dtypes(torch.float64, torch.float32, torch.bfloat16, torch.half, torch.complex64)
@tf32_on_and_off(0.01)
def test_baddbmm(self, device, dtype):
    """torch.baddbmm over transposed, broadcast and zero-sized operands."""
    num_batches = 10
    M, N, O = 12, 8, 50

    def invert_perm(p):
        d = {x: i for i, x in enumerate(p)}
        return (d[0], d[1], d[2])

    def generate_tensor():
        numpy_dtype = (
            dtype if dtype not in [torch.bfloat16, torch.half] else torch.float32
        )
        # transposed tensors
        for perm1, perm2, perm3 in itertools.product(
            itertools.permutations((0, 1, 2)), repeat=3
        ):
            b1 = make_tensor(
                (num_batches, M, N), dtype=dtype, device=device, low=-1, high=1
            )
            b2 = make_tensor(
                (num_batches, N, O), dtype=dtype, device=device, low=-1, high=1
            )
            b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
            b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            out_tensor = (
                out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
            )
            yield b1, b2, ref, out_tensor
        # broadcasting tensors
        for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
            shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
            shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
            b1 = make_tensor(
                shape1, dtype=dtype, device=device, low=-1, high=1
            ).expand(num_batches, M, N)
            b2 = make_tensor(
                shape2, dtype=dtype, device=device, low=-1, high=1
            ).expand(num_batches, N, O)
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor
        # zero-sized tensors
        for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
            shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
            shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
            b1 = make_tensor(shape1, dtype=dtype, device=device, low=-2, high=2)
            b2 = make_tensor(shape2, dtype=dtype, device=device, low=-2, high=2)
            ref = torch.from_numpy(
                b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
            ).to(device=device, dtype=dtype)
            out_tensor = torch.zeros_like(ref)
            yield b1, b2, ref, out_tensor

    for b1, b2, ref, out_tensor in generate_tensor():
        self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
@tf32_on_and_off(0.05)
def test_tensordot(self, device):
    """torch.tensordot vs np.tensordot: explicit axes, integer dims,
    default dims, the ``out=`` variant, and 0-dim inputs."""
    a = torch.arange(60.0, device=device).reshape(3, 4, 5)
    b = torch.arange(24.0, device=device).reshape(4, 3, 2)
    c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
    cn = torch.from_numpy(
        np.tensordot(a.cpu().numpy(), b.cpu().numpy(), axes=([1, 0], [0, 1]))
    )
    self.assertEqual(c, cn)

    cout = torch.zeros((5, 2), device=device)
    torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
    self.assertEqual(c, cout)

    a = torch.randn(2, 3, 4, 5, device=device)
    b = torch.randn(4, 5, 6, 7, device=device)
    c = torch.tensordot(a, b, dims=2).cpu()
    cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(), axes=2))

    with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
        torch.tensordot(a, b, dims=-1)

    self.assertEqual(c, cn)
    c = torch.tensordot(a, b).cpu()
    cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
    self.assertEqual(c, cn)

    a = torch.tensordot(torch.tensor(0.0), torch.tensor(0.0), 0)
    an = torch.from_numpy(
        np.tensordot(
            np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0
        )
    )
    self.assertEqual(a, an)
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 1e-4})
@tf32_on_and_off(0.005)
def test_1_sized_with_0_strided(self, device, dtype):
    """bmm with size-1 dimensions carrying stride 0 matches numpy."""
    a = make_tensor((8, 1, 64), dtype=dtype, device=device)
    a_strided = torch.as_strided(a, size=[8, 1, 64], stride=[64, 0, 1])
    b = make_tensor((8, 64, 512), dtype=dtype, device=device)
    b_strided = torch.as_strided(b, size=[8, 64, 512], stride=[64, 1, 512])
    res = torch.bmm(a_strided, b_strided)
    expect = torch.from_numpy(a_strided.cpu().numpy() @ b_strided.cpu().numpy()).to(
        device=device, dtype=dtype
    )
    self.assertEqual(expect, res)
def _select_broadcastable_dims(self, dims_full=None):
    """Randomly build a (dims_small, dims_large, dims_full) shape triple
    such that dims_small broadcasts against dims_large to dims_full.

    Uses the global ``random`` module, so results depend on its state.
    """
    # select full dimensionality
    if dims_full is None:
        dims_full = []
        ndims = random.randint(1, 4)
        dims_full = [random.randint(1, 8) for _ in range(ndims)]
    else:
        ndims = len(dims_full)

    # select actual dimensions for ops:
    # larger: full ndims, individual sizes may be reduced
    # smaller: possibly reduced ndims, sizes may be reduced
    smaller_ndims = random.randint(1, ndims)
    dims_small = []
    dims_large = []
    for i in range(ndims - 1, -1, -1):
        j = random.randint(1, 3)
        if j == 1:  # no reduced singleton dimension
            ds = dims_full[i]
            dl = dims_full[i]
        elif j == 2:  # larger may have reduced singleton dimension
            ds = dims_full[i]
            dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
        elif j == 3:  # smaller may have reduced singleton dimension
            ds = 1
            dl = dims_full[i]
        dims_large = [dl] + dims_large
        if len(dims_small) < smaller_ndims:
            dims_small = [ds] + dims_small
    return (dims_small, dims_large, dims_full)
@tf32_on_and_off(0.005)
def test_broadcast_fused_matmul(self, device):
    """Fused matmul ops give identical results whether the bias argument
    is pre-expanded or broadcast implicitly."""
    fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]

    for fn in fns:
        batch_dim = random.randint(1, 8)
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)

        def dims_full_for_fn():
            # Operand shapes (bias, input1, input2) per op family.
            if fn == "baddbmm":
                return (
                    [batch_dim, n_dim, p_dim],
                    [batch_dim, n_dim, m_dim],
                    [batch_dim, m_dim, p_dim],
                )
            elif fn == "addbmm":
                return (
                    [n_dim, p_dim],
                    [batch_dim, n_dim, m_dim],
                    [batch_dim, m_dim, p_dim],
                )
            elif fn == "addmm":
                return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
            elif fn == "addmv":
                return ([n_dim], [n_dim, m_dim], [m_dim])
            elif fn == "addr":
                return ([n_dim, m_dim], [n_dim], [m_dim])
            else:
                raise AssertionError("unknown function")

        (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
        (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)

        t0_small = torch.randn(*t0_dims_small, device=device).float()
        t1 = torch.randn(*t1_dims, device=device).float()
        t2 = torch.randn(*t2_dims, device=device).float()

        t0_full = t0_small.expand(*t0_dims_full).to(device)

        fntorch = getattr(torch, fn)
        r0 = fntorch(t0_small, t1, t2)
        r1 = fntorch(t0_full, t1, t2)
        self.assertEqual(r0, r1)
@dtypes(torch.float32, torch.float64)
@tf32_on_and_off(0.005)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
@tf32_on_and_off(0.005)
def test_mm_empty_inputs_mixed_dtype_errors(self, device):
a = torch.randint(0, 10, [1, 10], dtype=torch.int16, device=device)
b = torch.randn(10, 20, dtype=torch.float32, device=device)
with self.assertRaisesRegex(
RuntimeError, "expected .* and .* to have the same dtype, but got:"
):
torch.mm(a, b)
@tf32_on_and_off(0.005)
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
@dtypes(
torch.int16,
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
)
@tf32_on_and_off(0.005)
def test_baddbmm_input_dtypes_compatibility(self, device, dtype):
batch1 = torch.rand((1, 2, 2), dtype=torch.float32, device=device)
batch2 = torch.rand((1, 2, 2), dtype=torch.float32, device=device)
input_tensor = torch.rand((1, 2, 2), device=device).to(dtype)
if dtype != torch.float32:
with self.assertRaisesRegex(RuntimeError, "Input dtypes must be the same"):
torch.baddbmm(input_tensor, batch1, batch2, beta=0.0)
else:
out = torch.randn((1, 2, 2), dtype=dtype, device=device).fill_(torch.nan)
y_ref = torch.bmm(batch1, batch2)
torch.baddbmm(input_tensor, batch1, batch2, beta=0.0, out=out)
self.assertEqual(out, y_ref)
@dtypes(torch.float)
@tf32_on_and_off(0.005)
def test_baddbmm_nan_input_with_zero_beta(self, device, dtype):
for shape in [[3, 2, 2], [2, 20, 20]]:
mat1, mat2 = (
torch.randn(shape, dtype=dtype, device=device) for _ in range(2)
)
inputs = [
torch.randn(shape, dtype=dtype, device=device),
torch.randn(shape, dtype=dtype, device=device).fill_(torch.nan),
]
outs = [
None,
torch.randn(shape, dtype=dtype, device=device),
torch.randn(shape, dtype=dtype, device=device).fill_(torch.nan),
]
options = itertools.product(inputs, outs)
for input, out in options:
y_ref = torch.bmm(mat1, mat2)
y = torch.baddbmm(input, mat1, mat2, beta=0.0, out=out)
self.assertEqual(y_ref, y)
@precisionOverride({torch.double: 1e-6})
@dtypes(torch.float, torch.double)
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(
RuntimeError,
f"{n}x{k + 1}.*{k}x{m}",
lambda: torch.addmm(M, m1, m2),
)
self.assertRaisesRegex(
RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2)
)
@precisionOverride(
{
torch.double: 1e-6,
torch.float: 1e-4,
torch.bfloat16: 5e-2,
torch.half: 5e-2,
torch.cfloat: 1e-4,
torch.cdouble: 1e-8,
}
)
@dtypes(torch.double, torch.float32, torch.bfloat16, torch.half)
@tf32_on_and_off(0.05)
def test_addmm_gelu(self, device, dtype):
self._test_addmm_impl(torch._addmm_activation, "gelu", device, dtype)
@precisionOverride(
{
torch.double: 1e-6,
torch.float: 1e-4,
torch.bfloat16: 5e-2,
torch.half: 5e-2,
torch.cfloat: 1e-4,
torch.cdouble: 1e-8,
}
)
@dtypes(torch.double, torch.float32, torch.bfloat16, torch.half)
@tf32_on_and_off(0.05)
def test_addmm_relu(self, device, dtype):
self._test_addmm_impl(torch._addmm_activation, "relu", device, dtype)
@dtypes(torch.float, torch.bfloat16, torch.half)
@tf32_on_and_off(0.005)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
# tests (o, s)*(s). o is output size, s is summed size.
o = 5
s = 3
a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
y_data = torch.ones(o, device=device, dtype=dtype)
def _test(row_major, incx, incy, lda_tail):
if row_major:
a_storage = torch.full(
(o, s + lda_tail), float("nan"), device=device, dtype=dtype
)
else:
a_storage = torch.full(
(s, o + lda_tail), float("nan"), device=device, dtype=dtype
).permute(1, 0)
a = a_storage[:o, :s].copy_(a_data)
x_storage = torch.full((s, incx), float("nan"), device=device, dtype=dtype)
x = x_storage[:, 0].copy_(x_data)
y_storage = torch.full((o, incy), float("nan"), device=device, dtype=dtype)
y = y_storage[:, 0].copy_(y_data)
self._test_addmm_addmv(torch.addmv, y, a, x)
for row_major, incx, incy, lda_tail in itertools.product(
(False, True), (1, 2), (1, 2), (0, 1)
):
_test(row_major, incx, incy, lda_tail)
@precisionOverride(
{
torch.double: 1e-8,
torch.float: 1e-4,
torch.bfloat16: 0.6,
torch.half: 1e-1,
torch.cfloat: 1e-4,
torch.cdouble: 1e-8,
}
)
@dtypes(torch.double, torch.bfloat16, torch.half, torch.float32)
@tf32_on_and_off(0.005)
def test_corner_cases_of_cublasltmatmul(self, device, dtype):
# common case
M = torch.randn(128, device=device).to(dtype)
m1 = torch.randn(2048, 2400, device=device).to(dtype)
m2 = torch.randn(128, 2400, device=device).to(dtype)
torch.nn.functional.linear(m1, m2, M)
# Ntrans_B has ld >> rows
m1 = torch.rand([128, 2400]).to(dtype).to(device).t()
m2 = torch.rand([2048, 25272]).to(dtype).to(device).t()[21940:24340]
M = torch.rand([128]).to(dtype).to(device)
torch.addmm(M, m2.t(), m1)
# trans_A has ld >> rows
m1 = torch.rand([128, 25272]).to(dtype).to(device)[:, 21940:24340].t()
m2 = torch.randn(2048, 2400, device=device).to(dtype)
M = torch.rand([128]).to(dtype).to(device)
torch.addmm(M, m2, m1)
# large tensor dim > 65535
M = torch.randn(16, device=device).to(dtype)
m1 = torch.randn(32, 131071, device=device).to(dtype)
m2 = torch.randn(16, 131071, device=device).to(dtype)
torch.nn.functional.linear(m1, m2, M)
def test_blas_empty(self, device):
def fn(torchfn, *args, test_out=False, **kwargs):
def call_torch_fn(*args, **kwargs):
return torchfn(
*tuple(
torch.randn(shape, device=device)
if isinstance(shape, tuple)
else shape
for shape in args
),
**kwargs,
)
result = call_torch_fn(*args, **kwargs)
if not test_out:
return result
else:
out = torch.full_like(result, math.nan)
out1 = call_torch_fn(*args, **kwargs, out=out) # noqa: F841
# FIXME(rec): should this return out1?
return out
# mm, addmm
self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
self.assertEqual(
torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6))
)
self.assertEqual(
torch.zeros((5, 6), device=device),
fn(torch.mm, (5, 0), (0, 6), test_out=True),
)
self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
self.assertEqual((0, 1), fn(torch.addmm, (1,), (0, 17), (17, 1)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))
# mv, addmv
self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
self.assertEqual(
torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True)
)
self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
t = torch.randn((3,), device=device)
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))
# bmm, baddbmm
self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual(
torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6))
)
self.assertEqual(
torch.zeros((3, 5, 6), device=device),
fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True),
)
self.assertEqual(
(0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape
)
self.assertEqual(
(3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape
)
self.assertEqual(
(0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape
)
self.assertEqual(
(3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape
)
c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
self.assertEqual(
-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2)
) # Issue #33467
self.assertEqual(
-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True)
) # Issue #33467
# addbmm
self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))
# matmul
self.assertEqual(torch.tensor(0.0, device=device), fn(torch.matmul, (0,), (0,)))
self.assertEqual(
torch.tensor(0.0, device=device),
fn(torch.matmul, (0,), (0,), test_out=True),
)
self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
self.assertEqual(
torch.zeros((5, 3, 4), device=device),
fn(torch.matmul, (5, 3, 0), (5, 0, 4)),
)
self.assertEqual(
torch.zeros((5, 3, 4), device=device),
fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True),
)
# dot
self.assertEqual(torch.tensor(0.0, device=device), fn(torch.dot, (0,), (0,)))
self.assertEqual(
torch.tensor(0.0, device=device), fn(torch.dot, (0,), (0,), test_out=True)
)
@tf32_on_and_off(0.005)
def test_large_bmm_backward(self, device):
A = torch.randn([1024, 2, 1024], device=device).mT.contiguous().mT
B = torch.randn([1, 1024, 65536], device=device, requires_grad=True)
G = torch.randn([1024, 2, 65536], device=device)
# Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM
(A @ B).backward(G)
@tf32_on_and_off(0.005)
def test_large_bmm_mm_backward(self, device):
A = torch.randn([1024, 2, 1024], device=device).mT.contiguous().mT
B = torch.randn([1024, 65536], device=device, requires_grad=True)
G = torch.randn([1024, 2, 65536], device=device)
# Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM
(A @ B).backward(G)
def check_single_matmul(self, x, y):
def assertEqual(answer, expected):
if x.dtype.is_floating_point or x.dtype.is_complex:
k = max(x.shape[-1], 1) # Scale the atol with the size of the matrix
self.assertEqual(
answer,
expected,
msg=f"{x.shape} x {y.shape} = {answer.shape}",
atol=k * 5e-5,
rtol=1e-4,
)
else:
self.assertEqual(
answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}"
)
# test x @ y
expected = np.matmul(x.cpu(), y.cpu())
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
# test out
out = torch.empty_like(ans)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
def gen_sizes_matmul(self, x_dim, y_dim=4, matrix_size=4, batch_size=3):
"""
Generates sequences of tuples (x, y) of with size(x) = x_dim and
size(y) <= y_dim that are compatible wrt. matmul
"""
assert x_dim >= 1
assert y_dim >= 2
x = x_dim
for y in range(1, y_dim + 1):
for batch, mn in product(
product(range(batch_size), repeat=max(x - 2, y - 2, 0)),
product(range(matrix_size), repeat=min(y, 2)),
):
if x == 1:
size_x = mn[:1]
size_y = batch + mn
yield size_x, size_y
else:
for k in range(matrix_size):
size_x = (k,) + mn[:1]
if x > 2:
size_x = batch[-(x - 2) :] + size_x
size_y = mn
if y > 2:
size_y = batch[-(y - 2) :] + size_y
yield size_x, size_y
@dtypes(torch.float)
def test_matmul_small_brute_force_1d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(
self.gen_sizes_matmul(1), (True, False), (True, False)
):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypes(torch.float)
def test_matmul_small_brute_force_2d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(
self.gen_sizes_matmul(2), (True, False), (True, False)
):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypes(torch.float)
def test_matmul_small_brute_force_3d_Nd(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(
self.gen_sizes_matmul(3), (True, False), (True, False)
):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypes(torch.float)
@tf32_on_and_off(0.005)
def test_matmul_out_kernel_errors_with_autograd(self, device, dtype):
a = torch.empty(
(256, 512), device=device, dtype=dtype, requires_grad=True
).unsqueeze(0)
b = torch.empty(
(4, 128, 512), device=device, dtype=dtype, requires_grad=True
).transpose(-1, -2)
c = torch.empty((256, 4, 128), device=device, dtype=dtype).movedim(1, 0)
torch.matmul(a.detach(), b.detach(), out=c)
with self.assertRaisesRegex(
RuntimeError,
"functions with out=... arguments don't support automatic differentiation",
):
torch.matmul(a, b, out=c)
with torch.no_grad():
torch.matmul(a, b, out=c)
def _group_quantize_tensor(self, w, n_bit=4, q_group_size=16):
# w [k, n] = [32, 48]
assert w.dim() == 2
# w [n, k] = [48, 32]
w = w.transpose(0, 1).contiguous()
assert q_group_size > 1
assert w.shape[-1] % q_group_size == 0
# to_quant: [n * k / group_size, group_size]
to_quant = w.reshape(-1, q_group_size)
assert torch.isnan(to_quant).sum() == 0
max_val = to_quant.amax(dim=1, keepdim=True)
min_val = to_quant.amin(dim=1, keepdim=True)
max_int = 2**n_bit - 1
min_int = 0
scales = (max_val - min_val).clamp(min=1e-6) / max_int
assert torch.isnan(scales).sum() == 0
zeros = min_int - min_val.div(scales).round()
zeros = torch.clamp(zeros, min_int, max_int)
zeros = zeros.to(torch.int8)
assert torch.isnan(zeros).sum() == 0
out = to_quant.div(scales).add(zeros).round().clamp_(min_int, max_int)
assert torch.isnan(out).sum() == 0
# [n, k]
out = out.to(dtype=torch.int32).reshape(w.shape)
if out.device != torch.device("cpu"):
out = (out[::, 1::2] << 4 | out[::, 0::2]).to(torch.uint8)
# Scales and zeros for the same q-group should be contiguous, so we can
# load as a 32-bit word
scales = scales.view(w.shape[0], -1).transpose(0, 1).contiguous()
zeros = zeros.view(w.shape[0], -1).transpose(0, 1).contiguous()
return out, scales, zeros
@parametrize("m", [128])
@parametrize("k", [512, 1024])
@parametrize("n", [512, 1024])
def test__int4_mm(self, device, m, k, n):
q_group = 32
inner_k_tiles = 2
torch.manual_seed(1)
a_bf16 = torch.rand((m, k), dtype=torch.float32, device=device)
b_bf16 = torch.rand((k, n), dtype=torch.float32, device=device)
def convert_weight_to_int4pack(b):
# b_uint8 [n, k //2]
b_uint8, scales, zeros = self._group_quantize_tensor(
b, n_bit=4, q_group_size=q_group
)
# b_int4pack [k//8, n]
b_int4pack = torch._convert_weight_to_int4pack(b_uint8, inner_k_tiles)
return b_int4pack, scales, zeros
def weight_int4pack_mm(a, b_int4pack, qscale, qzeros):
return torch._weight_int4pack_mm_with_scales_and_zeros(
a, b_int4pack, q_group, qscale, qzeros
)
b_int4pack, b_scales, zeros_int8 = convert_weight_to_int4pack(b_bf16)
for dtype in [torch.bfloat16, torch.float16]:
a = a_bf16.to(dtype=dtype)
b = b_bf16.to(dtype=dtype)
b_scales = b_scales.to(dtype=dtype)
ref = torch.mm(a, b)
res = weight_int4pack_mm(a, b_int4pack, b_scales, zeros_int8)
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
def test_mm_with_offset(self, device):
from torch._dynamo.testing import rand_strided
offset = 997
a = rand_strided(
(2, 4, 128, 64),
(65536, 16384, 64, 1),
dtype=torch.float16,
device=device,
extra_size=offset,
)
a = a.as_strided((2, 4, 128, 64), (65536, 16384, 64, 1), storage_offset=offset)
b = rand_strided(
(2, 4, 64, 256), (65536, 16384, 1, 64), dtype=torch.float16, device=device
)
gpu_out = torch.matmul(a, b)
cpu_out = torch.matmul(a.cpu(), b.cpu())
self.assertEqual(gpu_out.cpu(), cpu_out)
@parametrize("m", [0, 8, 17])
@parametrize("k", [0, 16, 32])
@parametrize("n", [16, 32])
@parametrize("use_transpose_a", [True, False])
@parametrize("use_transpose_b", [True, False])
@parametrize("non_contig_type", [0, 1, 2])
def test__int_mm(
self, device, m, k, n, use_transpose_a, use_transpose_b, non_contig_type
):
# non_contig_type:
# 0: the whole data buffer is contiguous (can be transposed)
# 1: stride of one dimension is 1, but the whole buffer is not contiguous
# 2: Neither stride is 1
def genf_int_float(x, y, use_transpose, non_contig_type):
if use_transpose:
x, y = y, x
if non_contig_type != 0:
y = y * 2
x_int8 = torch.randint(-128, 127, (x, y), dtype=torch.int8, device=device)
x_float = x_int8.to(torch.float32)
if non_contig_type == 1:
x_int8 = x_int8[:, : y // 2]
x_float = x_float[:, : y // 2]
elif non_contig_type == 2:
x_int8 = x_int8[:, ::2]
x_float = x_float[:, ::2]
if use_transpose:
return x_int8.t(), x_float.t()
return x_int8, x_float
if non_contig_type != 0 and (m == 0 or k == 0):
return
a_int8, a_float = genf_int_float(m, k, use_transpose_a, non_contig_type)
b_int8, b_float = genf_int_float(k, n, use_transpose_b, non_contig_type)
c_int32 = torch._int_mm(a_int8, b_int8)
self.assertTrue(c_int32.dtype is torch.int32)
self.assertEqual(c_int32.device, torch.device(device))
self.assertEqual(c_int32.float(), torch.mm(a_float, b_float))
c_int32_result = c_int32.new_empty(c_int32.size())
# Checking out variant
torch._int_mm(a_int8, b_int8, out=c_int32_result)
self.assertEqual(c_int32_result.float(), torch.mm(a_float, b_float))
def test_out_dtype_inductor_decomp_trace(self, device) -> None:
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device=device)
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device=device)
# Check that make_fx with inductor decomps produces _int_mm
decomp_table = torch._inductor.decomposition.select_decomp_table()
gm = make_fx(func, decomp_table, tracing_mode="symbolic")(x, w)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x_1, w_1):
_int_mm = torch.ops.aten._int_mm.default(x_1, w_1); x_1 = w_1 = None
return _int_mm""",
)
def test_out_dtype_int_mm_default_trace(self, device) -> None:
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device=device)
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device=device)
# By default, out_dtype is preserved in the trace
gm = make_fx(func, tracing_mode="symbolic")(x, w)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x_1, w_1):
out_dtype = torch.ops.higher_order.out_dtype(torch.ops.aten.mm.default, torch.int32, x_1, w_1); x_1 = w_1 = None
return out_dtype""",
)
@onlyNativeDeviceTypes
@parametrize("m", [32, 64])
@parametrize("k", [32, 64])
@parametrize("n", [48, 64])
@parametrize("compile", [True, False])
@parametrize("slice", [True, False])
def test__int8_mm(self, device, m, k, n, compile, slice):
torch.manual_seed(1)
if slice:
# logits are generated from LLaMA LM head like this -
# the activation to LM head is a slice of final hidden state
# of shape (batch_size, sequence_length, hidden dim),
# but is non-contiguous
# Using arbitrary batch-size here, since it'd be converted to 2D
batch_size = 4
a = torch.rand((batch_size, m, k), dtype=torch.bfloat16, device=device)
# Make a non-contiguous
a = a[:, -1:, :]
a = a.view(-1, a.size(-1))
else:
a = torch.rand((m, k), dtype=torch.bfloat16, device=device)
b = torch.rand((n, k), dtype=torch.bfloat16, device=device)
def convert_weight_to_int8pack(b):
b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
b, -128, 127, torch.int8
)
return b_int8pack, b_scales
def weight_int8pack_mm(a, b_int8pack, b_scales):
return torch._weight_int8pack_mm(a, b_int8pack, b_scales)
b_int8pack, b_scales = convert_weight_to_int8pack(b)
if compile:
mod = torch.compile(weight_int8pack_mm)
else:
mod = weight_int8pack_mm
res = mod(a, b_int8pack, b_scales)
ref = torch.mm(a, b.transpose(0, 1))
mean_err = ((res - ref).abs() / ref).mean()
self.assertTrue(mean_err < 0.05)
instantiate_device_type_tests(TestBasicGEMM, globals(), only_for="xpu", allow_xpu=True)
if __name__ == "__main__":
run_tests()
| TestBasicGEMM |
python | spyder-ide__spyder | spyder/plugins/findinfiles/plugin.py | {
"start": 786,
"end": 940
} | class ____:
FindInFiles = 'find in files'
# --- Plugin
# ----------------------------------------------------------------------------
| FindInFilesActions |
python | allegroai__clearml | clearml/binding/fire_bind.py | {
"start": 859,
"end": 15785
} | class ____:
_args = {}
_command_type = "fire.Command"
_command_arg_type_template = "fire.Arg@%s"
_shared_arg_type = "fire.Arg.shared"
_section_name = "Args"
_args_sep = "/"
_commands_sep = "."
_current_task = None
__remote_task_params = None
__remote_task_params_dict = {}
__patched = False
__groups = []
__commands = {}
__default_args = SimpleNamespace(
completion=None,
help=False,
interactive=False,
separator="-",
trace=False,
verbose=False,
)
__current_command = None
__fetched_current_command = False
__command_args = {}
@classmethod
def patch(cls, task: Optional["Task"] = None) -> None:
if fire is None:
return
cls._current_task = task
if task:
cls._update_task_args()
if not cls.__patched:
cls.__patched = True
if running_remotely():
fire.core._Fire = _patched_call_no_recursion_guard(fire.core._Fire, PatchFire.__Fire)
else:
fire.core._CallAndUpdateTrace = _patched_call_no_recursion_guard(
fire.core._CallAndUpdateTrace, PatchFire.__CallAndUpdateTrace
)
@classmethod
def _update_task_args(cls) -> None:
if running_remotely() or not cls._current_task:
return
args = {}
parameters_types = {}
if cls.__current_command is None:
args = {cls._section_name + cls._args_sep + k: v for k, v in cls._args.items()}
parameters_types = {cls._section_name + cls._args_sep + k: cls._shared_arg_type for k in cls._args.keys()}
for k in PatchFire.__command_args.get(None) or []:
k = cls._section_name + cls._args_sep + k
if k not in args:
args[k] = None
else:
args[cls._section_name + cls._args_sep + cls.__current_command] = True
parameters_types[cls._section_name + cls._args_sep + cls.__current_command] = cls._command_type
args.update(
{
cls._section_name + cls._args_sep + cls.__current_command + cls._args_sep + k: v
for k, v in cls._args.items()
if k in (PatchFire.__command_args.get(cls.__current_command) or [])
}
)
args.update(
{
cls._section_name + cls._args_sep + k: v
for k, v in cls._args.items()
if k not in (PatchFire.__command_args.get(cls.__current_command) or [])
}
)
parameters_types.update(
{
cls._section_name
+ cls._args_sep
+ cls.__current_command
+ cls._args_sep
+ k: cls._command_arg_type_template % cls.__current_command
for k in cls._args.keys()
if k in (PatchFire.__command_args.get(cls.__current_command) or [])
}
)
parameters_types.update(
{
cls._section_name + cls._args_sep + k: cls._shared_arg_type
for k in cls._args.keys()
if k not in (PatchFire.__command_args.get(cls.__current_command) or [])
}
)
for command in cls.__commands:
if command == cls.__current_command:
continue
args[cls._section_name + cls._args_sep + command] = False
parameters_types[cls._section_name + cls._args_sep + command] = cls._command_type
unused_command_args = {
cls._section_name + cls._args_sep + command + cls._args_sep + k: None
for k in (cls.__command_args.get(command) or [])
}
unused_paramenters_types = {
cls._section_name
+ cls._args_sep
+ command
+ cls._args_sep
+ k: cls._command_arg_type_template % command
for k in (cls.__command_args.get(command) or [])
}
args.update(unused_command_args)
parameters_types.update(unused_paramenters_types)
# noinspection PyProtectedMember
cls._current_task._set_parameters(
args,
__update=True,
__parameters_types=parameters_types,
)
@staticmethod
def __Fire(
original_fn: Callable,
component: Any,
args_: List[str],
parsed_flag_args: Dict[str, Any],
context: Dict[str, Any],
name: str,
*args: Any,
**kwargs: Any
) -> Any: # noqa
if not running_remotely():
return original_fn(component, args_, parsed_flag_args, context, name, *args, **kwargs)
command = PatchFire._load_task_params()
if command is not None:
replaced_args = command.split(PatchFire._commands_sep)
else:
replaced_args = []
for param in PatchFire.__remote_task_params[PatchFire._section_name].values():
if command is not None and param.type == PatchFire._command_arg_type_template % command:
replaced_args.append("--" + param.name[len(command + PatchFire._args_sep) :])
value = PatchFire.__remote_task_params_dict[param.name]
if len(value) > 0:
replaced_args.append(value)
if param.type == PatchFire._shared_arg_type:
replaced_args.append("--" + param.name)
value = PatchFire.__remote_task_params_dict[param.name]
if len(value) > 0:
replaced_args.append(value)
return original_fn(component, replaced_args, parsed_flag_args, context, name, *args, **kwargs)
@staticmethod
def __CallAndUpdateTrace( # noqa
original_fn: Callable,
component: Any,
args_: List[str],
component_trace: "fire.trace.FireTrace",
treatment: str,
target: Optional[Any] = None,
*args: Any,
**kwargs: Any
) -> Any:
if running_remotely():
return original_fn(component, args_, component_trace, treatment, target, *args, **kwargs)
if not PatchFire.__fetched_current_command:
PatchFire.__fetched_current_command = True
context, component_context = PatchFire.__get_context_and_component(component)
(
PatchFire.__groups,
PatchFire.__commands,
) = PatchFire.__get_all_groups_and_commands(component_context, context)
PatchFire.__current_command = PatchFire.__get_current_command(
args_, PatchFire.__groups, PatchFire.__commands
)
for command in PatchFire.__commands:
PatchFire.__command_args[command] = PatchFire.__get_command_args(
component_context,
command.split(PatchFire._commands_sep),
PatchFire.__default_args,
context,
)
PatchFire.__command_args[None] = PatchFire.__get_command_args(
component_context,
"",
PatchFire.__default_args,
context,
)
for k, v in PatchFire.__commands.items():
if v == component:
PatchFire.__current_command = k
break
# Comparing methods in Python is equivalent to comparing the __func__ of the methods
# and the objects they are bound to. We do not care about the object in this case,
# so we just compare the __func__
if inspect.ismethod(component) and inspect.ismethod(v) and v.__func__ == component.__func__:
PatchFire.__current_command = k
break
fn = component.__call__ if treatment == "callable" else component
metadata = fire.decorators.GetMetadata(component)
fn_spec = fire.inspectutils.GetFullArgSpec(component)
parse = fire.core._MakeParseFn(fn, metadata) # noqa
(parsed_args, parsed_kwargs), _, _, _ = parse(args_)
PatchFire._args.update({k: v for k, v in zip(fn_spec.args, parsed_args)})
PatchFire._args.update(parsed_kwargs)
PatchFire._update_task_args()
return original_fn(component, args_, component_trace, treatment, target, *args, **kwargs)
@staticmethod
def __get_context_and_component(
component: Any,
) -> Tuple[Dict[str, Any], Any]:
context = {}
component_context = component
# Walk through the stack to find the arguments with fire.Fire() has been called.
# Can't do it by patching the function because we want to patch _CallAndUpdateTrace,
# which is called by fire.Fire()
frame_infos = inspect.stack()
for frame_info_ind, frame_info in enumerate(frame_infos):
if frame_info.function == "Fire":
component_context = inspect.getargvalues(frame_info.frame).locals["component"]
if inspect.getargvalues(frame_info.frame).locals["component"] is None:
# This is similar to how fire finds this context
fire_context_frame = frame_infos[frame_info_ind + 1].frame
context.update(fire_context_frame.f_globals)
context.update(fire_context_frame.f_locals)
# Ignore modules, as they yield too many commands.
# Also ignore clearml.task.
context = {
k: v
for k, v in context.items()
if not inspect.ismodule(v) and (not inspect.isclass(v) or v.__module__ != "clearml.task")
}
break
return context, component_context
@staticmethod
def __get_all_groups_and_commands(component: Any, context: Dict[str, Any]) -> Tuple[List[str], Dict[str, Any]]:
groups = []
commands = {}
# skip modules
if inspect.ismodule(component):
return groups, commands
component_trace_result = PatchFire.__safe_Fire(component, [], PatchFire.__default_args, context)
group_args = [[]]
while len(group_args) > 0:
query_group = group_args[-1]
groups.append(PatchFire._commands_sep.join(query_group))
group_args = group_args[:-1]
(
current_groups,
current_commands,
) = PatchFire.__get_groups_and_commands_for_args(
component_trace_result, query_group, PatchFire.__default_args, context
)
for command in current_commands:
prefix = (
PatchFire._commands_sep.join(query_group) + PatchFire._commands_sep if len(query_group) > 0 else ""
)
commands[prefix + command[0]] = command[1]
for group in current_groups:
group_args.append(query_group + [group[0]])
return groups, commands
@staticmethod
def __get_groups_and_commands_for_args(
component: Any,
args_: List[str],
parsed_flag_args: Dict[str, Any],
context: Dict[str, Any],
name: Optional[str] = None,
) -> Tuple[List[Tuple[str, Any]], List[Tuple[str, Any]]]:
component_trace = PatchFire.__safe_Fire(component, args_, parsed_flag_args, context, name=name)
# set verbose to True or else we might miss some commands
groups, commands, _, _ = fire.helptext._GetActionsGroupedByKind(component_trace, verbose=True) # noqa
groups = [(name, member) for name, member in groups.GetItems()]
commands = [(name, member) for name, member in commands.GetItems()]
return groups, commands
@staticmethod
def __get_current_command(args_: List[str], groups: List[str], commands: Dict[str, Any]) -> Optional[str]:
current_command = ""
for arg in args_:
prefix = (current_command + PatchFire._commands_sep) if len(current_command) > 0 else ""
potential_current_command = prefix + arg
if potential_current_command not in groups:
if potential_current_command in commands:
return potential_current_command
else:
return None
current_command = potential_current_command
return None
@staticmethod
def __get_command_args(
component: Any,
args_: List[str],
parsed_flag_args: Dict[str, Any],
context: Dict[str, Any],
name: Optional[str] = None,
) -> List[str]:
component_trace = PatchFire.__safe_Fire(component, args_, parsed_flag_args, context, name=None)
fn_spec = fire.inspectutils.GetFullArgSpec(component_trace)
return fn_spec.args
@staticmethod
def __safe_Fire(
component: Any,
args_: List[str],
parsed_flag_args: Dict[str, Any],
context: Dict[str, Any],
name: Optional[str] = None,
) -> Any:
orig = None
# noinspection PyBroadException
try:
def __CallAndUpdateTrace_rogue_call_guard(*args: Any, **kwargs: Any) -> None:
raise fire.core.FireError()
orig = fire.core._CallAndUpdateTrace # noqa
fire.core._CallAndUpdateTrace = __CallAndUpdateTrace_rogue_call_guard # noqa
result = fire.core._Fire(component, args_, parsed_flag_args, context, name=name).GetResult() # noqa
except Exception:
result = None
finally:
if orig:
fire.core._CallAndUpdateTrace = orig # noqa
return result
@staticmethod
def _load_task_params() -> Optional[str]:
    """Fetch (once) the remotely stored Fire parameters and return the stored command.

    Populates the class-level caches ``__remote_task_params`` (raw hyperparam
    sections from the remote task) and ``__remote_task_params_dict`` (this
    section's values keyed by bare argument name). Returns the command name
    whose parameter is typed as a command and has a truthy value, or ``None``
    if no such parameter exists.
    """
    if not PatchFire.__remote_task_params:
        from clearml import Task

        t = Task.get_task(task_id=get_remote_task_id())
        # noinspection PyProtectedMember
        PatchFire.__remote_task_params = t._get_task_property("hyperparams") or {}
        params_dict = t.get_parameters(backwards_compatibility=False)
        # Strip the "<section><sep>" prefix so cached keys are plain argument names.
        skip = len(PatchFire._section_name) + 1
        PatchFire.__remote_task_params_dict = {
            k[skip:]: v
            for k, v in params_dict.items()
            if k.startswith(PatchFire._section_name + PatchFire._args_sep)
        }
    # The launched command was recorded as a command-typed param with a truthy value.
    command = [
        p.name
        for p in PatchFire.__remote_task_params[PatchFire._section_name].values()
        if p.type == PatchFire._command_type and cast_str_to_bool(p.value, strip=True)
    ]
    return command[0] if command else None
# Apply the Fire patch at import time, before anything else can call into fire.
PatchFire.patch()
| PatchFire |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/transfers/test_vertica_to_hive.py | {
"start": 1349,
"end": 2313
} | class ____:
def setup_method(self):
    # Minimal DAG (no schedule) that the operator under test attaches to.
    args = {"owner": "airflow", "start_date": datetime.datetime(2017, 1, 1)}
    self.dag = DAG("test_dag_id", schedule=None, default_args=args)
@pytest.mark.db_test
@mock.patch(
    "airflow.providers.apache.hive.transfers.vertica_to_hive.VerticaHook.get_conn",
    side_effect=mock_get_conn,
)
@mock.patch("airflow.providers.apache.hive.transfers.vertica_to_hive.HiveCliHook.load_file")
def test_select_insert_transfer(self, *args):
    """Select rows from Vertica and load them into Hive, with both hooks mocked.

    The Vertica connection and the Hive ``load_file`` call are patched out, so
    this only verifies that ``execute`` drives the transfer without raising.
    """
    task = VerticaToHiveOperator(
        task_id="test_task_id",
        sql="select a, b, c",
        hive_table="test_table",
        vertica_conn_id="test_vertica_conn_id",
        hive_cli_conn_id="hive_cli_default",
        dag=self.dag,
    )
    task.execute(None)
| TestVerticaToHiveTransfer |
python | realpython__materials | python-property/circle_v2.py | {
"start": 0,
"end": 409
} | class ____:
def __init__(self, radius):
    # Backing attribute managed by the `radius` property defined on this class.
    self._radius = radius
@property
def radius(self):
    """The radius property; logs every read to stdout."""
    print("Get radius")
    return self._radius
@radius.setter
def radius(self, value):
    # Log writes; no validation is performed on the new value.
    print("Set radius")
    self._radius = value
@radius.deleter
def radius(self):
    # Log deletion and remove the backing attribute entirely;
    # subsequent reads will raise AttributeError.
    print("Delete radius")
    del self._radius
| Circle |
python | doocs__leetcode | solution/1800-1899/1826.Faulty Sensor/Solution.py | {
"start": 0,
"end": 428
} | class ____:
def badSensor(self, sensor1: List[int], sensor2: List[int]) -> int:
    """Identify the defective sensor (1 or 2), or -1 if it cannot be determined.

    Skip the common prefix where both readings agree, then from the first
    mismatch check which sensor's remaining readings match the other sensor
    shifted left by one (i.e. which one dropped a value).
    """
    n = len(sensor1)
    start = n - 1
    for j in range(n - 1):
        if sensor1[j] != sensor2[j]:
            start = j
            break
    for j in range(start, n - 1):
        if sensor2[j] != sensor1[j + 1]:
            # sensor2 cannot be sensor1 with one reading dropped -> sensor1 is bad.
            return 1
        if sensor1[j] != sensor2[j + 1]:
            return 2
    # Both shifted alignments hold to the end: ambiguous.
    return -1
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 858687,
"end": 858876
} | class ____(VegaLiteSchema):
"""ParseValue schema wrapper."""
_schema = {"$ref": "#/definitions/ParseValue"}
def __init__(self, *args):
    # Pure positional passthrough to the VegaLiteSchema base constructor.
    super().__init__(*args)
| ParseValue |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.