language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 1973,
"end": 2255
} | class ____(models.Model):
i1 = models.IntegerField()
i2 = models.SmallIntegerField()
i3 = models.BigIntegerField()
p1 = models.PositiveIntegerField()
p2 = models.PositiveSmallIntegerField()
d = models.DecimalField(decimal_places=2, max_digits=5)
| ManyNumerics |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 5312,
"end": 6227
} | class ____:
def __init__(self, x: object) -> None: pass
def __add__(self, n: float) -> float: pass
def __radd__(self, n: float) -> float: pass
def __sub__(self, n: float) -> float: pass
def __rsub__(self, n: float) -> float: pass
def __mul__(self, n: float) -> float: pass
def __truediv__(self, n: float) -> float: pass
def __floordiv__(self, n: float) -> float: pass
def __mod__(self, n: float) -> float: pass
def __pow__(self, n: float) -> float: pass
def __neg__(self) -> float: pass
def __pos__(self) -> float: pass
def __abs__(self) -> float: pass
def __invert__(self) -> float: pass
def __eq__(self, x: object) -> bool: pass
def __ne__(self, x: object) -> bool: pass
def __lt__(self, x: float) -> bool: ...
def __le__(self, x: float) -> bool: ...
def __gt__(self, x: float) -> bool: ...
def __ge__(self, x: float) -> bool: ...
| float |
python | cherrypy__cherrypy | cherrypy/lib/cptools.py | {
"start": 23035,
"end": 25080
} | class ____(_httputil.HeaderMap):
"""An access-tracked HTTP header mapping."""
def transform_key(self, key):
"""Normalize and track an HTTP header name."""
self.accessed_headers.add(key)
return super(MonitoredHeaderMap, self).transform_key(key)
def __init__(self):
"""Initialize a monitored HTTP header mapping."""
self.accessed_headers = set()
super(MonitoredHeaderMap, self).__init__()
def autovary(ignore=None, debug=False):
"""Populate ``Vary`` response header based on ``request.header`` access."""
request = cherrypy.serving.request
req_h = request.headers
request.headers = MonitoredHeaderMap()
request.headers.update(req_h)
if ignore is None:
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
def set_response_header():
resp_h = cherrypy.serving.response.headers
v = set([e.value for e in resp_h.elements('Vary')])
if debug:
cherrypy.log(
'Accessed headers: %s' % request.headers.accessed_headers,
'TOOLS.AUTOVARY',
)
v = v.union(request.headers.accessed_headers)
v = v.difference(ignore)
v = list(v)
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
def convert_params(exception=ValueError, error=400):
"""Convert request params based on function annotations.
This function also processes errors that are subclasses of ``exception``.
:param BaseException exception: Exception class to catch.
:type exception: BaseException
:param error: The HTTP status code to return to the client on failure.
:type error: int
"""
request = cherrypy.serving.request
types = request.handler.callable.__annotations__
with cherrypy.HTTPError.handle(exception, error):
for key in set(types).intersection(request.params):
request.params[key] = types[key](request.params[key])
| MonitoredHeaderMap |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-oci-data-science/tests/test_oci_data_science_client.py | {
"start": 3723,
"end": 4389
} | class ____:
"""Unit tests for _create_retry_decorator function."""
def test_create_retry_decorator(self):
"""Ensures the retry decorator is created with correct parameters."""
max_retries = 5
backoff_factor = 2
random_exponential = False
stop_after_delay_seconds = 100
min_seconds = 1
max_seconds = 10
retry_decorator = _create_retry_decorator(
max_retries,
backoff_factor,
random_exponential,
stop_after_delay_seconds,
min_seconds,
max_seconds,
)
assert callable(retry_decorator)
| TestCreateRetryDecorator |
python | python-openxml__python-docx | src/docx/section.py | {
"start": 762,
"end": 9143
} | class ____:
"""Document section, providing access to section and page setup settings.
Also provides access to headers and footers.
"""
def __init__(self, sectPr: CT_SectPr, document_part: DocumentPart):
super(Section, self).__init__()
self._sectPr = sectPr
self._document_part = document_part
@property
def bottom_margin(self) -> Length | None:
"""Read/write. Bottom margin for pages in this section, in EMU.
`None` when no bottom margin has been specified. Assigning |None| removes any
bottom-margin setting.
"""
return self._sectPr.bottom_margin
@bottom_margin.setter
def bottom_margin(self, value: int | Length | None):
self._sectPr.bottom_margin = value
@property
def different_first_page_header_footer(self) -> bool:
"""True if this section displays a distinct first-page header and footer.
Read/write. The definition of the first-page header and footer are accessed
using :attr:`.first_page_header` and :attr:`.first_page_footer` respectively.
"""
return self._sectPr.titlePg_val
@different_first_page_header_footer.setter
def different_first_page_header_footer(self, value: bool):
self._sectPr.titlePg_val = value
@property
def even_page_footer(self) -> _Footer:
"""|_Footer| object defining footer content for even pages.
The content of this footer definition is ignored unless the document setting
:attr:`~.Settings.odd_and_even_pages_header_footer` is set True.
"""
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.EVEN_PAGE)
@property
def even_page_header(self) -> _Header:
"""|_Header| object defining header content for even pages.
The content of this header definition is ignored unless the document setting
:attr:`~.Settings.odd_and_even_pages_header_footer` is set True.
"""
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.EVEN_PAGE)
@property
def first_page_footer(self) -> _Footer:
"""|_Footer| object defining footer content for the first page of this section.
The content of this footer definition is ignored unless the property
:attr:`.different_first_page_header_footer` is set True.
"""
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.FIRST_PAGE)
@property
def first_page_header(self) -> _Header:
"""|_Header| object defining header content for the first page of this section.
The content of this header definition is ignored unless the property
:attr:`.different_first_page_header_footer` is set True.
"""
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.FIRST_PAGE)
@lazyproperty
def footer(self) -> _Footer:
"""|_Footer| object representing default page footer for this section.
The default footer is used for odd-numbered pages when separate odd/even footers
are enabled. It is used for both odd and even-numbered pages otherwise.
"""
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.PRIMARY)
@property
def footer_distance(self) -> Length | None:
"""Distance from bottom edge of page to bottom edge of the footer.
Read/write. |None| if no setting is present in the XML.
"""
return self._sectPr.footer
@footer_distance.setter
def footer_distance(self, value: int | Length | None):
self._sectPr.footer = value
@property
def gutter(self) -> Length | None:
"""|Length| object representing page gutter size in English Metric Units.
Read/write. The page gutter is extra spacing added to the `inner` margin to
ensure even margins after page binding. Generally only used in book-bound
documents with double-sided and facing pages.
This setting applies to all pages in this section.
"""
return self._sectPr.gutter
@gutter.setter
def gutter(self, value: int | Length | None):
self._sectPr.gutter = value
@lazyproperty
def header(self) -> _Header:
"""|_Header| object representing default page header for this section.
The default header is used for odd-numbered pages when separate odd/even headers
are enabled. It is used for both odd and even-numbered pages otherwise.
"""
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.PRIMARY)
@property
def header_distance(self) -> Length | None:
"""Distance from top edge of page to top edge of header.
Read/write. |None| if no setting is present in the XML. Assigning |None| causes
default value to be used.
"""
return self._sectPr.header
@header_distance.setter
def header_distance(self, value: int | Length | None):
self._sectPr.header = value
def iter_inner_content(self) -> Iterator[Paragraph | Table]:
"""Generate each Paragraph or Table object in this `section`.
Items appear in document order.
"""
for element in self._sectPr.iter_inner_content():
yield (Paragraph(element, self) if isinstance(element, CT_P) else Table(element, self))
@property
def left_margin(self) -> Length | None:
"""|Length| object representing the left margin for all pages in this section in
English Metric Units."""
return self._sectPr.left_margin
@left_margin.setter
def left_margin(self, value: int | Length | None):
self._sectPr.left_margin = value
@property
def orientation(self) -> WD_ORIENTATION:
""":ref:`WdOrientation` member specifying page orientation for this section.
One of ``WD_ORIENT.PORTRAIT`` or ``WD_ORIENT.LANDSCAPE``.
"""
return self._sectPr.orientation
@orientation.setter
def orientation(self, value: WD_ORIENTATION | None):
self._sectPr.orientation = value
@property
def page_height(self) -> Length | None:
"""Total page height used for this section.
This value is inclusive of all edge spacing values such as margins.
Page orientation is taken into account, so for example, its expected value
would be ``Inches(8.5)`` for letter-sized paper when orientation is landscape.
"""
return self._sectPr.page_height
@page_height.setter
def page_height(self, value: Length | None):
self._sectPr.page_height = value
@property
def page_width(self) -> Length | None:
"""Total page width used for this section.
This value is like "paper size" and includes all edge spacing values such as
margins.
Page orientation is taken into account, so for example, its expected value
would be ``Inches(11)`` for letter-sized paper when orientation is landscape.
"""
return self._sectPr.page_width
@page_width.setter
def page_width(self, value: Length | None):
self._sectPr.page_width = value
@property
def part(self) -> StoryPart:
return self._document_part
@property
def right_margin(self) -> Length | None:
"""|Length| object representing the right margin for all pages in this section
in English Metric Units."""
return self._sectPr.right_margin
@right_margin.setter
def right_margin(self, value: Length | None):
self._sectPr.right_margin = value
@property
def start_type(self) -> WD_SECTION_START:
"""Type of page-break (if any) inserted at the start of this section.
For exmple, ``WD_SECTION_START.ODD_PAGE`` if the section should begin on the
next odd page, possibly inserting two page-breaks instead of one.
"""
return self._sectPr.start_type
@start_type.setter
def start_type(self, value: WD_SECTION_START | None):
self._sectPr.start_type = value
@property
def top_margin(self) -> Length | None:
"""|Length| object representing the top margin for all pages in this section in
English Metric Units."""
return self._sectPr.top_margin
@top_margin.setter
def top_margin(self, value: Length | None):
self._sectPr.top_margin = value
| Section |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 5100,
"end": 7995
} | class ____:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y = ifft(x.astype(self.cdt))
y2 = numpy.fft.ifft(x.astype(self.cdt))
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
y = ifft(x)
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
reason="Long double is aliased to double")
| _TestIFFTBase |
python | google__jax | jaxlib/xla_client.py | {
"start": 12103,
"end": 14758
} | class ____(Protocol):
def __call__(
self,
name: str,
fn: Any,
platform: str,
/,
api_version: int = ...,
traits: CustomCallTargetTraits = ...,
) -> None:
...
_custom_callback_handler: dict[str, CustomCallHandler] = {}
# Key is xla_platform_name, value is (function_name, function, api_version)
_custom_callback: dict[
str, list[tuple[str, Any, int, CustomCallTargetTraits]]
] = {}
_custom_callback_lock = threading.Lock()
def register_custom_call_target(
name: str,
fn: Any,
platform: str = 'cpu',
api_version: int = 0,
traits: CustomCallTargetTraits = CustomCallTargetTraits.DEFAULT,
) -> None:
"""Registers a custom call target.
Args:
name: bytes containing the name of the function.
fn: a PyCapsule object containing the function pointer.
platform: the target platform.
api_version: the XLA FFI version to use. Supported versions are: 0 for the
untyped FFI and 1 for the typed FFI.
traits: custom call traits corresponding to XLA FFI handler traits.
"""
# To support AMD GPUs, we need to have xla_platform_names["gpu"] == "ROCM"
# Since that is hardcoded to CUDA, we are using the following as workaround.
xla_platform_name = xla_platform_names.get(platform, platform)
with _custom_callback_lock:
if xla_platform_name in _custom_callback_handler:
_custom_callback_handler[xla_platform_name](
name, fn, xla_platform_name, api_version, traits
)
else:
_custom_callback.setdefault(xla_platform_name, []).append(
(name, fn, api_version, traits)
)
def register_custom_call_handler(
platform: str, handler: CustomCallHandler
) -> None:
"""Registers a custom handler and use it to register existing custom calls.
If a custom call handler for the platform already exist, calling this method
is a no-op and it will not register a new handler.
Args:
platform: the target platform.
handler: the function to register a custom call.
"""
xla_platform_name = xla_platform_names.get(platform, platform)
with _custom_callback_lock:
if xla_platform_name in _custom_callback_handler:
logger.debug(
'Custom call handler for %s is already register. Will not register a'
' new one',
xla_platform_name,
)
return
_custom_callback_handler[xla_platform_name] = handler
if xla_platform_name in _custom_callback:
for name, fn, api_version, traits in _custom_callback[xla_platform_name]:
handler(name, fn, xla_platform_name, api_version, traits)
del _custom_callback[xla_platform_name]
| CustomCallHandler |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 443073,
"end": 443739
} | class ____(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super().__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(struct %s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.defaults_class_key).cname)
| DefaultNonLiteralArgNode |
python | davidhalter__jedi | jedi/inference/gradual/typing.py | {
"start": 15966,
"end": 17255
} | class ____(LazyValueWrapper):
"""Represents the instance version of ``TypedDictClass``."""
def __init__(self, definition_class):
self.inference_state = definition_class.inference_state
self.parent_context = definition_class.parent_context
self.tree_node = definition_class.tree_node
self._definition_class = definition_class
@property
def name(self):
return ValueName(self, self.tree_node.name)
def py__simple_getitem__(self, index):
if isinstance(index, str):
return ValueSet.from_sets(
name.infer()
for filter in self._definition_class.get_filters(is_instance=True)
for name in filter.get(index)
)
return NO_VALUES
def get_key_values(self):
filtered_values = itertools.chain.from_iterable((
f.values()
for f in self._definition_class.get_filters(is_instance=True)
))
return ValueSet({
create_simple_object(self.inference_state, v.string_name)
for v in filtered_values
})
def _get_wrapped_value(self):
d, = self.inference_state.builtins_module.py__getattribute__('dict')
result, = d.execute_with_values()
return result
| TypedDict |
python | kamyu104__LeetCode-Solutions | Python/web-crawler.py | {
"start": 293,
"end": 1013
} | class ____(object):
def crawl(self, startUrl, htmlParser):
"""
:type startUrl: str
:type htmlParser: HtmlParser
:rtype: List[str]
"""
SCHEME = "http://"
def hostname(url):
pos = url.find('/', len(SCHEME))
if pos == -1:
return url
return url[:pos]
result = [startUrl]
lookup = set(result)
for from_url in result:
name = hostname(from_url)
for to_url in htmlParser.getUrls(from_url):
if to_url not in lookup and name == hostname(to_url):
result.append(to_url)
lookup.add(to_url)
return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/height-of-special-binary-tree.py | {
"start": 670,
"end": 1165
} | class ____(object):
def heightOfTree(self, root):
"""
:type root: Optional[TreeNode]
:rtype: int
"""
result = -1
q = [root]
while q:
new_q = []
for u in q:
if u.left and u.left.right != u:
new_q.append(u.left)
if u.right and u.right.left != u:
new_q.append(u.right)
q = new_q
result += 1
return result
| Solution2 |
python | pallets__werkzeug | src/werkzeug/routing/converters.py | {
"start": 5659,
"end": 6508
} | class ____(NumberConverter):
"""This converter only accepts floating point values::
Rule("/probability/<float:probability>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/offset/<float(signed=True):offset>")
:param map: The :class:`Map`.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+\.\d+"
num_convert = float
def __init__(
self,
map: Map,
min: float | None = None,
max: float | None = None,
signed: bool = False,
) -> None:
super().__init__(map, min=min, max=max, signed=signed) # type: ignore
| FloatConverter |
python | getsentry__sentry | src/sentry/rules/conditions/every_event.py | {
"start": 151,
"end": 437
} | class ____(EventCondition):
id = "sentry.rules.conditions.every_event.EveryEventCondition"
label = "The event occurs"
def passes(self, event: GroupEvent, state: EventState) -> bool:
return True
def is_enabled(self) -> bool:
return False
| EveryEventCondition |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 2275,
"end": 2379
} | class ____(StepStartedEvent, Event):
type: EventType = EventType.STEP_STARTED
| StepStartedWorkflowEvent |
python | pallets__werkzeug | src/werkzeug/middleware/shared_data.py | {
"start": 967,
"end": 9542
} | class ____:
"""A WSGI middleware which provides static content for development
environments or simple server setups. Its usage is quite simple::
import os
from werkzeug.middleware.shared_data import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. Files can also be
mounted on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/static': ('myapplication', 'static')
})
This will then serve the ``static`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
Currently the middleware does not support non-ASCII filenames. If the
encoding on the file system happens to match the encoding of the URI it may
work but this could also be by accident. We strongly suggest using ASCII
only file names for static files.
The middleware will guess the mimetype using the Python `mimetype`
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
:param fallback_mimetype: The fallback mimetype for unknown files.
.. versionchanged:: 1.0
The default ``fallback_mimetype`` is
``application/octet-stream``. If a filename looks like a text
mimetype, the ``utf-8`` charset is added to it.
.. versionadded:: 0.6
Added ``fallback_mimetype``.
.. versionchanged:: 0.5
Added ``cache_timeout``.
"""
def __init__(
self,
app: WSGIApplication,
exports: (
cabc.Mapping[str, str | tuple[str, str]]
| t.Iterable[tuple[str, str | tuple[str, str]]]
),
disallow: None = None,
cache: bool = True,
cache_timeout: int = 60 * 60 * 12,
fallback_mimetype: str = "application/octet-stream",
) -> None:
self.app = app
self.exports: list[tuple[str, _TLoader]] = []
self.cache = cache
self.cache_timeout = cache_timeout
if isinstance(exports, cabc.Mapping):
exports = exports.items()
for key, value in exports:
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, str):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError(f"unknown def {value!r}")
self.exports.append((key, loader))
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename: str) -> bool:
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename: str) -> _TOpener:
return lambda: (
open(filename, "rb"),
datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
int(os.path.getsize(filename)),
)
def get_file_loader(self, filename: str) -> _TLoader:
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package: str, package_path: str) -> _TLoader:
load_time = datetime.now(timezone.utc)
spec = importlib.util.find_spec(package)
reader = spec.loader.get_resource_reader(package) # type: ignore[union-attr]
def loader(
path: str | None,
) -> tuple[str | None, _TOpener | None]:
if path is None:
return None, None
path = safe_join(package_path, path)
if path is None:
return None, None
basename = posixpath.basename(path)
try:
resource = reader.open_resource(path)
except OSError:
return None, None
if isinstance(resource, BytesIO):
return (
basename,
lambda: (resource, load_time, len(resource.getvalue())),
)
return (
basename,
lambda: (
resource,
datetime.fromtimestamp(
os.path.getmtime(resource.name), tz=timezone.utc
),
os.path.getsize(resource.name),
),
)
return loader
def get_directory_loader(self, directory: str) -> _TLoader:
def loader(
path: str | None,
) -> tuple[str | None, _TOpener | None]:
if path is not None:
path = safe_join(directory, path)
if path is None:
return None, None
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
fn_str = os.fsencode(real_filename)
timestamp = mtime.timestamp()
checksum = adler32(fn_str) & 0xFFFFFFFF
return f"wzsdm-{timestamp}-{file_size}-{checksum}"
def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
path = get_path_info(environ)
file_loader = None
for search_path, loader in self.exports:
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith("/"):
search_path += "/"
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path) :])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename): # type: ignore
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename) # type: ignore
mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
f, mtime, file_size = file_loader()
headers = [("Date", http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename) # type: ignore
headers += [
("Etag", f'"{etag}"'),
("Cache-Control", f"max-age={timeout}, public"),
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response("304 Not Modified", headers)
return []
headers.append(("Expires", http_date(time() + timeout)))
else:
headers.append(("Cache-Control", "public"))
headers.extend(
(
("Content-Type", mime_type),
("Content-Length", str(file_size)),
("Last-Modified", http_date(mtime)),
)
)
start_response("200 OK", headers)
return wrap_file(environ, f)
| SharedDataMiddleware |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/search_ads.py | {
"start": 2066,
"end": 5122
} | class ____(_GoogleSearchAdsBaseOperator):
"""
Search a report by query.
.. seealso:
For API documentation check:
https://developers.google.com/search-ads/reporting/api/reference/rest/v0/customers.searchAds360/search
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsSearchOperator`
:param customer_id: The ID of the customer being queried.
:param query: The query to execute.
:param page_token: Token of the page to retrieve. If not specified, the first page of results will be
returned. Use the value obtained from `next_page_token` in the previous response
in order to request the next page of results.
:param page_size: Number of elements to retrieve in a single page. When too large a page is requested,
the server may decide to further limit the number of returned resources.
Default is 10000.
:param return_total_results_count: If true, the total number of results that match the query ignoring
the LIMIT clause will be included in the response. Default is false.
:param summary_row_setting: Determines whether a summary row will be returned. By default,
summary row is not returned. If requested, the summary row will be sent
in a response by itself after all others query results are returned.
:param validate_only: If true, the request is validated but not executed. Default is false.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: The version of the API that will be requested for example 'v0'.
"""
template_fields: Sequence[str] = (
*_GoogleSearchAdsBaseOperator.template_fields,
"page_token",
"page_size",
)
def __init__(
self,
*,
customer_id: str,
query: str,
page_token: str | None = None,
page_size: int = 10000,
return_total_results_count: bool = False,
summary_row_setting: str | None = None,
validate_only: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.customer_id = customer_id
self.query = query
self.page_token = page_token
self.page_size = page_size
self.return_total_results_count = return_total_results_count
self.summary_row_setting = summary_row_setting
self.validate_only = validate_only
def execute(self, context: Context):
self.log.info("Querying Search Ads")
response = self.hook.search(
customer_id=self.customer_id,
query=self.query,
page_size=self.page_size,
page_token=self.page_token,
return_total_results_count=self.return_total_results_count,
summary_row_setting=self.summary_row_setting,
validate_only=self.validate_only,
)
self.log.info("Query result: %s", response)
return response
| GoogleSearchAdsSearchOperator |
python | redis__redis-py | redis/cache.py | {
"start": 7630,
"end": 7680
} | class ____(Enum):
LRU = LRUPolicy
| EvictionPolicy |
python | fabric__fabric | tests/connection.py | {
"start": 1647,
"end": 59810
} | class ____:
class basic_attributes:
def is_connected_defaults_to_False(self):
assert Connection("host").is_connected is False
def client_defaults_to_a_new_SSHClient(self):
c = Connection("host").client
assert isinstance(c, SSHClient)
assert c.get_transport() is None
class known_hosts_behavior:
def defaults_to_auto_add(self):
# TODO: change Paramiko API so this isn't a private access
# TODO: maybe just merge with the __init__ test that is similar
assert isinstance(Connection("host").client._policy, AutoAddPolicy)
class init:
"__init__"
class host:
@raises(TypeError)
def is_required(self):
Connection()
def is_exposed_as_attribute(self):
assert Connection("host").host == "host" # buffalo buffalo
def may_contain_user_shorthand(self):
c = Connection("user@host")
assert c.host == "host"
assert c.user == "user"
def may_contain_port_shorthand(self):
c = Connection("host:123")
assert c.host == "host"
assert c.port == 123
def may_contain_user_and_port_shorthand(self):
c = Connection("user@host:123")
assert c.host == "host"
assert c.user == "user"
assert c.port == 123
def ipv6_addresses_work_ok_but_avoid_port_shorthand(self):
for addr in ("2001:DB8:0:0:0:0:0:1", "2001:DB8::1", "::1"):
c = Connection(addr, port=123)
assert c.user == get_local_user()
assert c.host == addr
assert c.port == 123
c2 = Connection("somebody@{}".format(addr), port=123)
assert c2.user == "somebody"
assert c2.host == addr
assert c2.port == 123
class user:
def defaults_to_local_user_with_no_config(self):
# Tautology-tastic!
assert Connection("host").user == get_local_user()
def accepts_config_user_option(self):
config = Config(overrides={"user": "nobody"})
assert Connection("host", config=config).user == "nobody"
def may_be_given_as_kwarg(self):
assert Connection("host", user="somebody").user == "somebody"
@raises(ValueError)
def errors_when_given_as_both_kwarg_and_shorthand(self):
Connection("user@host", user="otheruser")
def kwarg_wins_over_config(self):
config = Config(overrides={"user": "nobody"})
cxn = Connection("host", user="somebody", config=config)
assert cxn.user == "somebody"
def shorthand_wins_over_config(self):
config = Config(overrides={"user": "nobody"})
cxn = Connection("somebody@host", config=config)
assert cxn.user == "somebody"
class port:
def defaults_to_22_because_yup(self):
assert Connection("host").port == 22
def accepts_configuration_port(self):
config = Config(overrides={"port": 2222})
assert Connection("host", config=config).port == 2222
def may_be_given_as_kwarg(self):
assert Connection("host", port=2202).port == 2202
@raises(ValueError)
def errors_when_given_as_both_kwarg_and_shorthand(self):
Connection("host:123", port=321)
def kwarg_wins_over_config(self):
config = Config(overrides={"port": 2222})
cxn = Connection("host", port=123, config=config)
assert cxn.port == 123
def shorthand_wins_over_config(self):
config = Config(overrides={"port": 2222})
cxn = Connection("host:123", config=config)
assert cxn.port == 123
class forward_agent:
def defaults_to_False(self):
assert Connection("host").forward_agent is False
def accepts_configuration_value(self):
config = Config(overrides={"forward_agent": True})
assert Connection("host", config=config).forward_agent is True
def may_be_given_as_kwarg(self):
cxn = Connection("host", forward_agent=True)
assert cxn.forward_agent is True
def kwarg_wins_over_config(self):
config = Config(overrides={"forward_agent": True})
cxn = Connection("host", forward_agent=False, config=config)
assert cxn.forward_agent is False
class connect_timeout:
def defaults_to_None(self):
assert Connection("host").connect_timeout is None
def accepts_configuration_value(self):
config = Config(overrides={"timeouts": {"connect": 10}})
assert Connection("host", config=config).connect_timeout == 10
def may_be_given_as_kwarg(self):
cxn = Connection("host", connect_timeout=15)
assert cxn.connect_timeout == 15
def kwarg_wins_over_config(self):
config = Config(overrides={"timeouts": {"connect": 20}})
cxn = Connection("host", connect_timeout=100, config=config)
assert cxn.connect_timeout == 100
class config:
# NOTE: behavior local to Config itself is tested in its own test
# module; below is solely about Connection's config kwarg and its
# handling of that value
def is_not_required(self):
assert Connection("host").config.__class__ == Config
def can_be_specified(self):
c = Config(overrides={"user": "me", "custom": "option"})
config = Connection("host", config=c).config
assert c is config
assert config["user"] == "me"
assert config["custom"] == "option"
def if_given_an_invoke_Config_we_upgrade_to_our_own_Config(self):
# Scenario: user has Fabric-level data present at vanilla
# Invoke config level, and is then creating Connection objects
# with those vanilla invoke Configs.
# (Could also _not_ have any Fabric-level data, but then that's
# just a base case...)
# TODO: adjust this if we ever switch to all our settings being
# namespaced...
vanilla = InvokeConfig(overrides={"forward_agent": True})
cxn = Connection("host", config=vanilla)
assert cxn.forward_agent is True # not False, which is default
class gateway:
def is_optional_and_defaults_to_None(self):
c = Connection(host="host")
assert c.gateway is None
def takes_a_Connection(self):
c = Connection("host", gateway=Connection("otherhost"))
assert isinstance(c.gateway, Connection)
assert c.gateway.host == "otherhost"
def takes_a_string(self):
c = Connection("host", gateway="meh")
assert c.gateway == "meh"
def accepts_configuration_value(self):
gw = Connection("jumpbox")
config = Config(overrides={"gateway": gw})
# TODO: the fact that they will be eq, but _not_ necessarily be
# the same object, could be problematic in some cases...
cxn = Connection("host", config=config)
assert cxn.gateway == gw
class initializes_client:
@patch("fabric.connection.SSHClient")
def instantiates_empty_SSHClient(self, Client):
Connection("host")
Client.assert_called_once_with()
@patch("fabric.connection.AutoAddPolicy")
def sets_missing_host_key_policy(self, Policy, client):
# TODO: should make the policy configurable early on
sentinel = Mock()
Policy.return_value = sentinel
Connection("host")
set_policy = client.set_missing_host_key_policy
set_policy.assert_called_once_with(sentinel)
def is_made_available_as_client_attr(self, client):
# NOTE: client is SSHClient.return_value
assert Connection("host").client is client
class ssh_config:
def _runtime_config(self, overrides=None, basename="runtime"):
confname = "{}.conf".format(basename)
runtime_path = join(support, "ssh_config", confname)
if overrides is None:
overrides = {}
return Config(
runtime_ssh_path=runtime_path, overrides=overrides
)
def _runtime_cxn(self, **kwargs):
config = self._runtime_config(**kwargs)
return Connection("runtime", config=config)
def effectively_blank_when_no_loaded_config(self):
c = Config(ssh_config=SSHConfig())
cxn = Connection("host", config=c)
# NOTE: paramiko always injects this even if you look up a host
# that has no rules, even wildcard ones.
assert cxn.ssh_config == {"hostname": "host"}
def shows_result_of_lookup_when_loaded_config(self):
conf = self._runtime_cxn().ssh_config
expected = {
"connecttimeout": "15",
"forwardagent": "yes",
"hostname": "runtime",
"identityfile": ["whatever.key", "some-other.key"],
"port": "666",
"proxycommand": "my gateway",
"user": "abaddon",
}
assert conf == expected
class hostname:
def original_host_always_set(self):
cxn = Connection("somehost")
assert cxn.original_host == "somehost"
assert cxn.host == "somehost"
def hostname_directive_overrides_host_attr(self):
# TODO: not 100% convinced this is the absolute most
# obvious API for 'translation' of given hostname to
# ssh-configured hostname, but it feels okay for now.
path = join(
support, "ssh_config", "overridden_hostname.conf"
)
config = Config(runtime_ssh_path=path)
cxn = Connection("aliasname", config=config)
assert cxn.host == "realname"
assert cxn.original_host == "aliasname"
assert cxn.port == 2222
class user:
def wins_over_default(self):
assert self._runtime_cxn().user == "abaddon"
def wins_over_configuration(self):
cxn = self._runtime_cxn(overrides={"user": "baal"})
assert cxn.user == "abaddon"
def loses_to_explicit(self):
# Would be 'abaddon', as above
config = self._runtime_config()
cxn = Connection("runtime", config=config, user="set")
assert cxn.user == "set"
class port:
def wins_over_default(self):
assert self._runtime_cxn().port == 666
def wins_over_configuration(self):
cxn = self._runtime_cxn(overrides={"port": 777})
assert cxn.port == 666
def loses_to_explicit(self):
config = self._runtime_config() # Would be 666, as above
cxn = Connection("runtime", config=config, port=777)
assert cxn.port == 777
class forward_agent:
def wins_over_default(self):
assert self._runtime_cxn().forward_agent is True
def wins_over_configuration(self):
# Of course, this "config override" is also the same as the
# default. Meh.
cxn = self._runtime_cxn(overrides={"forward_agent": False})
assert cxn.forward_agent is True
def loses_to_explicit(self):
# Would be True, as above
config = self._runtime_config()
cxn = Connection(
"runtime", config=config, forward_agent=False
)
assert cxn.forward_agent is False
class proxy_command:
def wins_over_default(self):
assert self._runtime_cxn().gateway == "my gateway"
def wins_over_configuration(self):
cxn = self._runtime_cxn(overrides={"gateway": "meh gw"})
assert cxn.gateway == "my gateway"
def loses_to_explicit(self):
# Would be "my gateway", as above
config = self._runtime_config()
cxn = Connection(
"runtime", config=config, gateway="other gateway"
)
assert cxn.gateway == "other gateway"
def explicit_False_turns_off_feature(self):
# This isn't as necessary for things like user/port, which
# _may not_ be None in the end - this setting could be.
config = self._runtime_config()
cxn = Connection("runtime", config=config, gateway=False)
assert cxn.gateway is False
class proxy_jump:
def setup(self):
self._expected_gw = Connection("jumpuser@jumphost:373")
def wins_over_default(self):
cxn = self._runtime_cxn(basename="proxyjump")
assert cxn.gateway == self._expected_gw
def wins_over_configuration(self):
cxn = self._runtime_cxn(
basename="proxyjump", overrides={"gateway": "meh gw"}
)
assert cxn.gateway == self._expected_gw
def loses_to_explicit(self):
# Would be a Connection equal to self._expected_gw, as
# above
config = self._runtime_config(basename="proxyjump")
cxn = Connection(
"runtime", config=config, gateway="other gateway"
)
assert cxn.gateway == "other gateway"
def explicit_False_turns_off_feature(self):
config = self._runtime_config(basename="proxyjump")
cxn = Connection("runtime", config=config, gateway=False)
assert cxn.gateway is False
def wins_over_proxycommand(self):
cxn = self._runtime_cxn(basename="both_proxies")
assert cxn.gateway == Connection("winner@everything:777")
def multi_hop_works_ok(self):
cxn = self._runtime_cxn(basename="proxyjump_multi")
innermost = cxn.gateway.gateway.gateway
middle = cxn.gateway.gateway
outermost = cxn.gateway
assert innermost == Connection("jumpuser3@jumphost3:411")
assert middle == Connection("jumpuser2@jumphost2:872")
assert outermost == Connection("jumpuser@jumphost:373")
def wildcards_do_not_trigger_recursion(self):
# When #1850 is present, this will RecursionError.
conf = self._runtime_config(basename="proxyjump_recursive")
cxn = Connection("runtime.tld", config=conf)
assert cxn.gateway == Connection("bastion.tld")
assert cxn.gateway.gateway is None
def multihop_plus_wildcards_still_no_recursion(self):
conf = self._runtime_config(
basename="proxyjump_multi_recursive"
)
cxn = Connection("runtime.tld", config=conf)
outer = cxn.gateway
inner = cxn.gateway.gateway
assert outer == Connection("bastion1.tld")
assert inner == Connection("bastion2.tld")
assert inner.gateway is None
def gateway_Connections_get_parent_connection_configs(self):
conf = self._runtime_config(
basename="proxyjump",
overrides={"some_random_option": "a-value"},
)
cxn = Connection("runtime", config=conf)
# Safety
assert cxn.config is conf
assert cxn.gateway == self._expected_gw
# Real check
assert cxn.gateway.config.some_random_option == "a-value"
# Prove copy not reference
# TODO: would we ever WANT a reference? can't imagine...
assert cxn.gateway.config is not conf
class connect_timeout:
def wins_over_default(self):
assert self._runtime_cxn().connect_timeout == 15
def wins_over_configuration(self):
cxn = self._runtime_cxn(
overrides={"timeouts": {"connect": 17}}
)
assert cxn.connect_timeout == 15
def loses_to_explicit(self):
config = self._runtime_config()
cxn = Connection(
"runtime", config=config, connect_timeout=23
)
assert cxn.connect_timeout == 23
class identity_file:
# NOTE: ssh_config value gets merged w/ (instead of overridden
# by) config and kwarg values; that is tested in the tests for
# open().
def basic_loading_of_value(self):
# By default, key_filename will be empty, and the data from
# the runtime ssh config will be all that appears.
value = self._runtime_cxn().connect_kwargs["key_filename"]
assert value == ["whatever.key", "some-other.key"]
class connect_kwargs:
def defaults_to_empty_dict(self):
assert Connection("host").connect_kwargs == {}
def may_be_given_explicitly(self):
cxn = Connection("host", connect_kwargs={"foo": "bar"})
assert cxn.connect_kwargs == {"foo": "bar"}
def may_be_configured(self):
c = Config(overrides={"connect_kwargs": {"origin": "config"}})
cxn = Connection("host", config=c)
assert cxn.connect_kwargs == {"origin": "config"}
def kwarg_wins_over_config(self):
# TODO: should this be more of a merge-down?
c = Config(overrides={"connect_kwargs": {"origin": "config"}})
cxn = Connection(
"host", connect_kwargs={"origin": "kwarg"}, config=c
)
assert cxn.connect_kwargs == {"origin": "kwarg"}
class inline_ssh_env:
def defaults_to_config_value(self):
assert Connection("host").inline_ssh_env is True
config = Config({"inline_ssh_env": False})
assert (
Connection("host", config=config).inline_ssh_env is False
)
def may_be_given(self):
assert Connection("host").inline_ssh_env is True
cxn = Connection("host", inline_ssh_env=False)
assert cxn.inline_ssh_env is False
class from_v1:
def setup(self):
self.env = faux_v1_env()
def _cxn(self, **kwargs):
self.env.update(kwargs)
return Connection.from_v1(self.env)
def must_be_given_explicit_env_arg(self):
cxn = Connection.from_v1(self.env)
assert cxn.host == "localghost"
class obtaining_config:
@patch("fabric.connection.Config.from_v1")
def defaults_to_calling_Config_from_v1(self, Config_from_v1):
Connection.from_v1(self.env)
Config_from_v1.assert_called_once_with(self.env)
@patch("fabric.connection.Config.from_v1")
def may_be_given_config_explicitly(self, Config_from_v1):
# Arguably a dupe of regular Connection constructor behavior,
# but whatever.
Connection.from_v1(env=self.env, config=Config())
assert not Config_from_v1.called
class additional_kwargs:
# I.e. as opposed to what happens to the 'env' kwarg...
def forwards_arbitrary_kwargs_to_init(self):
cxn = Connection.from_v1(
self.env,
connect_kwargs={"foo": "bar"},
inline_ssh_env=False,
connect_timeout=15,
)
assert cxn.connect_kwargs["foo"] == "bar"
assert cxn.inline_ssh_env is False
assert cxn.connect_timeout == 15
def conflicting_kwargs_win_over_v1_env_values(self):
env = Lexicon(self.env)
cxn = Connection.from_v1(
env, host="not-localghost", port=2222, user="remoteuser"
)
assert cxn.host == "not-localghost"
assert cxn.user == "remoteuser"
assert cxn.port == 2222
class var_mappings:
def host_string(self):
cxn = self._cxn() # default is 'localghost'
assert cxn.host == "localghost"
@raises(InvalidV1Env)
def None_host_string_errors_usefully(self):
self._cxn(host_string=None)
def user(self):
cxn = self._cxn(user="space")
assert cxn.user == "space"
class port:
def basic(self):
cxn = self._cxn(port=2222)
assert cxn.port == 2222
def casted_to_int(self):
cxn = self._cxn(port="2222")
assert cxn.port == 2222
def not_supplied_if_given_in_host_string(self):
cxn = self._cxn(host_string="localghost:3737", port=2222)
assert cxn.port == 3737
class string_representation:
"string representations"
def str_displays_repr(self):
c = Connection("meh")
assert str(c) == "<Connection host=meh>"
def displays_core_params(self):
c = Connection(user="me", host="there", port=123)
template = "<Connection host=there user=me port=123>"
assert repr(c) == template
def omits_default_param_values(self):
c = Connection("justhost")
assert repr(c) == "<Connection host=justhost>"
def param_comparison_uses_config(self):
conf = Config(overrides={"user": "zerocool"})
c = Connection(
user="zerocool", host="myhost", port=123, config=conf
)
template = "<Connection host=myhost port=123>"
assert repr(c) == template
def proxyjump_gateway_shows_type(self):
c = Connection(host="myhost", gateway=Connection("jump"))
template = "<Connection host=myhost gw=proxyjump>"
assert repr(c) == template
def proxycommand_gateway_shows_type(self):
c = Connection(host="myhost", gateway="netcat is cool")
template = "<Connection host=myhost gw=proxycommand>"
assert repr(c) == template
class comparison_and_hashing:
def comparison_uses_host_user_and_port(self):
# Just host
assert Connection("host") == Connection("host")
# Host + user
c1 = Connection("host", user="foo")
c2 = Connection("host", user="foo")
assert c1 == c2
# Host + user + port
c1 = Connection("host", user="foo", port=123)
c2 = Connection("host", user="foo", port=123)
assert c1 == c2
def comparison_to_non_Connections_is_False(self):
assert Connection("host") != 15
def hashing_works(self):
assert hash(Connection("host")) == hash(Connection("host"))
def sorting_works(self):
# Hostname...
assert Connection("a-host") < Connection("b-host")
# User...
assert Connection("a-host", user="a-user") < Connection(
"a-host", user="b-user"
)
# then port...
assert Connection("a-host", port=1) < Connection("a-host", port=2)
class open:
def has_no_required_args_and_returns_value_of_connect(self, client):
retval = Connection("host").open()
assert retval is client.connect.return_value
def calls_SSHClient_connect(self, client):
"calls paramiko.SSHClient.connect() with correct args"
Connection("host").open()
client.connect.assert_called_with(
username=get_local_user(), hostname="host", port=22
)
def passes_through_connect_kwargs(self, client):
Connection("host", connect_kwargs={"foobar": "bizbaz"}).open()
client.connect.assert_called_with(
username=get_local_user(),
hostname="host",
port=22,
foobar="bizbaz",
)
def refuses_to_overwrite_connect_kwargs_with_others(self, client):
for key, value, kwargs in (
# Core connection args should definitely not get overwritten!
# NOTE: recall that these keys are the SSHClient.connect()
# kwarg names, NOT our own config/kwarg names!
("hostname", "nothost", {}),
("port", 17, {}),
("username", "zerocool", {}),
# These might arguably still be allowed to work, but let's head
# off confusion anyways.
("timeout", 100, {"connect_timeout": 25}),
):
try:
Connection(
"host", connect_kwargs={key: value}, **kwargs
).open()
except ValueError as e:
err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!" # noqa
assert str(e) == err.format(key)
else:
assert False, "Did not raise ValueError!"
def connect_kwargs_protection_not_tripped_by_defaults(self, client):
Connection("host", connect_kwargs={"timeout": 300}).open()
client.connect.assert_called_with(
username=get_local_user(),
hostname="host",
port=22,
timeout=300,
)
def submits_connect_timeout(self, client):
Connection("host", connect_timeout=27).open()
client.connect.assert_called_with(
username=get_local_user(), hostname="host", port=22, timeout=27
)
def is_connected_True_when_successful(self, client):
c = Connection("host")
c.open()
assert c.is_connected is True
def short_circuits_if_already_connected(self, client):
cxn = Connection("host")
# First call will set self.transport to fixture's mock
cxn.open()
# Second call will check .is_connected which will see active==True,
# and short circuit
cxn.open()
assert client.connect.call_count == 1
def is_connected_still_False_when_connect_fails(self, client):
client.connect.side_effect = socket.error
cxn = Connection("host")
try:
cxn.open()
except socket.error:
pass
assert cxn.is_connected is False
def uses_configured_user_host_and_port(self, client):
Connection(user="myuser", host="myhost", port=9001).open()
client.connect.assert_called_once_with(
username="myuser", hostname="myhost", port=9001
)
# NOTE: does more involved stuff so can't use "client" fixture
@patch("fabric.connection.SSHClient")
def uses_gateway_channel_as_sock_for_SSHClient_connect(self, Client):
"uses Connection gateway as 'sock' arg to SSHClient.connect"
# Setup
mock_gw = Mock()
mock_main = Mock()
Client.side_effect = [mock_gw, mock_main]
gw = Connection("otherhost")
gw.open = Mock(wraps=gw.open)
main = Connection("host", gateway=gw)
main.open()
# Expect gateway is also open()'d
gw.open.assert_called_once_with()
# Expect direct-tcpip channel open on 1st client
open_channel = mock_gw.get_transport.return_value.open_channel
kwargs = open_channel.call_args[1]
assert kwargs["kind"] == "direct-tcpip"
assert kwargs["dest_addr"], "host" == 22
# Expect result of that channel open as sock arg to connect()
sock_arg = mock_main.connect.call_args[1]["sock"]
assert sock_arg is open_channel.return_value
@patch("fabric.connection.ProxyCommand")
def uses_proxycommand_as_sock_for_Client_connect(self, moxy, client):
"uses ProxyCommand from gateway as 'sock' arg to SSHClient.connect"
# Setup
main = Connection("host", gateway="net catty %h %p")
main.open()
# Expect ProxyCommand instantiation
moxy.assert_called_once_with("net catty host 22")
# Expect result of that as sock arg to connect()
sock_arg = client.connect.call_args[1]["sock"]
assert sock_arg is moxy.return_value
# TODO: all the various connect-time options such as agent forwarding,
# host acceptance policies, how to auth, etc etc. These are all aspects
# of a given session and not necessarily the same for entire lifetime
# of a Connection object, should it ever disconnect/reconnect.
# TODO: though some/all of those things might want to be set to
# defaults at initialization time...
class connect_kwargs_key_filename:
"connect_kwargs(key_filename=...)"
# TODO: it'd be nice to truly separate CLI from regular (non override
# level) invoke config; as it is, invoke config comes first in expected
# outputs since otherwise there's no way for --identity to "come
# first".
@pytest.mark.parametrize(
"ssh, invoke, kwarg, expected",
[
param(
True,
True,
True,
[
"configured.key",
"kwarg.key",
"ssh-config-B.key",
"ssh-config-A.key",
],
id="All sources",
),
param(
True,
True,
"kwarg.key",
[
"configured.key",
"kwarg.key",
"ssh-config-B.key",
"ssh-config-A.key",
],
id="All sources, kwarg (string)",
),
param(False, False, False, [], id="No sources"),
param(
True,
False,
False,
["ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config only",
),
param(
False,
True,
False,
["configured.key"],
id="Invoke-level config only",
),
param(
False,
False,
True,
["kwarg.key"],
id="Connection kwarg only",
),
param(
False,
False,
"kwarg.key",
["kwarg.key"],
id="Connection kwarg (string) only",
),
param(
True,
True,
False,
["configured.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config + invoke config, no kwarg",
),
param(
True,
False,
True,
["kwarg.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config + kwarg, no Invoke-level config",
),
param(
True,
False,
"kwarg.key",
["kwarg.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config + kwarg (string), no Invoke-level config",
),
param(
False,
True,
True,
["configured.key", "kwarg.key"],
id="Invoke-level config + kwarg, no ssh_config",
),
param(
False,
True,
"kwarg.key",
["configured.key", "kwarg.key"],
id="Invoke-level config + kwarg (string), no ssh_config",
),
param(
True,
"string.key",
False,
["string.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config, string Invoke config, no kwarg",
),
param(
False,
"string.key",
True,
["string.key", "kwarg.key"],
id="no ssh_config, string Invoke config, list kwarg",
),
param(
False,
"config.key",
"kwarg.key",
["config.key", "kwarg.key"],
id="no ssh_config, string Invoke config, string kwarg",
),
param(
True,
"config.key",
"kwarg.key",
[
"config.key",
"kwarg.key",
"ssh-config-B.key",
"ssh-config-A.key",
],
id="ssh_config, string Invoke config, string kwarg",
),
],
)
def merges_sources(self, client, ssh, invoke, kwarg, expected):
config_kwargs = {}
if ssh:
# SSH config with 2x IdentityFile directives.
config_kwargs["runtime_ssh_path"] = join(
support, "ssh_config", "runtime_identity.conf"
)
if invoke:
# Assume string value if not literal True
value = ["configured.key"] if invoke is True else invoke
# Use overrides config level to mimic --identity use NOTE: (the
# fact that --identity is an override, and thus overrides eg
# invoke config file values is part of invoke's config test
# suite)
config_kwargs["overrides"] = {
"connect_kwargs": {"key_filename": value}
}
conf = Config(**config_kwargs)
connect_kwargs = {}
if kwarg:
# Assume string value if not literal True
value = ["kwarg.key"] if kwarg is True else kwarg
connect_kwargs = {"key_filename": value}
# Tie in all sources that were configured & open()
Connection(
"runtime", config=conf, connect_kwargs=connect_kwargs
).open()
# Ensure we got the expected list of keys
kwargs = client.connect.call_args[1]
if expected:
assert kwargs["key_filename"] == expected
else:
# No key filenames -> it's not even passed in as connect_kwargs
# is gonna be a blank dict
assert "key_filename" not in kwargs
class close:
def has_no_required_args_and_returns_None(self, client):
c = Connection("host")
c.open()
assert c.close() is None
def calls_SSHClient_close(self, client):
"calls paramiko.SSHClient.close()"
c = Connection("host")
c.open()
c.close()
client.close.assert_called_with()
def calls_SFTPClient_close(self, client):
"calls paramiko.SFTPClient.close()"
c = Connection("host")
c.open()
sftp_client = c.sftp()
assert c._sftp is not None
c.close()
assert c._sftp is None
sftp_client.close.assert_called_with()
def calls_SFTPClient_close_not_called_if_not_open(self, client):
"calls paramiko.SFTPClient.close()"
c = Connection("host")
c.open()
assert c._sftp is None
c.close()
assert c._sftp is None
@patch("fabric.connection.AgentRequestHandler")
def calls_agent_handler_close_if_enabled(self, Handler, client):
c = Connection("host", forward_agent=True)
c.create_session()
c.close()
# NOTE: this will need to change if, for w/e reason, we ever want
# to run multiple handlers at once
Handler.return_value.close.assert_called_once_with()
def short_circuits_if_not_connected(self, client):
c = Connection("host")
# Won't trigger close() on client because it'll already think it's
# closed (due to no .transport & the behavior of .is_connected)
c.close()
assert not client.close.called
def class_works_as_a_closing_contextmanager(self, client):
with Connection("host") as c:
c.open()
client.close.assert_called_once_with()
class create_session:
def calls_open_for_you(self, client):
c = Connection("host")
c.open = Mock()
c.transport = Mock() # so create_session no asplode
c.create_session()
assert c.open.called
@patch("fabric.connection.AgentRequestHandler")
def activates_paramiko_agent_forwarding_if_configured(
self, Handler, client
):
c = Connection("host", forward_agent=True)
chan = c.create_session()
Handler.assert_called_once_with(chan)
class run:
# NOTE: most actual run related tests live in the runners module's
# tests. Here we are just testing the outer interface a bit.
@patch(remote_path)
def calls_open_for_you(self, Remote, client):
c = Connection("host")
c.open = Mock()
c.run("command")
assert c.open.called
@patch(remote_path)
def passes_inline_env_to_Remote(self, Remote, client):
Connection("host").run("command")
assert Remote.call_args[1]["inline_env"] is True
Connection("host", inline_ssh_env=False).run("command")
assert Remote.call_args[1]["inline_env"] is False
@patch(remote_path)
def calls_Remote_run_with_command_and_kwargs_and_returns_its_result(
self, Remote, client
):
remote = Remote.return_value
c = Connection("host")
r1 = c.run("command")
r2 = c.run("command", warn=True, hide="stderr")
# NOTE: somehow, .call_args & the methods built on it (like
# .assert_called_with()) stopped working, apparently triggered by
# our code...somehow...after commit (roughly) 80906c7.
# And yet, .call_args_list and its brethren work fine. Wha?
Remote.assert_any_call(context=c, inline_env=True)
remote.run.assert_has_calls(
[call("command"), call("command", warn=True, hide="stderr")]
)
for r in (r1, r2):
assert r is remote.run.return_value
class shell:
def setup(self):
self.defaults = Config.global_defaults()["run"]
@patch(remote_shell_path)
def calls_RemoteShell_run_with_all_kwargs_and_returns_its_result(
self, RemoteShell, client
):
remote = RemoteShell.return_value
cxn = Connection("host")
kwargs = dict(
env={"foo": "bar"},
replace_env=True,
encoding="utf-16",
in_stream=StringIO("meh"),
watchers=["meh"],
)
result = cxn.shell(**kwargs)
RemoteShell.assert_any_call(context=cxn)
assert remote.run.call_count == 1
# Expect explicit use of default values for all kwarg-settings
# besides what shell() itself tweaks
expected = dict(self.defaults, pty=True, command=None, **kwargs)
assert remote.run.call_args[1] == expected
assert result is remote.run.return_value
def raises_TypeError_for_disallowed_kwargs(self, client):
for key in self.defaults.keys():
if key in (
"env",
"replace_env",
"encoding",
"in_stream",
"watchers",
):
continue
with pytest.raises(
TypeError,
match=r"unexpected keyword arguments: \['{}'\]".format(
key
),
):
Connection("host").shell(**{key: "whatever"})
@patch(remote_shell_path)
def honors_config_system_for_allowed_kwargs(self, RemoteShell, client):
remote = RemoteShell.return_value
allowed = dict(
env={"foo": "bar"},
replace_env=True,
encoding="utf-16",
in_stream="sentinel",
watchers=["sentinel"],
)
ignored = dict(echo=True, hide="foo") # Spot check
config = Config({"run": dict(allowed, **ignored)})
cxn = Connection("host", config=config)
cxn.shell()
kwargs = remote.run.call_args[1]
for key, value in allowed.items():
assert kwargs[key] == value
for key, value in ignored.items():
assert kwargs[key] == self.defaults[key]
class local:
# NOTE: most tests for this functionality live in Invoke's runner
# tests.
@patch("invoke.config.Local")
def calls_invoke_Local_run(self, Local):
Connection("host").local("foo")
# NOTE: yet another casualty of the bizarre mock issues
assert call().run("foo") in Local.mock_calls
class sudo:
@patch(remote_path)
def calls_open_for_you(self, Remote, client):
c = Connection("host")
c.open = Mock()
c.sudo("command")
assert c.open.called
@patch(remote_path)
def passes_inline_env_to_Remote(self, Remote, client):
Connection("host").sudo("command")
assert Remote.call_args[1]["inline_env"] is True
Connection("host", inline_ssh_env=False).sudo("command")
assert Remote.call_args[1]["inline_env"] is False
@patch(remote_path)
def basic_invocation(self, Remote, client):
# Technically duplicates Invoke-level tests, but ensures things
# still work correctly at our level.
cxn = Connection("host")
cxn.sudo("foo")
cmd = "sudo -S -p '{}' foo".format(cxn.config.sudo.prompt)
# NOTE: this is another spot where Mock.call_args is inexplicably
# None despite call_args_list being populated. WTF. (Also,
# Remote.return_value is two different Mocks now, despite Remote's
# own Mock having the same ID here and in code under test. WTF!!)
expected = [
call(context=cxn, inline_env=True),
call().run(cmd, watchers=ANY),
]
assert Remote.mock_calls == expected
# NOTE: we used to have a "sudo return value is literally the same
# return value from Remote.run()" safety check here, which is
# completely impossible now thanks to the above issue.
def per_host_password_works_as_expected(self):
# TODO: needs clearly defined "per-host" config API, if a distinct
# one is necessary besides "the config obj handed in when
# instantiating the Connection".
# E.g. generate a Connection pulling in a sudo.password value from
# what would be a generic conf file or similar, *and* one more
# specific to that particular Connection (perhaps simply the
# 'override' level?), w/ test asserting the more-specific value is
# what's submitted.
skip()
class sftp:
def returns_result_of_client_open_sftp(self, client):
"returns result of client.open_sftp()"
sentinel = object()
client.open_sftp.return_value = sentinel
assert Connection("host").sftp() == sentinel
client.open_sftp.assert_called_with()
def lazily_caches_result(self, client):
sentinel1, sentinel2 = object(), object()
client.open_sftp.side_effect = [sentinel1, sentinel2]
cxn = Connection("host")
first = cxn.sftp()
# TODO: why aren't we just asserting about calls of open_sftp???
err = "{0!r} wasn't the sentinel object()!"
assert first is sentinel1, err.format(first)
second = cxn.sftp()
assert second is sentinel1, err.format(second)
class get:
@patch("fabric.connection.Transfer")
def calls_Transfer_get(self, Transfer):
"calls Transfer.get()"
c = Connection("host")
c.get("meh")
Transfer.assert_called_with(c)
Transfer.return_value.get.assert_called_with("meh")
class put:
@patch("fabric.connection.Transfer")
def calls_Transfer_put(self, Transfer):
"calls Transfer.put()"
c = Connection("host")
c.put("meh")
Transfer.assert_called_with(c)
Transfer.return_value.put.assert_called_with("meh")
class forward_local:
@patch("fabric.tunnels.select")
@patch("fabric.tunnels.socket.socket")
@patch("fabric.connection.SSHClient")
def _forward_local(self, kwargs, Client, mocket, select):
# Tease out bits of kwargs for use in the mocking/expecting.
# But leave it alone for raw passthru to the API call itself.
# TODO: unhappy with how much this apes the real code & its sig...
local_port = kwargs["local_port"]
remote_port = kwargs.get("remote_port", local_port)
local_host = kwargs.get("local_host", "localhost")
remote_host = kwargs.get("remote_host", "localhost")
# These aren't part of the real sig, but this is easier than trying
# to reconcile the mock decorators + optional-value kwargs. meh.
tunnel_exception = kwargs.pop("tunnel_exception", None)
listener_exception = kwargs.pop("listener_exception", False)
# Mock setup
client = Client.return_value
listener_sock = Mock(name="listener_sock")
if listener_exception:
listener_sock.bind.side_effect = listener_exception
data = "Some data".encode()
tunnel_sock = Mock(name="tunnel_sock", recv=lambda n: data)
local_addr = Mock()
transport = client.get_transport.return_value
channel = transport.open_channel.return_value
# socket.socket is only called once directly
mocket.return_value = listener_sock
# The 2nd socket is obtained via an accept() (which should only
# fire once & raise EAGAIN after)
listener_sock.accept.side_effect = chain(
[(tunnel_sock, local_addr)],
# TODO: should this become BlockingIOError too?
repeat(socket.error(errno.EAGAIN, "nothing yet")),
)
obj = tunnel_sock if tunnel_exception is None else tunnel_exception
select.select.side_effect = _select_result(obj)
with Connection("host").forward_local(**kwargs):
# Make sure we give listener thread enough time to boot up :(
# Otherwise we might assert before it does things. (NOTE:
# doesn't need to be much, even at 0.01s, 0/100 trials failed
# (vs 45/100 with no sleep)
time.sleep(0.015)
assert client.connect.call_args[1]["hostname"] == "host"
listener_sock.setsockopt.assert_called_once_with(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
)
listener_sock.setblocking.assert_called_once_with(0)
listener_sock.bind.assert_called_once_with(
(local_host, local_port)
)
if not listener_exception:
listener_sock.listen.assert_called_once_with(1)
transport.open_channel.assert_called_once_with(
"direct-tcpip", (remote_host, remote_port), local_addr
)
# Local write to tunnel_sock is implied by its mocked-out
# recv() call above...
# NOTE: don't assert if explodey; we want to mimic "the only
# error that occurred was within the thread" behavior being
# tested by thread-exception-handling tests
if not (tunnel_exception or listener_exception):
channel.sendall.assert_called_once_with(data)
# Shutdown, with another sleep because threads.
time.sleep(0.015)
if not listener_exception:
tunnel_sock.close.assert_called_once_with()
channel.close.assert_called_once_with()
listener_sock.close.assert_called_once_with()
def forwards_local_port_to_remote_end(self):
self._forward_local({"local_port": 1234})
def distinct_remote_port(self):
self._forward_local({"local_port": 1234, "remote_port": 4321})
def non_localhost_listener(self):
self._forward_local(
{"local_port": 1234, "local_host": "nearby_local_host"}
)
def non_remote_localhost_connection(self):
self._forward_local(
{"local_port": 1234, "remote_host": "nearby_remote_host"}
)
def _thread_error(self, which):
class Sentinel(Exception):
pass
try:
self._forward_local(
{
"local_port": 1234,
"{}_exception".format(which): Sentinel,
}
)
except ThreadException as e:
# NOTE: ensures that we're getting what we expected and not
# some deeper, test-bug related error
assert len(e.exceptions) == 1
inner = e.exceptions[0]
err = "Expected wrapped exception to be Sentinel, was {}"
assert inner.type is Sentinel, err.format(inner.type.__name__)
else:
# no exception happened :( implies the thread went boom but
# nobody noticed
err = "Failed to get ThreadException on {} error"
assert False, err.format(which)
def tunnel_errors_bubble_up(self):
self._thread_error("tunnel")
def tunnel_manager_errors_bubble_up(self):
self._thread_error("listener")
# TODO: these require additional refactoring of _forward_local to be
# more like the decorators in _util
def multiple_tunnels_can_be_open_at_once(self):
skip()
class forward_remote:
@patch("fabric.connection.socket.socket")
@patch("fabric.tunnels.select")
@patch("fabric.connection.SSHClient")
def _forward_remote(self, kwargs, Client, select, mocket):
# TODO: unhappy with how much this duplicates of the code under
# test, re: sig/default vals
# Set up parameter values/defaults
remote_port = kwargs["remote_port"]
remote_host = kwargs.get("remote_host", "127.0.0.1")
local_port = kwargs.get("local_port", remote_port)
local_host = kwargs.get("local_host", "localhost")
# Mock/etc setup, anything that can be prepped before the forward
# occurs (which is most things)
tun_socket = mocket.return_value
cxn = Connection("host")
# Channel that will yield data when read from
chan = Mock()
chan.recv.return_value = "data"
# And make select() yield it as being ready once, when called
select.select.side_effect = _select_result(chan)
with cxn.forward_remote(**kwargs):
# At this point Connection.open() has run and generated a
# Transport mock for us (because SSHClient is mocked). Let's
# first make sure we asked it for the port forward...
# NOTE: this feels like it's too limited/tautological a test,
# until you realize that it's functionally impossible to mock
# out everything required for Paramiko's inner guts to run
# _parse_channel_open() and suchlike :(
call = cxn.transport.request_port_forward.call_args_list[0]
assert call[1]["address"] == remote_host
assert call[1]["port"] == remote_port
# Pretend the Transport called our callback with mock Channel
call[1]["handler"](chan, tuple(), tuple())
# Then have to sleep a bit to make sure we give the tunnel
# created by that callback to spin up; otherwise ~5% of the
# time we exit the contextmanager so fast, the tunnel's "you're
# done!" flag is set before it even gets a chance to select()
# once.
time.sleep(0.01)
# And make sure we hooked up to the local socket OK
tup = (local_host, local_port)
tun_socket.connect.assert_called_once_with(tup)
# Expect that our socket got written to by the tunnel (due to the
# above-setup select() and channel mocking). Need to do this after
# tunnel shutdown or we risk thread ordering issues.
tun_socket.sendall.assert_called_once_with("data")
# Ensure we closed down the mock socket
mocket.return_value.close.assert_called_once_with()
# And that the transport canceled the port forward on the remote
# end.
assert cxn.transport.cancel_port_forward.call_count == 1
def forwards_remote_port_to_local_end(self):
self._forward_remote({"remote_port": 1234})
def distinct_local_port(self):
self._forward_remote({"remote_port": 1234, "local_port": 4321})
def non_localhost_connections(self):
self._forward_remote(
{"remote_port": 1234, "local_host": "nearby_local_host"}
)
def remote_non_localhost_listener(self):
self._forward_remote(
{"remote_port": 1234, "remote_host": "192.168.1.254"}
)
# TODO: these require additional refactoring of _forward_remote to be
# more like the decorators in _util
def multiple_tunnels_can_be_open_at_once(self):
skip()
def tunnel_errors_bubble_up(self):
skip()
def listener_errors_bubble_up(self):
skip()
| Connection_ |
python | django__django | django/forms/fields.py | {
"start": 26752,
"end": 28302
} | class ____(CharField):
widget = URLInput
default_error_messages = {
"invalid": _("Enter a valid URL."),
}
default_validators = [validators.URLValidator()]
def __init__(self, *, assume_scheme=None, **kwargs):
self.assume_scheme = assume_scheme or "https"
super().__init__(strip=True, **kwargs)
def to_python(self, value):
def split_url(url):
"""
Return a list of url parts via urlsplit(), or raise
ValidationError for some malformed URLs.
"""
try:
return list(urlsplit(url))
except ValueError:
# urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages["invalid"], code="invalid")
value = super().to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, add a scheme.
url_fields[0] = self.assume_scheme
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ""
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
| URLField |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1248866,
"end": 1249080
} | class ____(Spec, NonNormalizedSpec):
"""RepeatSpec schema wrapper."""
_schema = {"$ref": "#/definitions/RepeatSpec"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| RepeatSpec |
python | getlogbook__logbook | src/logbook/queues.py | {
"start": 18974,
"end": 19460
} | class ____(SubscriberBase):
"""subscribes to a execnet channel"""
def __init__(self, channel):
self.channel = channel
def recv(self, timeout=None):
try:
rv = self.channel.receive(timeout=timeout)
except self.channel.RemoteError:
# XXX: handle
return None
except (self.channel.TimeoutError, EOFError):
return None
else:
return LogRecord.from_dict(rv)
| ExecnetChannelSubscriber |
python | sphinx-doc__sphinx | sphinx/builders/gettext.py | {
"start": 3342,
"end": 4290
} | class ____(SphinxRenderer):
def __init__(
self,
template_path: Sequence[str | os.PathLike[str]] | None = None,
outdir: str | os.PathLike[str] | None = None,
) -> None:
self.outdir = outdir
if template_path is None:
super().__init__([DEFAULT_TEMPLATE_PATH])
else:
super().__init__([*template_path, DEFAULT_TEMPLATE_PATH])
def escape(s: str) -> str:
s = s.replace('\\', r'\\')
s = s.replace('"', r'\"')
return s.replace('\n', '\\n"\n"')
# use texescape as escape filter
self.env.filters['e'] = escape
self.env.filters['escape'] = escape
def render(self, filename: str, context: dict[str, Any]) -> str:
def _relpath(s: str) -> str:
return canon_path(relpath(s, self.outdir))
context['relpath'] = _relpath
return super().render(filename, context)
| GettextRenderer |
python | milvus-io__pymilvus | tests/test_search_result.py | {
"start": 294,
"end": 2447
} | class ____:
@pytest.mark.parametrize("pk_dist", [
{"id": 1, "distance": 0.1, "entity":{}},
{"id": 2, "distance": 0.3, "entity":{}},
{"id": "a", "distance": 0.4, "entity":{}},
])
def test_hit_no_fields(self, pk_dist: Dict):
h = Hit(pk_dist, pk_name="id")
assert h.id == h["id"] == h.get("id") == pk_dist["id"]
assert h.score == h.distance == h["distance"] == h.get("distance") == pk_dist["distance"]
assert h.entity == h
assert h["entity"] == h.get("entity") == {}
assert hasattr(h, "id") is True
assert hasattr(h, "distance") is True
assert hasattr(h, "a_random_attribute") is False
@pytest.mark.parametrize("pk_dist_fields", [
{"id": 1, "distance": 0.1, "entity": {"vector": [1., 2., 3., 4.], "description": "This is a test", 'd_a': "dynamic a"}},
{"id": 2, "distance": 0.3, "entity": {"vector": [3., 4., 5., 6.], "description": "This is a test too", 'd_b': "dynamic b"}},
{"id": "a","distance": 0.4, "entity": {"vector": [4., 4., 4., 4.], "description": "This is a third test", 'd_a': "dynamic a twice"}},
])
def test_hit_with_fields(self, pk_dist_fields: Dict):
h = Hit(pk_dist_fields, pk_name="id")
# fixed attributes
assert h.id == pk_dist_fields["id"]
assert h.id == h.get("id") == h["id"]
assert h.score == pk_dist_fields["distance"]
assert h.distance == h.score
assert h.distance == h.get("distance") == h["distance"]
assert h.entity == pk_dist_fields
assert pk_dist_fields["entity"] == h.get("entity")==h["entity"]
assert hasattr(h, "id") is True
assert hasattr(h, "distance") is True
# dynamic attributes
assert h.description == pk_dist_fields["entity"].get("description")
assert h.vector == pk_dist_fields["entity"].get("vector")
assert hasattr(h, "description") is True
assert hasattr(h, "vector") is True
assert hasattr(h, "a_random_attribute") is False
with pytest.raises(AttributeError):
_ = h.field_not_exits
LOGGER.info(h)
| TestHit |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 250874,
"end": 252946
} | class ____(GeneratedAirbyteSource):
class SingleSchema:
@public
def __init__(
self, stream_name: str, stream_schema: str, stream_duplication: Optional[int] = None
):
self.type = "SINGLE_STREAM"
self.stream_name = check.str_param(stream_name, "stream_name")
self.stream_schema = check.str_param(stream_schema, "stream_schema")
self.stream_duplication = check.opt_int_param(stream_duplication, "stream_duplication")
class MultiSchema:
@public
def __init__(self, stream_schemas: str):
self.type = "MULTI_STREAM"
self.stream_schemas = check.str_param(stream_schemas, "stream_schemas")
@public
def __init__(
self,
name: str,
max_messages: int,
mock_catalog: Union["E2eTestSource.SingleSchema", "E2eTestSource.MultiSchema"],
type: Optional[str] = None,
seed: Optional[int] = None,
message_interval_ms: Optional[int] = None,
):
"""Airbyte Source for E2e Test.
Documentation can be found at https://docs.airbyte.com/integrations/sources/e2e-test
Args:
name (str): The name of the destination.
max_messages (int): Number of records to emit per stream. Min 1. Max 100 billion.
seed (Optional[int]): When the seed is unspecified, the current time millis will be used as the seed. Range: [0, 1000000].
message_interval_ms (Optional[int]): Interval between messages in ms. Min 0 ms. Max 60000 ms (1 minute).
"""
self.type = check.opt_str_param(type, "type")
self.max_messages = check.int_param(max_messages, "max_messages")
self.seed = check.opt_int_param(seed, "seed")
self.message_interval_ms = check.opt_int_param(message_interval_ms, "message_interval_ms")
self.mock_catalog = check.inst_param(
mock_catalog, "mock_catalog", (E2eTestSource.SingleSchema, E2eTestSource.MultiSchema)
)
super().__init__("E2e Test", name)
| E2eTestSource |
python | doocs__leetcode | solution/2200-2299/2237.Count Positions on Street With Required Brightness/Solution.py | {
"start": 0,
"end": 350
} | class ____:
def meetRequirement(
self, n: int, lights: List[List[int]], requirement: List[int]
) -> int:
d = [0] * (n + 1)
for p, r in lights:
i, j = max(0, p - r), min(n - 1, p + r)
d[i] += 1
d[j + 1] -= 1
return sum(s >= r for s, r in zip(accumulate(d), requirement))
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 26078,
"end": 26119
} | class ____(oracle.RAW):
pass
| _OracleRaw |
python | ray-project__ray | python/ray/dashboard/modules/train/train_head.py | {
"start": 1029,
"end": 19055
} | class ____(SubprocessModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._train_stats_actor = None # Train V1
self._train_v2_state_actor = None # Train V2
self._job_info_client = None
self._gcs_actor_info_stub = None
# Lazy initialized HTTP session to NodeHead
self._node_head_http_session = None
# TODO: The next iteration of this should be "/api/train/v2/runs/v2".
# This follows the naming convention of "/api/train/{train_version}/runs/{api_version}".
# This API corresponds to the Train V2 API.
@routes.get("/api/train/v2/runs/v1")
@dashboard_optional_utils.init_ray_and_catch_exceptions()
@DeveloperAPI
async def get_train_v2_runs(self, req: Request) -> Response:
"""Get all TrainRuns for Ray Train V2."""
try:
from ray.train.v2._internal.state.schema import TrainRunsResponse
except ImportError:
logger.exception(
"Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster."
)
return Response(
status=500,
text="Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster.",
)
state_actor = await self.get_train_v2_state_actor()
if state_actor is None:
return Response(
status=500,
text=(
"Train state data is not available. Please make sure Ray Train "
"is running and that the Train state actor is enabled by setting "
'the RAY_TRAIN_ENABLE_STATE_TRACKING environment variable to "1".'
),
)
else:
try:
train_runs = await state_actor.get_train_runs.remote()
decorated_train_runs = await self._decorate_train_runs(
train_runs.values()
)
details = TrainRunsResponse(train_runs=decorated_train_runs)
except ray.exceptions.RayTaskError as e:
# Task failure sometimes are due to GCS
# failure. When GCS failed, we expect a longer time
# to recover.
return Response(
status=503,
text=(
"Failed to get a response from the train stats actor. "
f"The GCS may be down, please retry later: {e}"
),
)
return Response(
text=details.json(),
content_type="application/json",
)
async def _decorate_train_runs(
self, train_runs: List["TrainRun"]
) -> List["DecoratedTrainRun"]:
"""Decorate the train runs with run attempts, job details, status, and status details.
Returns:
List[DecoratedTrainRun]: The decorated train runs in reverse chronological order.
"""
from ray.train.v2._internal.state.schema import DecoratedTrainRun
decorated_train_runs: List[DecoratedTrainRun] = []
state_actor = await self.get_train_v2_state_actor()
all_train_run_attempts = await state_actor.get_train_run_attempts.remote()
jobs = await self._get_jobs([train_run.job_id for train_run in train_runs])
for train_run in train_runs:
# TODO: Batch these together across TrainRuns if needed.
train_run_attempts = all_train_run_attempts[train_run.id].values()
decorated_train_run_attempts: List[
DecoratedTrainRunAttempt
] = await self._decorate_train_run_attempts(train_run_attempts)
job_details = jobs[train_run.job_id]
status, status_details = await self._get_run_status(train_run)
decorated_train_run = DecoratedTrainRun.parse_obj(
{
**train_run.dict(),
"attempts": decorated_train_run_attempts,
"job_details": job_details,
"status": status,
"status_detail": status_details,
}
)
decorated_train_runs.append(decorated_train_run)
# Sort train runs in reverse chronological order
decorated_train_runs = sorted(
decorated_train_runs,
key=lambda run: run.start_time_ns,
reverse=True,
)
return decorated_train_runs
async def _get_jobs(self, job_ids: List[str]) -> Dict[str, "JobDetails"]:
return await find_jobs_by_job_ids(
self.gcs_client,
self._job_info_client,
job_ids,
)
async def _decorate_train_run_attempts(
self, train_run_attempts: List["TrainRunAttempt"]
) -> List["DecoratedTrainRunAttempt"]:
from ray.train.v2._internal.state.schema import DecoratedTrainRunAttempt
decorated_train_run_attempts: List[DecoratedTrainRunAttempt] = []
for train_run_attempt in train_run_attempts:
# TODO: Batch these together across TrainRunAttempts if needed.
decorated_train_workers: List[
DecoratedTrainWorker
] = await self._decorate_train_workers(train_run_attempt.workers)
decorated_train_run_attempt = DecoratedTrainRunAttempt.parse_obj(
{**train_run_attempt.dict(), "workers": decorated_train_workers}
)
decorated_train_run_attempts.append(decorated_train_run_attempt)
return decorated_train_run_attempts
async def _decorate_train_workers(
self, train_workers: List["TrainWorker"]
) -> List["DecoratedTrainWorker"]:
from ray.train.v2._internal.state.schema import DecoratedTrainWorker
decorated_train_workers: List[DecoratedTrainWorker] = []
actor_ids = [worker.actor_id for worker in train_workers]
logger.info(f"Getting all actor info from GCS (actor_ids={actor_ids})")
train_run_actors = await self._get_actor_infos(actor_ids)
for train_worker in train_workers:
actor = train_run_actors.get(train_worker.actor_id, None)
# Add hardware metrics to API response
if actor:
gpus = [
gpu
for gpu in actor["gpus"]
if train_worker.pid
in [process["pid"] for process in gpu["processesPids"]]
]
# Need to convert processesPids into a proper list.
# It's some weird ImmutableList structure
# We also convert the list of processes into a single item since
# an actor is only a single process and cannot match multiple
# processes.
formatted_gpus = [
{
**gpu,
"processInfo": [
process
for process in gpu["processesPids"]
if process["pid"] == train_worker.pid
][0],
}
for gpu in gpus
]
decorated_train_worker = DecoratedTrainWorker.parse_obj(
{
**train_worker.dict(),
"status": actor["state"],
"processStats": actor["processStats"],
"gpus": formatted_gpus,
}
)
else:
decorated_train_worker = DecoratedTrainWorker.parse_obj(
train_worker.dict()
)
decorated_train_workers.append(decorated_train_worker)
return decorated_train_workers
async def _get_run_status(
self, train_run: "TrainRun"
) -> Tuple["RunStatus", Optional[str]]:
from ray.train.v2._internal.state.schema import ActorStatus, RunStatus
# TODO: Move this to the TrainStateActor.
# The train run can be unexpectedly terminated before the final run
# status was updated. This could be due to errors outside of the training
# function (e.g., system failure or user interruption) that crashed the
# train controller.
# We need to detect this case and mark the train run as ABORTED.
actor_infos = await self._get_actor_infos([train_run.controller_actor_id])
controller_actor_info = actor_infos[train_run.controller_actor_id]
controller_actor_status = (
controller_actor_info.get("state") if controller_actor_info else None
)
if (
controller_actor_status == ActorStatus.DEAD
and train_run.status == RunStatus.RUNNING
):
run_status = RunStatus.ABORTED
status_detail = "Terminated due to system errors or killed by the user."
return (run_status, status_detail)
# Default to original.
return (train_run.status, train_run.status_detail)
# TODO: The next iteration of this should be "/api/train/v1/runs/v3".
# This follows the naming convention of "/api/train/{train_version}/runs/{api_version}".
# This API corresponds to the Train V1 API.
@routes.get("/api/train/v2/runs")
@dashboard_optional_utils.init_ray_and_catch_exceptions()
@DeveloperAPI
async def get_train_runs(self, req: Request) -> Response:
"""Get all TrainRunInfos for Ray Train V1."""
try:
from ray.train._internal.state.schema import TrainRunsResponse
except ImportError:
logger.exception(
"Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster."
)
return Response(
status=500,
text="Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster.",
)
stats_actor = await self.get_train_stats_actor()
if stats_actor is None:
return Response(
status=500,
text=(
"Train state data is not available. Please make sure Ray Train "
"is running and that the Train state actor is enabled by setting "
'the RAY_TRAIN_ENABLE_STATE_TRACKING environment variable to "1".'
),
)
else:
try:
train_runs = await stats_actor.get_all_train_runs.remote()
train_runs_with_details = (
await self._add_actor_status_and_update_run_status(train_runs)
)
# Sort train runs in reverse chronological order
train_runs_with_details = sorted(
train_runs_with_details,
key=lambda run: run.start_time_ms,
reverse=True,
)
job_details = await find_jobs_by_job_ids(
self.gcs_client,
self._job_info_client,
[run.job_id for run in train_runs_with_details],
)
for run in train_runs_with_details:
run.job_details = job_details.get(run.job_id)
details = TrainRunsResponse(train_runs=train_runs_with_details)
except ray.exceptions.RayTaskError as e:
# Task failure sometimes are due to GCS
# failure. When GCS failed, we expect a longer time
# to recover.
return Response(
status=503,
text=(
"Failed to get a response from the train stats actor. "
f"The GCS may be down, please retry later: {e}"
),
)
return Response(
text=details.json(),
content_type="application/json",
)
async def _get_actor_infos(self, actor_ids: List[str]):
if self._node_head_http_session is None:
self._node_head_http_session = get_http_session_to_module(
"NodeHead", self._config.socket_dir, self._config.session_name
)
actor_ids_qs_str = ",".join(actor_ids)
url = f"http://localhost/logical/actors?ids={actor_ids_qs_str}&nocache=1"
async with self._node_head_http_session.get(url) as resp:
resp.raise_for_status()
resp_json = await resp.json()
return resp_json["data"]["actors"]
async def _add_actor_status_and_update_run_status(self, train_runs):
from ray.train._internal.state.schema import (
ActorStatusEnum,
RunStatusEnum,
TrainRunInfoWithDetails,
TrainWorkerInfoWithDetails,
)
train_runs_with_details: List[TrainRunInfoWithDetails] = []
for train_run in train_runs.values():
worker_infos_with_details: List[TrainWorkerInfoWithDetails] = []
actor_ids = [worker.actor_id for worker in train_run.workers]
logger.info(f"Getting all actor info from GCS (actor_ids={actor_ids})")
train_run_actors = await self._get_actor_infos(actor_ids)
for worker_info in train_run.workers:
actor = train_run_actors.get(worker_info.actor_id, None)
# Add hardware metrics to API response
if actor:
gpus = [
gpu
for gpu in actor["gpus"]
if worker_info.pid
in [process["pid"] for process in gpu["processesPids"]]
]
# Need to convert processesPids into a proper list.
# It's some weird ImmutableList structureo
# We also convert the list of processes into a single item since
# an actor is only a single process and cannot match multiple
# processes.
formatted_gpus = [
{
**gpu,
"processInfo": [
process
for process in gpu["processesPids"]
if process["pid"] == worker_info.pid
][0],
}
for gpu in gpus
]
worker_info_with_details = TrainWorkerInfoWithDetails.parse_obj(
{
**worker_info.dict(),
"status": actor["state"],
"processStats": actor["processStats"],
"gpus": formatted_gpus,
}
)
else:
worker_info_with_details = TrainWorkerInfoWithDetails.parse_obj(
worker_info.dict()
)
worker_infos_with_details.append(worker_info_with_details)
train_run_with_details = TrainRunInfoWithDetails.parse_obj(
{**train_run.dict(), "workers": worker_infos_with_details}
)
# The train run can be unexpectedly terminated before the final run
# status was updated. This could be due to errors outside of the training
# function (e.g., system failure or user interruption) that crashed the
# train controller.
# We need to detect this case and mark the train run as ABORTED.
actor = train_run_actors.get(train_run.controller_actor_id)
controller_actor_status = actor.get("state") if actor else None
if (
controller_actor_status == ActorStatusEnum.DEAD
and train_run.run_status == RunStatusEnum.RUNNING
):
train_run_with_details.run_status = RunStatusEnum.ABORTED
train_run_with_details.status_detail = (
"Terminated due to system errors or killed by the user."
)
train_runs_with_details.append(train_run_with_details)
return train_runs_with_details
async def run(self):
await super().run()
if not self._job_info_client:
self._job_info_client = JobInfoStorageClient(self.gcs_client)
gcs_channel = self.aiogrpc_gcs_channel
self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub(
gcs_channel
)
async def get_train_stats_actor(self):
"""
Gets the train stats actor and caches it as an instance variable.
"""
try:
from ray.train._internal.state.state_actor import get_state_actor
if self._train_stats_actor is None:
self._train_stats_actor = get_state_actor()
return self._train_stats_actor
except ImportError:
logger.exception(
"Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster."
)
return None
async def get_train_v2_state_actor(self):
"""
Gets the Train state actor and caches it as an instance variable.
"""
try:
from ray.train.v2._internal.state.state_actor import get_state_actor
if self._train_v2_state_actor is None:
self._train_v2_state_actor = get_state_actor()
return self._train_v2_state_actor
except ImportError:
logger.exception(
"Train is not installed. Please run `pip install ray[train]` "
"when setting up Ray on your cluster."
)
return None
| TrainHead |
python | spyder-ide__spyder | spyder/plugins/editor/panels/codefolding.py | {
"start": 1525,
"end": 32075
} | class ____(Panel):
"""
Displays the document outline and lets the user collapse/expand blocks.
The data represented by the panel come from the Language Server Protocol
invoked via the CodeEditor. This panel stores information about both
folding regions and their folding state.
"""
#: signal emitted when a fold trigger state has changed, parameters are
#: the concerned text block and the new state (collapsed or not).
trigger_state_changed = Signal(QTextBlock, bool)
collapse_all_triggered = Signal()
expand_all_triggered = Signal()
def __init__(self):
Panel.__init__(self)
self.collapsed_icon = ima.icon('folding.arrow_right')
self.uncollapsed_icon = ima.icon('folding.arrow_down')
self._block_nbr = -1
self._highlight_caret = False
self.highlight_caret_scope = False
#: the list of deco used to highlight the current fold region (
#: surrounding regions are darker)
self._scope_decos = []
#: the list of folded block decorations
self._block_decos = {}
self.setMouseTracking(True)
self.scrollable = True
self._mouse_over_line = None
self._current_scope = None
self._display_folding = False
self._key_pressed = False
self._highlight_runner = DelayJobRunner(delay=250)
self.current_tree = IntervalTree()
self.root = FoldingRegion(None, None)
self.folding_regions = {}
self.folding_status = {}
self.folding_levels = {}
self.folding_nesting = {}
@property
def highlight_caret_scope(self):
"""
True to highlight the caret scope automatically.
(Similar to the ``Highlight blocks in Qt Creator``.
Default is False.
"""
return self._highlight_caret
@highlight_caret_scope.setter
def highlight_caret_scope(self, value):
if value != self._highlight_caret:
self._highlight_caret = value
if self.editor:
if value:
self._block_nbr = -1
self.editor.cursorPositionChanged.connect(
self._highlight_caret_scope)
else:
self._block_nbr = -1
self.editor.cursorPositionChanged.disconnect(
self._highlight_caret_scope)
for clone in self.editor.clones:
try:
clone.modes.get(
self.__class__).highlight_caret_scope = value
except KeyError:
# this should never happen since we're working with
# clones
pass
def update_folding(self, folding_info):
"""Update folding panel folding ranges."""
if folding_info is None:
return
(self.current_tree, self.root,
self.folding_regions, self.folding_nesting,
self.folding_levels, self.folding_status) = folding_info
self._clear_block_decos()
self.update()
def sizeHint(self):
"""Returns the widget size hint (based on the editor font size) """
fm = QFontMetricsF(self.editor.font())
size_hint = QSize(ceil(fm.height()), ceil(fm.height()))
if size_hint.width() > 16:
size_hint.setWidth(16)
return size_hint
def _draw_collapsed_indicator(self, line_number, top_position, block,
painter, mouse_hover=False):
if line_number in self.folding_regions:
collapsed = self.folding_status[line_number]
if not mouse_hover:
self._draw_fold_indicator(top_position, collapsed, painter)
if collapsed:
if mouse_hover:
self._draw_fold_indicator(top_position, collapsed, painter)
elif not mouse_hover:
for deco_line in list(self._block_decos.keys()):
deco = self._block_decos[deco_line]
# Check if the block decoration has been removed, it
# might have been unfolded by the parent
# editor/document in the case of cloned editor
if deco.block == block:
# remove it and
self._block_decos.pop(deco_line)
self.editor.decorations.remove(deco, key='folded')
del deco
break
def highlight_folded_regions(self):
"""Highlight folded regions on the editor's visible buffer."""
first_block_nb, last_block_nb = self.editor.get_buffer_block_numbers()
# This can happen at startup, and when it does, we don't need to move
# pass this point.
if first_block_nb == last_block_nb:
return
for block_number in range(first_block_nb, last_block_nb):
block = self.editor.document().findBlockByNumber(block_number)
line_number = block_number + 1
if line_number in self.folding_regions:
collapsed = self.folding_status[line_number]
# Check if a block is folded by UI inspection.
# This is necesary because the algorithm that detects the
# currently folded regions may fail, for instance, after
# pasting a big chunk of code.
ui_collapsed = (
block.isVisible() and not block.next().isVisible()
)
if collapsed != ui_collapsed:
collapsed = ui_collapsed
self.folding_status[line_number] = ui_collapsed
if collapsed:
# Check if the block already has a decoration,
# it might have been folded by the parent
# editor/document in the case of cloned editor
for deco_line in self._block_decos:
deco = self._block_decos[deco_line]
if deco.block == block:
# no need to add a deco, just go to the
# next block
break
else:
line_end = self.folding_regions[line_number]
self._add_fold_decoration(block, line_end)
    def paintEvent(self, event):
        """
        Paint fold indicators on the folding panel and possible folding region
        background on the editor.
        """
        super().paintEvent(event)
        painter = QPainter(self)
        self.paint_cell(painter)
        # Draw collapsed indicators
        if not self._display_folding and not self._key_pressed:
            # Mouse is outside the panel and no delete/enter handling is in
            # progress: paint only the collapsed indicators and stop.
            for top_position, line_number, block in self.editor.visible_blocks:
                if self.folding_status.get(line_number):
                    self._draw_collapsed_indicator(
                        line_number, top_position, block,
                        painter, mouse_hover=True)
            return
        # Draw background over the selected non collapsed fold region
        if self._mouse_over_line is not None:
            block = self.editor.document().findBlockByNumber(
                self._mouse_over_line)
            try:
                self._draw_fold_region_background(block, painter)
            except (ValueError, KeyError):
                # Catching the KeyError above is necessary to avoid
                # issue spyder-ide/spyder#10918.
                # It happens when users have the mouse on top of the
                # folding panel and make some text modifications
                # that trigger a folding recomputation.
                pass
        # Draw all fold indicators
        for top_position, line_number, block in self.editor.visible_blocks:
            self._draw_collapsed_indicator(
                line_number, top_position, block, painter, mouse_hover=False)
    def _draw_fold_region_background(self, block, painter):
        """
        Draw the fold region background when the mouse is over a non
        collapsed indicator.
        :param block: First block of the hovered fold region.
        :param painter: QPainter
        """
        th = TextHelper(self.editor)
        start = block.blockNumber()
        # May raise KeyError when folding was just recomputed; the caller
        # (paintEvent) handles it.
        end = self.folding_regions[start]
        if start > 0:
            top = th.line_pos_from_number(start)
        else:
            top = 0
        bottom = th.line_pos_from_number(end)
        h = bottom - top
        if h == 0:
            # Degenerate region: fall back to a single cell height.
            h = self.sizeHint().height()
        w = self.sizeHint().width()
        self._draw_rect(QRectF(0, top, w, h), painter)
    def _draw_rect(self, rect, painter):
        """
        Draw the background rectangle using the current style primitive color.
        :param rect: The fold zone rect to draw
        :param painter: The widget's painter.
        """
        c = self.editor.sideareas_color
        # Horizontal gradient derived from the side areas color; direction of
        # the drift depends on the interface theme.
        grad = QLinearGradient(rect.topLeft(), rect.topRight())
        if is_dark_interface():
            grad.setColorAt(0, c.lighter(110))
            grad.setColorAt(1, c.lighter(130))
            outline = c.darker(100)
        else:
            grad.setColorAt(0, c.darker(105))
            grad.setColorAt(1, c.darker(115))
            outline = c.lighter(110)
        painter.fillRect(rect, grad)
        painter.setPen(QPen(outline))
        # Draw the four edges one pixel short of each corner so the corners
        # stay open.
        painter.drawLine(rect.topLeft() +
                         QPointF(1, 0),
                         rect.topRight() -
                         QPointF(1, 0))
        painter.drawLine(rect.bottomLeft() +
                         QPointF(1, 0),
                         rect.bottomRight() -
                         QPointF(1, 0))
        painter.drawLine(rect.topRight() +
                         QPointF(0, 1),
                         rect.bottomRight() -
                         QPointF(0, 1))
        painter.drawLine(rect.topLeft() +
                         QPointF(0, 1),
                         rect.bottomLeft() -
                         QPointF(0, 1))
def _draw_fold_indicator(self, top, collapsed, painter):
"""
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
"""
rect = QRect(
0, top, self.sizeHint().width() + 2, self.sizeHint().height() + 2
)
if collapsed:
icon = self.collapsed_icon
else:
icon = self.uncollapsed_icon
icon.paint(painter, rect)
def find_parent_scope(self, block):
"""Find parent scope, if the block is not a fold trigger."""
block_line = block.blockNumber()
if block_line not in self.folding_regions:
for start_line in self.folding_regions:
end_line = self.folding_regions[start_line]
if end_line > block_line:
if start_line < block_line:
block = self.editor.document().findBlockByNumber(
start_line)
break
return block
def _clear_scope_decos(self):
"""Clear scope decorations (on the editor)"""
for deco in self._scope_decos:
self.editor.decorations.remove(deco)
self._scope_decos[:] = []
def _get_scope_highlight_color(self):
"""
Gets the base scope highlight color (derivated from the editor
background)
For lighter themes will be a darker color,
and for darker ones will be a lighter color
"""
color = self.editor.sideareas_color
if is_dark_interface():
color = drift_color(color, 130)
else:
color = drift_color(color, 105)
return color
    def _decorate_block(self, start, end):
        """
        Create a scope-highlight decoration and add it to the editor.
        Args:
            start (int) start line of the decoration
            end (int) end line of the decoration
        """
        color = self._get_scope_highlight_color()
        draw_order = DRAW_ORDERS.get('folding_areas')
        # Start one line earlier (clamped to 0) so the trigger line itself is
        # included in the highlight.
        d = TextDecoration(self.editor.document(),
                           start_line=max(0, start - 1),
                           end_line=end,
                           draw_order=draw_order)
        d.set_background(color)
        d.set_full_width(True, clear=False)
        self.editor.decorations.add(d)
        # Track it so _clear_scope_decos can remove it later.
        self._scope_decos.append(d)
def _highlight_block(self, block):
"""
Highlights the current fold scope.
:param block: Block that starts the current fold scope.
"""
block_line = block.blockNumber()
end_line = self.folding_regions[block_line]
scope = (block_line, end_line)
if (self._current_scope is None or self._current_scope != scope):
self._current_scope = scope
self._clear_scope_decos()
# highlight current scope with darker or lighter color
start, end = scope
if not self.folding_status[start]:
self._decorate_block(start, end)
    def mouseMoveEvent(self, event):
        """
        Detect mouse over indicator and highlight the current scope in the
        editor (up and down decoration around the foldable text when the mouse
        is over an indicator).
        :param event: event
        """
        super().mouseMoveEvent(event)
        th = TextHelper(self.editor)
        line = th.line_nbr_from_position(event.pos().y())
        if line >= 0:
            block = self.editor.document().findBlockByNumber(line)
            block = self.find_parent_scope(block)
            line_number = block.blockNumber()
            if line_number in self.folding_regions:
                if self._mouse_over_line is None:
                    # mouse enter fold scope
                    QApplication.setOverrideCursor(
                        QCursor(Qt.PointingHandCursor))
                if (self._mouse_over_line != block.blockNumber() and
                        self._mouse_over_line is not None):
                    # fold scope changed, a previous block was highlighter so
                    # we quickly update our highlighting
                    self._mouse_over_line = block.blockNumber()
                    try:
                        self._highlight_block(block)
                    except KeyError:
                        # Catching the KeyError above is necessary to avoid
                        # issue spyder-ide/spyder#13230.
                        pass
                else:
                    # same fold scope, request highlight
                    self._mouse_over_line = block.blockNumber()
                    try:
                        self._highlight_runner.request_job(
                            self._highlight_block, block)
                    except KeyError:
                        # Catching the KeyError above is necessary to avoid
                        # issue spyder-ide/spyder#11291.
                        pass
                # NOTE(review): '_highight_block' looks like a typo of
                # '_highlight_block', and as written it is a dead store (the
                # attribute is never read in this file). Renaming it would
                # shadow the _highlight_block method on the instance, so it
                # is deliberately left untouched -- confirm upstream intent.
                self._highight_block = block
            else:
                # no fold scope to highlight, cancel any pending requests
                self._highlight_runner.cancel_requests()
                self._mouse_over_line = None
                QApplication.restoreOverrideCursor()
        self.repaint()
    def enterEvent(self, event):
        """Show all folding indicators when the mouse enters the panel."""
        self._display_folding = True
        self.repaint()
    def leaveEvent(self, event):
        """
        Removes scope decorations and background from the editor and the panel
        if highlight_caret_scope, else simply update the scope decorations to
        match the caret scope.
        """
        super().leaveEvent(event)
        QApplication.restoreOverrideCursor()
        self._highlight_runner.cancel_requests()
        if not self.highlight_caret_scope:
            # Drop mouse-hover highlighting entirely.
            self._clear_scope_decos()
            self._mouse_over_line = None
            self._current_scope = None
        else:
            # Fall back to highlighting the caret's scope instead.
            self._block_nbr = -1
            self._highlight_caret_scope()
        self.editor.repaint()
        # Hide indicators again now that the mouse left the panel.
        self._display_folding = False
    def _add_fold_decoration(self, block, end_line):
        """
        Add fold decorations (boxes around a folded block in the editor
        widget).
        """
        start_line = block.blockNumber()
        # The folded text is shown as the decoration's tooltip.
        text = self.editor.get_text_region(start_line + 1, end_line)
        draw_order = DRAW_ORDERS.get('folded_regions')
        deco = TextDecoration(block, draw_order=draw_order)
        # Clicking the decoration unfolds the region.
        deco.signals.clicked.connect(self._on_fold_deco_clicked)
        deco.tooltip = text
        deco.block = block
        deco.select_line()
        deco.set_background(self._get_scope_highlight_color())
        deco.set_full_width(flag=True, clear=True)
        # Keyed by the trigger block's (0-based) number.
        self._block_decos[start_line] = deco
        self.editor.decorations.add(deco, key='folded')
def _get_block_until_line(self, block, end_line):
while block.blockNumber() <= end_line and block.isValid():
block.setVisible(False)
block = block.next()
return block
    def fold_region(self, block, start_line, end_line):
        """Fold region spanned by `start_line` and `end_line`."""
        # Note: The block passed to this method is the first one that needs to
        # be hidden.
        # The decoration is attached to the trigger block, i.e. the block
        # whose (0-based) number is ``start_line - 1``.
        initial_block = self.editor.document().findBlockByNumber(
            start_line - 1)
        self._add_fold_decoration(initial_block, end_line)
        # Hide every block of the region.
        while block.blockNumber() < end_line and block.isValid():
            block.setVisible(False)
            block = block.next()
    def unfold_region(self, block, start_line, end_line):
        """Unfold region spanned by `start_line` and `end_line`."""
        # Remove the decoration attached to the trigger block, if any.
        if start_line - 1 in self._block_decos:
            deco = self._block_decos[start_line - 1]
            self._block_decos.pop(start_line - 1)
            self.editor.decorations.remove(deco, key='folded')
        while block.blockNumber() < end_line and block.isValid():
            current_line = block.blockNumber()
            block.setVisible(True)
            get_next = True
            if (
                current_line in self.folding_regions
                and current_line != start_line
            ):
                # Nested fold trigger inside the region being unfolded.
                block_end = self.folding_regions[current_line]
                if self.folding_status[current_line]:
                    # Skip setting visible blocks until the block is done,
                    # so nested collapsed regions stay hidden.
                    get_next = False
                    block = self._get_block_until_line(block, block_end - 1)
            if get_next:
                block = block.next()
    def toggle_fold_trigger(self, block):
        """
        Toggle a fold trigger block (expand or collapse it).
        :param block: The QTextBlock to expand/collapse
        """
        start_line = block.blockNumber()
        # Not a fold trigger: nothing to toggle.
        if start_line not in self.folding_regions:
            return
        end_line = self.folding_regions[start_line]
        if self.folding_status[start_line]:
            self.unfold_region(block, start_line, end_line)
            self.folding_status[start_line] = False
            if self._mouse_over_line is not None:
                # Re-highlight the now-expanded scope under the mouse.
                self._decorate_block(start_line, end_line)
        else:
            self.fold_region(block, start_line, end_line)
            self.folding_status[start_line] = True
            self._clear_scope_decos()
        self._refresh_editor_and_scrollbars()
def mousePressEvent(self, event):
"""Folds/unfolds the pressed indicator if any."""
if self._mouse_over_line is not None:
block = self.editor.document().findBlockByNumber(
self._mouse_over_line)
self.toggle_fold_trigger(block)
    def _on_fold_deco_clicked(self, deco):
        """Unfold a folded block that has just been clicked by the user"""
        # The decoration carries the trigger block it was attached to.
        self.toggle_fold_trigger(deco.block)
    def on_state_changed(self, state):
        """
        On state changed we (dis)connect to the cursorPositionChanged signal
        """
        if state:
            # Panel enabled: track key presses and delete requests to protect
            # folded regions, and optionally follow the caret's scope.
            self.editor.sig_key_pressed.connect(self._on_key_pressed)
            self.editor.sig_delete_requested.connect(self._expand_selection)
            if self._highlight_caret:
                self.editor.cursorPositionChanged.connect(
                    self._highlight_caret_scope)
                self._block_nbr = -1
            self.editor.new_text_set.connect(self._clear_block_decos)
        else:
            # Panel disabled: undo every connection made above.
            self.editor.sig_key_pressed.disconnect(self._on_key_pressed)
            self.editor.sig_delete_requested.disconnect(self._expand_selection)
            if self._highlight_caret:
                self.editor.cursorPositionChanged.disconnect(
                    self._highlight_caret_scope)
                self._block_nbr = -1
            self.editor.new_text_set.disconnect(self._clear_block_decos)
def _in_folded_block(self):
"""Check if the current block is folded."""
cursor = self.editor.textCursor()
if cursor.hasSelection():
block_start = self.editor.document().findBlock(
cursor.selectionStart()
)
block_end = self.editor.document().findBlock(cursor.selectionEnd())
if (
# The start block needs to be among the folded ones.
block_start.blockNumber() in self._block_decos
# This covers the case when there's some text selected in the
# folded line or when it's selected in its entirety. For the
# latter, Qt returns the next block as the final one, which
# is not visible.
and (block_start == block_end or not block_end.isVisible())
):
return True
else:
return False
else:
current_block = self.editor.document().findBlock(cursor.position())
return current_block.blockNumber() in self._block_decos
    def _on_key_pressed(self, event):
        """
        Handle key press events in order to select a whole folded scope if the
        user wants to remove it.
        Notes
        -----
        We don't handle Key_Delete here because it's behind a shortcut in
        CodeEditor. So, the event associated to that key doesn't go through its
        keyPressEvent.
        Instead, CodeEditor emits sig_delete_requested in the method that gets
        called when Key_Delete is pressed, and in several other places, which
        is handled by _expand_selection below.
        """
        # This doesn't apply if there are not folded regions
        if not self._block_decos:
            return
        if self._in_folded_block():
            # We prevent the following events to change folded blocks to make
            # them appear as read-only to users.
            # See the last comments in spyder-ide/spyder#21669 for the details
            # of this decision.
            if (
                # When Tab or Shift+Tab are pressed
                event.key() in [Qt.Key_Tab, Qt.Key_Backtab]
                # When text is trying to be written
                or event.text() and event.key() != Qt.Key_Backspace
            ):
                event.accept()
                return
        delete_pressed = event.key() == Qt.Key_Backspace
        enter_pressed = False
        cursor = self.editor.textCursor()
        if cursor.hasSelection():
            # NOTE(review): only Qt.Key_Return is checked here; the keypad
            # Enter key (Qt.Key_Enter) is not -- confirm if intentional.
            if event.key() == Qt.Key_Return:
                enter_pressed = True
        # Delete a folded scope when pressing delete or enter
        if delete_pressed or enter_pressed:
            self._expand_selection()
def _expand_selection(self):
"""
Expand selection to encompass a whole folded scope in case the
current selection starts and/or ends in one, or the cursor is over a
block deco.
"""
if not self._block_decos:
return
cursor = self.editor.textCursor()
self._key_pressed = True
# If there's no selected text, select the current line but only if
# it corresponds to a block deco. That allows us to remove the folded
# region associated to it when typing Delete or Backspace on the line.
# Otherwise, the editor ends up in an inconsistent state.
if not cursor.hasSelection():
current_block = self.editor.document().findBlock(cursor.position())
if current_block.blockNumber() in self._block_decos:
cursor.select(QTextCursor.LineUnderCursor)
else:
self._key_pressed = False
return
# Get positions to check if we need to expand the current selection to
# cover a folded region too.
start_pos = cursor.selectionStart()
end_pos = cursor.selectionEnd()
# A selection can end in an eol when calling CodeEditor.delete_line,
# for instance. In that case, we need to remove it for the code below
# to work as expected.
if cursor.selectedText()[-1] in EOL_SYMBOLS:
end_pos -= 1
positions_to_check = (start_pos, end_pos)
for pos in positions_to_check:
block = self.editor.document().findBlock(pos)
start_line = block.blockNumber() + 1
if (
start_line in self.folding_regions
and self.folding_status[start_line]
):
end_line = self.folding_regions[start_line]
if cursor.hasSelection():
tc = TextHelper(self.editor).select_lines(
start_line, end_line)
tc.movePosition(tc.MoveOperation.NextBlock,
tc.MoveMode.KeepAnchor)
if tc.selectionStart() > cursor.selectionStart():
start = cursor.selectionStart()
else:
start = tc.selectionStart()
if tc.selectionEnd() < cursor.selectionEnd():
end = cursor.selectionEnd()
else:
end = tc.selectionEnd()
tc.setPosition(start)
tc.setPosition(end, tc.KeepAnchor)
self.editor.setTextCursor(tc)
self._update_block_decos(start_pos, end_pos)
self._key_pressed = False
    def _refresh_editor_and_scrollbars(self):
        """
        Refresh editor content and scrollbars.
        We generate a fake resize event to refresh scroll bar.
        We have the same problem as described here:
        http://www.qtcentre.org/threads/44803 and we apply the same solution
        (don't worry, there is no visual effect, the editor does not grow up
        at all, even with a value = 500)
        """
        TextHelper(self.editor).mark_whole_doc_dirty()
        self.editor.repaint()
        # Fake resize: grow the width by one pixel so Qt recomputes the
        # scroll bars.
        s = self.editor.size()
        s.setWidth(s.width() + 1)
        self.editor.resizeEvent(QResizeEvent(self.editor.size(), s))
    def collapse_all(self):
        """
        Collapses all triggers and makes all blocks with fold level > 0
        invisible.
        """
        self._clear_block_decos()
        block = self.editor.document().firstBlock()
        while block.isValid():
            line_number = block.blockNumber()
            if line_number in self.folding_regions:
                end_line = self.folding_regions[line_number]
                self.fold_region(block, line_number, end_line)
            block = block.next()
        self._refresh_editor_and_scrollbars()
        # Move the cursor to the start of the document so it doesn't end up
        # inside a now-hidden block.
        tc = self.editor.textCursor()
        tc.movePosition(tc.Start)
        self.editor.setTextCursor(tc)
        self.collapse_all_triggered.emit()
    def _clear_block_decos(self):
        """Clear the folded block decorations."""
        # Remove all 'folded' decorations from the editor in one call, then
        # drop our bookkeeping mapping.
        self.editor.decorations.remove_key('folded')
        self._block_decos = {}
    def _update_block_decos(self, start_pos, end_pos):
        """
        Update block decorations in case some are going to be removed by the
        user.
        Parameters
        ----------
        start_pos: int
            Start cursor position of the selection that's going to remove or
            replace text in the editor
        end_pos: int
            End cursor position of the same selection.
        """
        start_line = self.editor.document().findBlock(start_pos).blockNumber()
        end_line = self.editor.document().findBlock(end_pos).blockNumber()
        # Iterate over a copy because entries are popped while iterating.
        for deco_line in self._block_decos.copy():
            if start_line <= deco_line <= end_line:
                deco = self._block_decos[deco_line]
                self._block_decos.pop(deco_line)
                self.editor.decorations.remove(deco, key='folded')
                # Deco keys are 0-based block numbers while folding_status is
                # keyed by 1-based lines (see fold_region), hence the +1.
                self.folding_status[deco_line + 1] = False
    def expand_all(self):
        """Expands all fold triggers."""
        block = self.editor.document().firstBlock()
        while block.isValid():
            line_number = block.blockNumber()
            if line_number in self.folding_regions:
                end_line = self.folding_regions[line_number]
                self.unfold_region(block, line_number, end_line)
            block = block.next()
        # Drop any remaining folded decorations and force a repaint.
        self._clear_block_decos()
        self._refresh_editor_and_scrollbars()
        self.expand_all_triggered.emit()
    def _highlight_caret_scope(self):
        """
        Highlight the scope of the current caret position.
        This get called only if :attr:`
        spyder.widgets.panels.FoldingPanel.highlight_caret_scope` is True.
        """
        cursor = self.editor.textCursor()
        block_nbr = cursor.blockNumber()
        # Only recompute when the caret moved to a different block.
        if self._block_nbr != block_nbr:
            block = self.find_parent_scope(cursor.block())
            line_number = block.blockNumber()
            if line_number in self.folding_regions:
                self._mouse_over_line = block.blockNumber()
                try:
                    self._highlight_block(block)
                except KeyError:
                    # Catching the KeyError above is necessary to avoid
                    # issue spyder-ide/spyder#13230.
                    pass
            else:
                self._clear_scope_decos()
        self._block_nbr = block_nbr
| FoldingPanel |
python | jmcnamara__XlsxWriter | xlsxwriter/exceptions.py | {
"start": 322,
"end": 427
} | class ____(XlsxWriterException):
"""Base exception for all input data related errors."""
| XlsxInputError |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_add_column_with_default_app/migrations/0002_add_field_default.py | {
"start": 153,
"end": 460
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_add_column_with_default_app", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="testtable",
name="field",
field=models.IntegerField(default=0),
),
]
| Migration |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-adapter/llama_index/embeddings/adapter/base.py | {
"start": 433,
"end": 4268
} | class ____(BaseEmbedding):
"""
Adapter for any embedding model.
This is a wrapper around any embedding model that adds an adapter layer \
on top of it.
This is useful for finetuning an embedding model on a downstream task.
The embedding model can be any model - it does not need to expose gradients.
Args:
base_embed_model (BaseEmbedding): Base embedding model.
adapter_path (str): Path to adapter.
adapter_cls (Optional[Type[Any]]): Adapter class. Defaults to None, in which \
case a linear adapter is used.
transform_query (bool): Whether to transform query embeddings. Defaults to True.
device (Optional[str]): Device to use. Defaults to None.
embed_batch_size (int): Batch size for embedding. Defaults to 10.
callback_manager (Optional[CallbackManager]): Callback manager. \
Defaults to None.
"""
_base_embed_model: BaseEmbedding = PrivateAttr()
_adapter: Any = PrivateAttr()
_transform_query: bool = PrivateAttr()
_device: Optional[str] = PrivateAttr()
_target_device: Any = PrivateAttr()
def __init__(
self,
base_embed_model: BaseEmbedding,
adapter_path: str,
adapter_cls: Optional[Type[Any]] = None,
transform_query: bool = True,
device: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""Init params."""
import torch
from llama_index.embeddings.adapter.utils import BaseAdapter, LinearLayer
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=f"Adapter for {base_embed_model.model_name}",
)
if device is None:
device = infer_torch_device()
logger.info(f"Use pytorch device: {device}")
self._target_device = torch.device(device)
self._base_embed_model = base_embed_model
if adapter_cls is None:
adapter_cls = LinearLayer
else:
adapter_cls = cast(Type[BaseAdapter], adapter_cls)
adapter = adapter_cls.load(adapter_path)
self._adapter = cast(BaseAdapter, adapter)
self._adapter.to(self._target_device)
self._transform_query = transform_query
@classmethod
def class_name(cls) -> str:
return "AdapterEmbeddingModel"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = self._base_embed_model._get_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
import torch
query_embedding = await self._base_embed_model._aget_query_embedding(query)
if self._transform_query:
query_embedding_t = torch.tensor(query_embedding).to(self._target_device)
query_embedding_t = self._adapter.forward(query_embedding_t)
query_embedding = query_embedding_t.tolist()
return query_embedding
def _get_text_embedding(self, text: str) -> List[float]:
return self._base_embed_model._get_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
return await self._base_embed_model._aget_text_embedding(text)
# Maintain for backwards compatibility
LinearAdapterEmbeddingModel = AdapterEmbeddingModel
| AdapterEmbeddingModel |
python | huggingface__transformers | src/transformers/models/beit/modeling_beit.py | {
"start": 54822,
"end": 59674
} | class ____(BeitPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = BeitEmbeddings(config)
self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
if config.add_fpn:
if len(self.config.out_indices) != 4:
raise ValueError(
"BeitBackbone requires config.out_indices to be a list of 4 integers, "
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
"a base-sized architecture."
)
hidden_size = config.hidden_size
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
nn.GELU(),
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
# initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 14, 14]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
batch_size = pixel_values.shape[0]
embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)
resolution = pixel_values.shape[2:]
outputs = self.encoder(
embedding_output,
output_hidden_states=True,
output_attentions=output_attentions,
resolution=resolution,
return_dict=return_dict,
)
hidden_states = outputs.hidden_states if return_dict else outputs[1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1:, :]
hidden_state = hidden_state.permute(0, 2, 1)
hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)
feature_maps += (hidden_state,)
if self.config.add_fpn:
feature_maps = [
self.fpn1(feature_maps[0]),
self.fpn2(feature_maps[1]),
self.fpn3(feature_maps[2]),
self.fpn4(feature_maps[3]),
]
feature_maps = tuple(feature_maps)
if not return_dict:
if output_hidden_states:
output = (feature_maps,) + outputs[1:]
else:
output = (feature_maps,) + outputs[2:]
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = [
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
"BeitBackbone",
]
| BeitBackbone |
python | coleifer__peewee | tests/sqlite.py | {
"start": 12234,
"end": 17210
} | class ____(ModelTestCase):
database = database
requires = [KeyData]
def test_schema(self):
self.assertSQL(KeyData._schema._create_table(), (
'CREATE TABLE IF NOT EXISTS "key_data" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"key" TEXT NOT NULL, '
'"data" JSON NOT NULL)'), [])
def test_create_read_update(self):
test_values = (
'simple string',
'',
1337,
0.0,
True,
False,
['foo', 'bar', ['baz', 'nug']],
{'k1': 'v1', 'k2': {'x1': 'y1', 'x2': 'y2'}},
{'a': 1, 'b': 0.0, 'c': True, 'd': False, 'e': None, 'f': [0, 1],
'g': {'h': 'ijkl'}},
)
# Create a row using the given test value. Verify we can read the value
# back from the database, and also that we can query for the row using
# the value in the WHERE clause.
for i, value in enumerate(test_values):
# We can create and re-read values.
KeyData.create(key='k%s' % i, data=value)
kd_db = KeyData.get(KeyData.key == 'k%s' % i)
self.assertEqual(kd_db.data, value)
# We can read the data back using the value in the WHERE clause.
kd_db = KeyData.get(KeyData.data == value)
self.assertEqual(kd_db.key, 'k%s' % i)
# Verify we can use values in UPDATE query.
kd = KeyData.create(key='kx', data='')
for value in test_values:
nrows = (KeyData
.update(data=value)
.where(KeyData.key == 'kx')
.execute())
self.assertEqual(nrows, 1)
kd_db = KeyData.get(KeyData.key == 'kx')
self.assertEqual(kd_db.data, value)
def test_json_unicode(self):
with self.database.atomic():
KeyData.delete().execute()
# Two Chinese characters.
unicode_str = b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf8')
data = {'foo': unicode_str}
kd = KeyData.create(key='k1', data=data)
kd_db = KeyData.get(KeyData.key == 'k1')
self.assertEqual(kd_db.data, {'foo': unicode_str})
def test_json_to_json(self):
kd1 = KeyData.create(key='k1', data={'k1': 'v1', 'k2': 'v2'})
subq = (KeyData
.select(KeyData.data)
.where(KeyData.key == 'k1'))
# Assign value using a subquery.
KeyData.create(key='k2', data=subq)
kd2_db = KeyData.get(KeyData.key == 'k2')
self.assertEqual(kd2_db.data, {'k1': 'v1', 'k2': 'v2'})
def test_json_bulk_update_top_level_list(self):
kd1 = KeyData.create(key='k1', data=['a', 'b', 'c'])
kd2 = KeyData.create(key='k2', data=['d', 'e', 'f'])
kd1.data = ['g', 'h', 'i']
kd2.data = ['j', 'k', 'l']
KeyData.bulk_update([kd1, kd2], fields=[KeyData.data])
kd1_db = KeyData.get(KeyData.key == 'k1')
kd2_db = KeyData.get(KeyData.key == 'k2')
self.assertEqual(kd1_db.data, ['g', 'h', 'i'])
self.assertEqual(kd2_db.data, ['j', 'k', 'l'])
def test_json_bulk_update_top_level_dict(self):
kd1 = KeyData.create(key='k1', data={'x': 'y1'})
kd2 = KeyData.create(key='k2', data={'x': 'y2'})
kd1.data = {'x': 'z1'}
kd2.data = {'X': 'Z2'}
KeyData.bulk_update([kd1, kd2], fields=[KeyData.data])
kd1_db = KeyData.get(KeyData.key == 'k1')
kd2_db = KeyData.get(KeyData.key == 'k2')
self.assertEqual(kd1_db.data, {'x': 'z1'})
self.assertEqual(kd2_db.data, {'X': 'Z2'})
def test_json_multi_ops(self):
data = (
('k1', [0, 1]),
('k2', [1, 2]),
('k3', {'x3': 'y3'}),
('k4', {'x4': 'y4'}))
res = KeyData.insert_many(data).execute()
if database.returning_clause:
self.assertEqual([r for r, in res], [1, 2, 3, 4])
else:
self.assertEqual(res, 4)
vals = [[1, 2], [2, 3], {'x3': 'y3'}, {'x5': 'y5'}]
pw_vals = [Value(v, unpack=False) for v in vals]
query = KeyData.select().where(KeyData.data.in_(pw_vals))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."key", "t1"."data" '
'FROM "key_data" AS "t1" '
'WHERE ("t1"."data" IN (json(?), json(?), json(?), json(?)))'),
['[1, 2]', '[2, 3]', '{"x3": "y3"}', '{"x5": "y5"}'])
self.assertEqual(query.count(), 2)
self.assertEqual(sorted([k.key for k in query]), ['k2', 'k3'])
query = KeyData.select().where(KeyData.data == [1, 2])
self.assertEqual(query.count(), 1)
self.assertEqual(query.get().key, 'k2')
query = KeyData.select().where(KeyData.data == {'x3': 'y3'})
self.assertEqual(query.count(), 1)
self.assertEqual(query.get().key, 'k3')
@skip_unless(json_installed(), 'requires sqlite json1')
| TestJSONField |
python | pdm-project__pdm | src/pdm/cli/commands/fix/fixers.py | {
"start": 186,
"end": 910
} | class ____(abc.ABC):
"""Base class for fixers"""
# A unique identifier for the fixer
identifier: str
# A boolean flag to indicate if the problem is breaking
breaking: bool = False
def __init__(self, project: Project) -> None:
self.project = project
def log(self, message: str, verbosity: Verbosity = Verbosity.DETAIL) -> None:
self.project.core.ui.echo(message, verbosity=verbosity)
@abc.abstractmethod
def get_message(self) -> str:
"""Return a description of the problem"""
@abc.abstractmethod
def fix(self) -> None:
"""Perform the fix"""
@abc.abstractmethod
def check(self) -> bool:
"""Check if the problem exists"""
| BaseFixer |
python | scikit-learn__scikit-learn | sklearn/base.py | {
"start": 34869,
"end": 36826
} | class ____:
"""Mixin class for transformers that generate their own names by prefixing.
This mixin is useful when the transformer needs to generate its own feature
names out, such as :class:`~sklearn.decomposition.PCA`. For example, if
:class:`~sklearn.decomposition.PCA` outputs 3 features, then the generated feature
names out are: `["pca0", "pca1", "pca2"]`.
This mixin assumes that a `_n_features_out` attribute is defined when the
transformer is fitted. `_n_features_out` is the number of output features
that the transformer will return in `transform` of `fit_transform`.
Examples
--------
>>> import numpy as np
>>> from sklearn.base import ClassNamePrefixFeaturesOutMixin, BaseEstimator
>>> class MyEstimator(ClassNamePrefixFeaturesOutMixin, BaseEstimator):
... def fit(self, X, y=None):
... self._n_features_out = X.shape[1]
... return self
>>> X = np.array([[1, 2], [3, 4]])
>>> MyEstimator().fit(X).get_feature_names_out()
array(['myestimator0', 'myestimator1'], dtype=object)
"""
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
The feature names out will prefixed by the lowercased class name. For
example, if the transformer outputs 3 features, then the feature names
out are: `["class_name0", "class_name1", "class_name2"]`.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in `fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "_n_features_out")
return _generate_get_feature_names_out(
self, self._n_features_out, input_features=input_features
)
| ClassNamePrefixFeaturesOutMixin |
python | django__django | tests/template_tests/syntax_tests/test_now.py | {
"start": 192,
"end": 2754
} | class ____(SimpleTestCase):
@setup({"now01": '{% now "j n Y" %}'})
def test_now01(self):
"""
Simple case
"""
output = self.engine.render_to_string("now01")
self.assertEqual(
output,
"%d %d %d"
% (
datetime.now().day,
datetime.now().month,
datetime.now().year,
),
)
# Check parsing of locale strings
@setup({"now02": '{% now "DATE_FORMAT" %}'})
def test_now02(self):
output = self.engine.render_to_string("now02")
self.assertEqual(output, date_format(datetime.now()))
@setup({"now03": "{% now 'j n Y' %}"})
def test_now03(self):
"""
#15092 - Also accept simple quotes
"""
output = self.engine.render_to_string("now03")
self.assertEqual(
output,
"%d %d %d"
% (
datetime.now().day,
datetime.now().month,
datetime.now().year,
),
)
@setup({"now04": "{% now 'DATE_FORMAT' %}"})
def test_now04(self):
output = self.engine.render_to_string("now04")
self.assertEqual(output, date_format(datetime.now()))
@setup({"now05": "{% now 'j \"n\" Y'%}"})
def test_now05(self):
output = self.engine.render_to_string("now05")
self.assertEqual(
output,
'%d "%d" %d'
% (
datetime.now().day,
datetime.now().month,
datetime.now().year,
),
)
@setup({"now06": "{% now \"j 'n' Y\"%}"})
def test_now06(self):
output = self.engine.render_to_string("now06")
self.assertEqual(
output,
"%d '%d' %d"
% (
datetime.now().day,
datetime.now().month,
datetime.now().year,
),
)
@setup({"now07": '{% now "j n Y" as N %}-{{N}}-'})
def test_now07(self):
output = self.engine.render_to_string("now07")
self.assertEqual(
output,
"-%d %d %d-"
% (
datetime.now().day,
datetime.now().month,
datetime.now().year,
),
)
@setup({"no_args": "{% now %}"})
def test_now_args(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "'now' statement takes one argument"
):
self.engine.render_to_string("no_args")
| NowTagTests |
python | pypa__packaging | tests/test_tags.py | {
"start": 19748,
"end": 33154
} | class ____:
def teardown_method(self) -> None:
# Clear the version cache
tags._manylinux._get_glibc_version.cache_clear() # type: ignore[attr-defined]
def test_get_config_var_does_not_log(self, monkeypatch: pytest.MonkeyPatch) -> None:
debug = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(tags.logger, "debug", debug)
tags._get_config_var("missing")
assert debug.calls == []
def test_get_config_var_does_log(self, monkeypatch: pytest.MonkeyPatch) -> None:
debug = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(tags.logger, "debug", debug)
tags._get_config_var("missing", warn=True)
assert debug.calls == [
pretend.call(
"Config variable '%s' is unset, Python ABI tag may be incorrect",
"missing",
)
]
@pytest.mark.parametrize(
("arch", "is_32bit", "expected"),
[
("linux-x86_64", False, ["linux_x86_64"]),
("linux-x86_64", True, ["linux_i686"]),
("linux-aarch64", False, ["linux_aarch64"]),
("linux-aarch64", True, ["linux_armv8l", "linux_armv7l"]),
],
)
def test_linux_platforms_32_64bit_on_64bit_os(
self,
arch: str,
is_32bit: bool,
expected: list[str],
monkeypatch: pytest.MonkeyPatch,
) -> None:
monkeypatch.setattr(sysconfig, "get_platform", lambda: arch)
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.20", raising=False)
monkeypatch.setattr(tags._manylinux, "_is_compatible", lambda *args: False) # type: ignore[attr-defined]
linux_platform = list(tags._linux_platforms(is_32bit=is_32bit))[
-len(expected) :
]
assert linux_platform == expected
def test_linux_platforms_manylinux_unsupported(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.20", raising=False)
monkeypatch.setattr(tags._manylinux, "_is_compatible", lambda *args: False) # type: ignore[attr-defined]
linux_platform = list(tags._linux_platforms(is_32bit=False))
assert linux_platform == ["linux_x86_64"]
def test_linux_platforms_manylinux1(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(
tags._manylinux, # type: ignore[attr-defined]
"_is_compatible",
lambda _, glibc_version: glibc_version == _GLibCVersion(2, 5),
)
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.20", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
assert platforms == [
"manylinux_2_5_x86_64",
"manylinux1_x86_64",
"linux_x86_64",
]
def test_linux_platforms_manylinux2010(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.12", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
expected = [
"manylinux_2_12_x86_64",
"manylinux2010_x86_64",
"manylinux_2_11_x86_64",
"manylinux_2_10_x86_64",
"manylinux_2_9_x86_64",
"manylinux_2_8_x86_64",
"manylinux_2_7_x86_64",
"manylinux_2_6_x86_64",
"manylinux_2_5_x86_64",
"manylinux1_x86_64",
"linux_x86_64",
]
assert platforms == expected
def test_linux_platforms_manylinux2014(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.17", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
arch = platform.machine()
expected = [
"manylinux_2_17_" + arch,
"manylinux2014_" + arch,
"manylinux_2_16_" + arch,
"manylinux_2_15_" + arch,
"manylinux_2_14_" + arch,
"manylinux_2_13_" + arch,
"manylinux_2_12_" + arch,
"manylinux2010_" + arch,
"manylinux_2_11_" + arch,
"manylinux_2_10_" + arch,
"manylinux_2_9_" + arch,
"manylinux_2_8_" + arch,
"manylinux_2_7_" + arch,
"manylinux_2_6_" + arch,
"manylinux_2_5_" + arch,
"manylinux1_" + arch,
"linux_" + arch,
]
assert platforms == expected
@pytest.mark.parametrize(
("native_arch", "cross_arch"),
[("armv7l", "armv7l"), ("armv8l", "armv8l"), ("aarch64", "armv8l")],
)
def test_linux_platforms_manylinux2014_armhf_abi(
self, native_arch: str, cross_arch: str, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(tags._manylinux, "_glibc_version_string", lambda: "2.30") # type: ignore[attr-defined]
monkeypatch.setattr(
tags._manylinux, # type: ignore[attr-defined]
"_is_compatible",
lambda _, glibc_version: glibc_version == _GLibCVersion(2, 17),
)
monkeypatch.setattr(sysconfig, "get_platform", lambda: f"linux_{native_arch}")
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__),
"manylinux",
"hello-world-armv7l-armhf",
),
)
platforms = list(tags._linux_platforms(is_32bit=True))
archs = {"armv8l": ["armv8l", "armv7l"]}.get(cross_arch, [cross_arch])
expected = []
for arch in archs:
expected.extend([f"manylinux_2_17_{arch}", f"manylinux2014_{arch}"])
expected.extend(f"linux_{arch}" for arch in archs)
assert platforms == expected
def test_linux_platforms_manylinux2014_i386_abi(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(tags._manylinux, "_glibc_version_string", lambda: "2.17") # type: ignore[attr-defined]
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_x86_64")
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__),
"manylinux",
"hello-world-x86_64-i386",
),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = [
"manylinux_2_17_i686",
"manylinux2014_i686",
"manylinux_2_16_i686",
"manylinux_2_15_i686",
"manylinux_2_14_i686",
"manylinux_2_13_i686",
"manylinux_2_12_i686",
"manylinux2010_i686",
"manylinux_2_11_i686",
"manylinux_2_10_i686",
"manylinux_2_9_i686",
"manylinux_2_8_i686",
"manylinux_2_7_i686",
"manylinux_2_6_i686",
"manylinux_2_5_i686",
"manylinux1_i686",
"linux_i686",
]
assert platforms == expected
def test_linux_platforms_manylinux_glibc3(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# test for a future glic 3.x version
monkeypatch.setattr(tags._manylinux, "_glibc_version_string", lambda: "3.2") # type: ignore[attr-defined]
monkeypatch.setattr(tags._manylinux, "_is_compatible", lambda *args: True) # type: ignore[attr-defined]
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_aarch64")
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__),
"manylinux",
"hello-world-aarch64",
),
)
platforms = list(tags._linux_platforms(is_32bit=False))
expected = (
["manylinux_3_2_aarch64", "manylinux_3_1_aarch64", "manylinux_3_0_aarch64"]
+ [f"manylinux_2_{i}_aarch64" for i in range(50, 16, -1)]
+ ["manylinux2014_aarch64", "linux_aarch64"]
)
assert platforms == expected
@pytest.mark.parametrize(
("native_arch", "cross32_arch", "musl_version"),
[
("armv7l", "armv7l", _MuslVersion(1, 1)),
("aarch64", "armv8l", _MuslVersion(1, 1)),
("i386", "i386", _MuslVersion(1, 2)),
("x86_64", "i686", _MuslVersion(1, 2)),
],
)
@pytest.mark.parametrize("cross32", [True, False], ids=["cross", "native"])
def test_linux_platforms_musllinux(
self,
monkeypatch: pytest.MonkeyPatch,
native_arch: str,
cross32_arch: str,
musl_version: _MuslVersion,
cross32: bool,
) -> None:
fake_executable = str(
pathlib.Path(__file__)
.parent.joinpath("musllinux", f"musl-{native_arch}")
.resolve()
)
monkeypatch.setattr(tags._musllinux.sys, "executable", fake_executable) # type: ignore[attr-defined]
monkeypatch.setattr(sysconfig, "get_platform", lambda: f"linux_{native_arch}")
monkeypatch.setattr(tags._manylinux, "platform_tags", lambda *_: ()) # type: ignore[attr-defined]
recorder = pretend.call_recorder(lambda _: musl_version)
monkeypatch.setattr(tags._musllinux, "_get_musl_version", recorder) # type: ignore[attr-defined]
platforms = list(tags._linux_platforms(is_32bit=cross32))
target_arch = cross32_arch if cross32 else native_arch
archs = {"armv8l": ["armv8l", "armv7l"]}.get(target_arch, [target_arch])
expected: list[str] = []
for arch in archs:
expected.extend(
f"musllinux_{musl_version[0]}_{minor}_{arch}"
for minor in range(musl_version[1], -1, -1)
)
expected.extend(f"linux_{arch}" for arch in archs)
assert platforms == expected
assert recorder.calls == [pretend.call(fake_executable)]
def test_linux_platforms_manylinux2014_armv6l(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(
tags._manylinux, # type: ignore[attr-defined]
"_is_compatible",
lambda _, glibc_version: glibc_version == _GLibCVersion(2, 17),
)
monkeypatch.setattr(sysconfig, "get_platform", lambda: "linux_armv6l")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.20", raising=False)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = ["linux_armv6l"]
assert platforms == expected
@pytest.mark.parametrize(
("machine", "abi", "alt_machine"),
[("x86_64", "x32", "i686"), ("armv7l", "armel", "armv7l")],
)
def test_linux_platforms_not_manylinux_abi(
self, monkeypatch: pytest.MonkeyPatch, machine: str, abi: str, alt_machine: str
) -> None:
monkeypatch.setattr(tags._manylinux, "_is_compatible", lambda *args: False) # type: ignore[attr-defined]
monkeypatch.setattr(sysconfig, "get_platform", lambda: f"linux_{machine}")
monkeypatch.setattr(
sys,
"executable",
os.path.join(
os.path.dirname(__file__),
"manylinux",
f"hello-world-{machine}-{abi}",
),
)
platforms = list(tags._linux_platforms(is_32bit=True))
expected = [f"linux_{alt_machine}"]
assert platforms == expected
def test_linux_not_linux(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(sysconfig, "get_platform", lambda: "not_linux_x86_64")
monkeypatch.setattr(platform, "machine", lambda: "x86_64")
monkeypatch.setattr(os, "confstr", lambda _: "glibc 2.17", raising=False)
platforms = list(tags._linux_platforms(is_32bit=False))
assert platforms == ["not_linux_x86_64"]
@pytest.mark.parametrize(
("platform_name", "dispatch_func"),
[
("Darwin", "mac_platforms"),
("iOS", "ios_platforms"),
("Android", "android_platforms"),
("Linux", "_linux_platforms"),
("Generic", "_generic_platforms"),
],
)
def test_platform_tags(
platform_name: str, dispatch_func: str, monkeypatch: pytest.MonkeyPatch
) -> None:
expected = ["sillywalk"]
monkeypatch.setattr(platform, "system", lambda: platform_name)
monkeypatch.setattr(tags, dispatch_func, lambda: expected)
assert list(tags.platform_tags()) == expected
def test_platform_tags_space(monkeypatch: pytest.MonkeyPatch) -> None:
"""Ensure spaces in platform tags are normalized to underscores."""
monkeypatch.setattr(platform, "system", lambda: "Isilon OneFS")
monkeypatch.setattr(sysconfig, "get_platform", lambda: "isilon onefs")
assert list(tags.platform_tags()) == ["isilon_onefs"]
| TestManylinuxPlatform |
python | walkccc__LeetCode | solutions/1402. Reducing Dishes/1402.py | {
"start": 0,
"end": 285
} | class ____:
def maxSatisfaction(self, satisfaction: list[int]) -> int:
ans = 0
sumSatisfaction = 0
for s in sorted(satisfaction, reverse=True):
sumSatisfaction += s
if sumSatisfaction <= 0:
return ans
ans += sumSatisfaction
return ans
| Solution |
python | etianen__django-reversion | tests/test_app/tests/test_commands.py | {
"start": 3055,
"end": 3352
} | class ____(TestModelMixin, TestBase):
def testCreateInitialRevisionsComment(self):
obj = TestModel.objects.create()
self.callCommand("createinitialrevisions", comment="comment v1")
self.assertSingleRevision((obj,), comment="comment v1")
| CreateInitialRevisionsCommentTest |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 7956,
"end": 8211
} | class ____(Exception):
pass
# TODO: I'm a little uncertain about what error classification we should have
# for this. This is potentially a user error, but regressions in
# specialization in PyTorch proper could also trigger this problem
| IncorrectUsage |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 43327,
"end": 47380
} | class ____(Interface):
def get(category_name, discriminator, default=None):
"""Get the IIntrospectable related to the category_name and the
discriminator (or discriminator hash) ``discriminator``. If it does
not exist in the introspector, return the value of ``default``"""
def get_category(category_name, default=None, sort_key=None):
"""Get a sequence of dictionaries in the form
``[{'introspectable':IIntrospectable, 'related':[sequence of related
IIntrospectables]}, ...]`` where each introspectable is part of the
category associated with ``category_name`` .
If the category named ``category_name`` does not exist in the
introspector the value passed as ``default`` will be returned.
If ``sort_key`` is ``None``, the sequence will be returned in the
order the introspectables were added to the introspector. Otherwise,
sort_key should be a function that accepts an IIntrospectable and
returns a value from it (ala the ``key`` function of Python's
``sorted`` callable)."""
def categories():
"""Return a sorted sequence of category names known by
this introspector"""
def categorized(sort_key=None):
"""Get a sequence of tuples in the form ``[(category_name,
[{'introspectable':IIntrospectable, 'related':[sequence of related
IIntrospectables]}, ...])]`` representing all known
introspectables. If ``sort_key`` is ``None``, each introspectables
sequence will be returned in the order the introspectables were added
to the introspector. Otherwise, sort_key should be a function that
accepts an IIntrospectable and returns a value from it (ala the
``key`` function of Python's ``sorted`` callable)."""
def remove(category_name, discriminator):
"""Remove the IIntrospectable related to ``category_name`` and
``discriminator`` from the introspector, and fix up any relations
that the introspectable participates in. This method will not raise
an error if an introspectable related to the category name and
discriminator does not exist."""
def related(intr):
"""Return a sequence of IIntrospectables related to the
IIntrospectable ``intr``. Return the empty sequence if no relations
for exist."""
def add(intr):
"""Add the IIntrospectable ``intr`` (use instead of
:meth:`pyramid.interfaces.IIntrospector.add` when you have a custom
IIntrospectable). Replaces any existing introspectable registered
using the same category/discriminator.
This method is not typically called directly, instead it's called
indirectly by :meth:`pyramid.interfaces.IIntrospector.register`"""
def relate(*pairs):
"""Given any number of ``(category_name, discriminator)`` pairs
passed as positional arguments, relate the associated introspectables
to each other. The introspectable related to each pair must have
already been added via ``.add`` or ``.add_intr``; a :exc:`KeyError`
will result if this is not true. An error will not be raised if any
pair has already been associated with another.
This method is not typically called directly, instead it's called
indirectly by :meth:`pyramid.interfaces.IIntrospector.register`
"""
def unrelate(*pairs):
"""Given any number of ``(category_name, discriminator)`` pairs
passed as positional arguments, unrelate the associated introspectables
from each other. The introspectable related to each pair must have
already been added via ``.add`` or ``.add_intr``; a :exc:`KeyError`
will result if this is not true. An error will not be raised if any
pair is not already related to another.
This method is not typically called directly, instead it's called
indirectly by :meth:`pyramid.interfaces.IIntrospector.register`
"""
| IIntrospector |
python | pytorch__pytorch | benchmarks/dynamo/microbenchmarks/operator_inp_utils.py | {
"start": 4602,
"end": 7058
} | class ____(TorchDispatchMode):
def __init__(self, func_db=None):
self.func_db = defaultdict(Counter) if func_db is None else func_db
def __torch_dispatch__(self, func_overload, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
arg_meta, kwarg_meta = tree_map(serialize_torch_args, (args, kwargs))
out = func_overload(*args, **kwargs)
inps = (args, kwargs)
if contains_tensor(inps) and not skip_args(inps) and contains_tensor(out):
serialized_str = repr((arg_meta, kwarg_meta))
self.func_db[str(func_overload)][serialized_str] += 1
return out
def log_to_file(self, output_filename, *, skip_non_compute_operators=True):
sorted_operators = sorted(self.func_db.keys())
with open(output_filename, "w") as f:
for operator in sorted_operators:
if skip_non_compute_operators and non_compute_operator(eval(operator)):
continue
f.write(f"Operator: {operator}\n")
operator_inputs = self.func_db[operator]
for inps, count in operator_inputs.items():
f.write(f"cnt: {count}, ")
# repr will add quotation marks around the dtype strings
for dtype_abbr in dtype_abbrs.values():
inps = inps.replace("'" + dtype_abbr + "'", dtype_abbr)
f.write(inps)
f.write("\n")
def map_to_device(e, device):
if isinstance(e, torch.Tensor):
return e.to(device)
elif isinstance(e, torch.device):
return device
elif isinstance(e, str):
if e == "cuda" or e == "cpu":
return device.type
else:
return e
def map_to_dtype(e, dtype):
if isinstance(e, torch.Tensor) and e.is_floating_point():
return e.to(dtype)
elif isinstance(e, torch.dtype):
return dtype
else:
return e
def deserialize_args(inps):
inps = inps.strip().strip("'")
global_vals = {
"T": deserialize_tensor,
"ST": deserialize_sparse_tensor,
"th": torch,
"inf": math.inf,
"torch": torch,
**dtype_abbrs_parsing,
}
# f strings introduce quotations we dont want
for key in dtype_abbrs_parsing:
inps = inps.replace(f"'{key}'", key)
return eval(inps.strip().strip("'").strip('"'), global_vals)
| OperatorInputsMode |
python | fastai__fastai | fastai/callback/fp16.py | {
"start": 790,
"end": 6619
} | class ____(Callback):
"Mixed precision training using Pytorch's Automatic Mixed Precision (AMP)"
order = 10
def __init__(self,
amp_mode:str|AMPMode=AMPMode.FP16, # Mixed Precision training mode. Supports fp16 and bf16.
**kwargs
):
amp_mode = AMPMode(amp_mode)
store_attr(names='amp_mode')
self.kwargs = kwargs
def before_fit(self):
if self.amp_mode == AMPMode.BF16:
if torch.cuda.is_available() and not torch.cuda.is_bf16_supported():
raise ValueError("Unsupported GPU for bfloat16 mixed precision training")
dtype = torch.bfloat16
elif self.amp_mode == AMPMode.FP16:
dtype = torch.float16
else:
raise ValueError(f"Unrecognized precision: {self.amp_mode}")
# `GradScaler` is not needed for bfloat16 as fp32 and bf16 have the same range
self.kwargs['enabled'] = dtype == torch.float16
self.autocast,self.learn.scaler,self.scales = autocast('cuda', dtype=dtype),GradScaler('cuda', **self.kwargs),L()
def before_batch(self): self.autocast.__enter__()
def after_pred(self):
self.learn.pred = to_float(self.pred)
def after_loss(self): self.autocast.__exit__(None, None, None)
def before_backward(self): self.learn.loss_grad = self.scaler.scale(self.loss_grad)
def before_step(self):
"Use `self` as a fake optimizer. `self.skipped` will be set to True `after_step` if gradients overflow."
self.skipped=True
self.scaler.step(self)
if self.skipped: raise CancelStepException()
self.scales.append(self.scaler.get_scale())
def after_step(self): self.learn.scaler.update()
def after_fit(self): self.autocast,self.learn.scaler,self.scales = None,None,None
@property
def param_groups(self):
"Pretend to be an optimizer for `GradScaler`"
return self.opt.param_groups
def step(self, *args, **kwargs):
"Fake optimizer step to detect whether this batch was skipped from `GradScaler`"
self.skipped=False
# %% ../../nbs/18_callback.fp16.ipynb 27
@patch
@delegates(GradScaler)
def to_fp16(self:Learner, **kwargs):
"Set `Learner` to float16 mixed precision using PyTorch AMP"
return self.add_cb(MixedPrecision(**kwargs))
# %% ../../nbs/18_callback.fp16.ipynb 28
@patch
def to_bf16(self:Learner):
"Set `Learner` to bfloat16 mixed precision using PyTorch AMP"
return self.add_cb(MixedPrecision(amp_mode=AMPMode.BF16))
# %% ../../nbs/18_callback.fp16.ipynb 29
@patch
def to_fp32(self:Learner):
"Set `Learner` to float32 precision"
return self.remove_cb(MixedPrecision)
# %% ../../nbs/18_callback.fp16.ipynb 32
from ..fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params
# %% ../../nbs/18_callback.fp16.ipynb 38
from torch.nn.utils import parameters_to_vector
# %% ../../nbs/18_callback.fp16.ipynb 39
def get_master(
opt:Optimizer, # Optimizer from which to retrieve model params
flat_master:bool=False, # Flatten fp32 params into a vector for better performance
) -> list: # List of fp16 params, and list of fp32 params
"Creates fp16 model params given an initialized `Optimizer`, also returning fp32 model params. "
model_params = [[param for param in pg if getattr(param, 'requires_grad', False) and hasattr(param, 'data')] for pg in opt.param_lists]
if flat_master:
master_params = []
for pg in model_params:
mp = parameters_to_vector([param.data.float() for param in pg])
mp = nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params = [[nn.Parameter(param.data.clone().float().detach(), requires_grad=True) for param in pg] for pg in model_params]
return model_params, master_params
# %% ../../nbs/18_callback.fp16.ipynb 44
def to_master_grads(
model_pgs:list, # Fp16 model parameters to copy gradients from
master_pgs:list, # Fp32 model parameters to copy gradients to
flat_master:bool=False, # Whether or not fp32 parameters were previously flattened
):
"Move fp16 model gradients to fp32 master gradients"
for (model_params,master_params) in zip(model_pgs,master_pgs):
model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)
# %% ../../nbs/18_callback.fp16.ipynb 48
def to_model_params(
model_pgs:list, # Fp16 model params to copy to
master_pgs:list, # Fp32 master params to copy from
flat_master:bool=False # Whether master_pgs was previously flattened
)->None:
"Copy updated fp32 master params to fp16 model params after gradient step. "
for (model_params,master_params) in zip(model_pgs,master_pgs):
master_params_to_model_params(model_params, master_params, flat_master=flat_master)
# %% ../../nbs/18_callback.fp16.ipynb 53
def test_overflow(x:torch.Tensor):
"Tests whether fp16 gradients have overflown."
s = float(x.float().sum())
return (s == float('inf') or s == float('-inf') or s != s)
# %% ../../nbs/18_callback.fp16.ipynb 56
def grad_overflow(pgs:list)->bool:
"Tests all fp16 parameters in pgs for gradient overflow"
for pg in pgs:
for p in pg:
if p.grad is not None and test_overflow(p.grad.data): return True
return False
# %% ../../nbs/18_callback.fp16.ipynb 59
def copy_clone(d):
return {k:(v.detach().clone().float() if isinstance(v,Tensor) else v) for k,v in d.items()}
# %% ../../nbs/18_callback.fp16.ipynb 60
def _copy_state(opt, pgs1, pgs2):
opt.param_lists = pgs2
for pg1,pg2 in zip(pgs1, pgs2):
for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))
# %% ../../nbs/18_callback.fp16.ipynb 61
| MixedPrecision |
python | doocs__leetcode | solution/0100-0199/0138.Copy List with Random Pointer/Solution2.py | {
"start": 203,
"end": 789
} | class ____:
def copyRandomList(self, head: "Optional[Node]") -> "Optional[Node]":
if head is None:
return None
cur = head
while cur:
node = Node(cur.val, cur.next)
cur.next = node
cur = node.next
cur = head
while cur:
cur.next.random = cur.random.next if cur.random else None
cur = cur.next.next
cur = head
ans = head.next
while cur.next:
node = cur.next
cur.next = node.next
cur = node
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/testing.py | {
"start": 3897,
"end": 4759
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that asserts which transformations happen next."""
def __init__(self, input_dataset, transformations):
"""See `assert_next()` for details."""
self._input_dataset = input_dataset
if transformations is None:
raise ValueError(
"Invalid `transformations`. `transformations` should not be empty.")
self._transformations = ops.convert_to_tensor(
transformations, dtype=dtypes.string, name="transformations")
variant_tensor = (
gen_experimental_dataset_ops.experimental_assert_next_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._transformations,
**self._flat_structure))
super(_AssertNextDataset, self).__init__(input_dataset, variant_tensor)
| _AssertNextDataset |
python | getsentry__sentry | tests/sentry_plugins/bitbucket/test_repository_provider.py | {
"start": 326,
"end": 2064
} | class ____(TestCase):
@cached_property
def provider(self) -> BitbucketRepositoryProvider:
return BitbucketRepositoryProvider("bitbucket")
@responses.activate
def test_compare_commits(self) -> None:
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/maxbittker/newsdiffs/commits/e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
body=COMPARE_COMMITS_EXAMPLE,
)
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/maxbittker/newsdiffs/diff/e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
body=COMMIT_DIFF_PATCH,
)
repo = Repository.objects.create(
provider="bitbucket",
name="maxbittker/newsdiffs",
organization_id=1,
config={"name": "maxbittker/newsdiffs"},
)
user = self.user
self.create_usersocialauth(
provider="bitbucket",
user=user,
uid="1",
extra_data={
"access_token": "oauth_token=oauth-token&oauth_token_secret=oauth-token-secret"
},
)
res = self.provider.compare_commits(
repo, None, "e18e4e72de0d824edfbe0d73efe34cbd0d01d301", actor=user
)
assert res == [
{
"author_email": "max@getsentry.com",
"author_name": "Max Bittker",
"message": "README.md edited online with Bitbucket",
"id": "e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
"repository": "maxbittker/newsdiffs",
"patch_set": [{"path": "README.md", "type": "M"}],
}
]
| BitbucketPluginTest |
python | pypa__hatch | src/hatch/config/model.py | {
"start": 39,
"end": 557
} | class ____(Exception):
def __init__(self, *args, location):
self.location = location
super().__init__(*args)
def __str__(self):
return f"Error parsing config:\n{self.location}\n {super().__str__()}"
def parse_config(obj):
if isinstance(obj, LazilyParsedConfig):
obj.parse_fields()
elif isinstance(obj, list):
for o in obj:
parse_config(o)
elif isinstance(obj, dict):
for o in obj.values():
parse_config(o)
| ConfigurationError |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/profiling.py | {
"start": 4470,
"end": 6383
} | class ____(Panel):
"""
Panel that displays profiling information.
"""
is_async = False
title = _("Profiling")
template = "debug_toolbar/panels/profiling.html"
capture_project_code = dt_settings.get_config()["PROFILER_CAPTURE_PROJECT_CODE"]
def process_request(self, request):
self.profiler = cProfile.Profile()
return self.profiler.runcall(super().process_request, request)
def add_node(self, func_list, func, max_depth, cum_time):
func_list.append(func)
if func.depth < max_depth:
for subfunc in func.subfuncs():
# Always include the user's code
if subfunc.stats[3] >= cum_time or (
self.capture_project_code
and subfunc.is_project_func()
and subfunc.stats[3] > 0
):
func.has_subfuncs = True
self.add_node(func_list, subfunc, max_depth, cum_time)
def generate_stats(self, request, response):
if not hasattr(self, "profiler"):
return None
# Could be delayed until the panel content is requested (perf. optim.)
self.profiler.create_stats()
self.stats = Stats(self.profiler)
self.stats.calc_callees()
root_func = cProfile.label(super().process_request.__code__)
if root_func in self.stats.stats:
root = FunctionCall(self.stats, root_func, depth=0)
func_list = []
cum_time_threshold = (
root.stats[3] / dt_settings.get_config()["PROFILER_THRESHOLD_RATIO"]
)
self.add_node(
func_list,
root,
dt_settings.get_config()["PROFILER_MAX_DEPTH"],
cum_time_threshold,
)
self.record_stats({"func_list": [func.serialize() for func in func_list]})
| ProfilingPanel |
python | pytorch__pytorch | torch/jit/quantized.py | {
"start": 2278,
"end": 3193
} | class ____(QuantizedRNNBase):
def __init__(self, *args, **kwargs):
raise RuntimeError(
"torch.jit.QuantizedGRU is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.GRU instead."
)
def quantize_rnn_cell_modules(module):
raise RuntimeError(
"quantize_rnn_cell_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
def quantize_linear_modules(module, dtype=torch.int8):
raise RuntimeError(
"quantize_linear_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
def quantize_rnn_modules(module, dtype=torch.int8):
raise RuntimeError(
"quantize_rnn_modules function is no longer supported. "
"Please use torch.ao.quantization.quantize_dynamic API instead."
)
| QuantizedGRU |
python | django__django | django/db/models/functions/datetime.py | {
"start": 12603,
"end": 12970
} | class ____(TruncBase):
kind = "date"
lookup_name = "date"
output_field = DateField()
def as_sql(self, compiler, connection):
# Cast to date rather than truncate to date.
sql, params = compiler.compile(self.lhs)
tzname = self.get_tzname()
return connection.ops.datetime_cast_date_sql(sql, tuple(params), tzname)
| TruncDate |
python | getsentry__sentry | tests/sentry/deletions/tasks/test_scheduled.py | {
"start": 966,
"end": 7374
} | class ____(abc.ABC, TestCase):
__test__ = Abstract(__module__, __qualname__)
@property
@abstractmethod
def ScheduledDeletion(self) -> type[BaseScheduledDeletion]:
raise NotImplementedError("Subclasses should implement")
@abstractmethod
def create_simple_deletion(self) -> QuerySet[Model]:
raise NotImplementedError("Subclasses should implement!")
@abstractmethod
def create_does_not_proceed_deletion(self) -> QuerySet[Model]:
raise NotImplementedError("Subclasses should implement!")
@abstractmethod
def run_scheduled_deletions(self) -> None:
raise NotImplementedError("Subclasses should implement")
@abstractmethod
def reattempt_deletions(self) -> None:
raise NotImplementedError("Subclasses should implement")
def test_schedule_and_cancel(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(inst, days=0)
self.ScheduledDeletion.cancel(inst)
assert not self.ScheduledDeletion.objects.filter(id=schedule.id).exists()
# No errors if we cancel a delete that wasn't started.
assert self.ScheduledDeletion.cancel(inst) is None
def test_duplicate_schedule(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
first = self.ScheduledDeletion.schedule(inst, days=0)
second = self.ScheduledDeletion.schedule(inst, days=1)
# Should get the same record.
assert first.id == second.id
assert first.guid == second.guid
# Date should be updated
assert second.date_scheduled - first.date_scheduled >= timedelta(days=1)
def test_simple(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
with self.tasks():
self.run_scheduled_deletions()
assert not qs.exists()
assert not self.ScheduledDeletion.objects.filter(id=schedule.id).exists()
def test_should_proceed_check(self) -> None:
qs = self.create_does_not_proceed_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
with self.tasks():
self.run_scheduled_deletions()
assert qs.exists()
assert not self.ScheduledDeletion.objects.filter(id=schedule.id, in_progress=True).exists()
def test_ignore_in_progress(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
schedule.update(in_progress=True)
with self.tasks():
self.run_scheduled_deletions()
assert qs.exists()
assert self.ScheduledDeletion.objects.filter(id=schedule.id, in_progress=True).exists()
def test_future_schedule(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=1)
with self.tasks():
self.run_scheduled_deletions()
assert qs.exists()
assert self.ScheduledDeletion.objects.filter(id=schedule.id, in_progress=False).exists()
def test_triggers_pending_delete_signal(self) -> None:
signal_handler = Mock()
pending_delete.connect(signal_handler)
qs = self.create_simple_deletion()
inst = qs.get()
self.ScheduledDeletion.schedule(instance=inst, actor=self.user, days=0)
with self.tasks():
self.run_scheduled_deletions()
assert signal_handler.call_count == 1
args = signal_handler.call_args_list[0][1]
assert args["instance"] == inst
assert args["actor"] == user_service.get_user(user_id=self.user.id)
pending_delete.disconnect(signal_handler)
def test_no_pending_delete_trigger_on_skipped_delete(self) -> None:
qs = self.create_does_not_proceed_deletion()
inst = qs.get()
signal_handler = Mock()
pending_delete.connect(signal_handler)
self.ScheduledDeletion.schedule(instance=inst, actor=self.user, days=0)
with self.tasks():
self.run_scheduled_deletions()
pending_delete.disconnect(signal_handler)
assert signal_handler.call_count == 0
def test_handle_missing_record(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
# Delete the inst, the deletion should remove itself, as its work is done.
inst.delete()
with self.tasks():
self.run_scheduled_deletions()
assert not self.ScheduledDeletion.objects.filter(id=schedule.id).exists()
def test_reattempt_simple(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=-3)
schedule.update(in_progress=True)
with self.tasks():
self.reattempt_deletions()
schedule.refresh_from_db()
assert not schedule.in_progress
def test_reattempt_ignore_recent_jobs(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
schedule.update(in_progress=True)
with self.tasks():
self.reattempt_deletions()
schedule.refresh_from_db()
assert schedule.in_progress is True
def test_relocated_model(self) -> None:
qs = self.create_simple_deletion()
inst = qs.get()
model_name = type(inst).__name__
orig_app = inst._meta.app_label
relocated_models = {("other_app", model_name): (orig_app, model_name)}
# As if the model was scheduled when it was part of a different app
with (
mock.patch.object(inst._meta, "app_label", "other_app"),
mock.patch.dict("sentry.deletions.RELOCATED_MODELS", relocated_models),
self.tasks(),
):
schedule = self.ScheduledDeletion.schedule(instance=inst, days=0)
self.run_scheduled_deletions()
assert not qs.exists()
assert not self.ScheduledDeletion.objects.filter(id=schedule.id).exists()
| RegionalRunScheduleDeletionTest |
python | huggingface__transformers | src/transformers/models/rwkv/configuration_rwkv.py | {
"start": 842,
"end": 5204
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`RwkvModel`]. It is used to instantiate a RWKV
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RWVK-4
[RWKV/rwkv-4-169m-pile](https://huggingface.co/RWKV/rwkv-4-169m-pile) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50277):
Vocabulary size of the RWKV model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`RwkvModel`].
context_length (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model can be used with in a single forward (using it in RNN mode
lets use any sequence length).
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
attention_hidden_size (`int`, *optional*):
Dimensionality of the attention hidden states. Will default to `hidden_size` if unset.
intermediate_size (`int`, *optional*):
Dimensionality of the inner feed-forward layers. Will default to 4 times `hidden_size` if unset.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer
as GPTNeoX.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer as
GPTNeoX.
rescale_every (`int`, *optional*, defaults to 6):
At inference, the hidden states (and weights of the corresponding output layers) are divided by 2 every
`rescale_every` layer. If set to 0 or a negative number, no rescale is done.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to tie the word embeddings with the input token embeddings.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last state.
Example:
```python
>>> from transformers import RwkvConfig, RwkvModel
>>> # Initializing a Rwkv configuration
>>> configuration = RwkvConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = RwkvModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "rwkv"
attribute_map = {"max_position_embeddings": "context_length"}
def __init__(
self,
vocab_size=50277,
context_length=1024,
hidden_size=4096,
num_hidden_layers=32,
attention_hidden_size=None,
intermediate_size=None,
layer_norm_epsilon=1e-5,
bos_token_id=0,
eos_token_id=0,
rescale_every=6,
tie_word_embeddings=False,
use_cache=True,
**kwargs,
):
self.vocab_size = vocab_size
self.context_length = context_length
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
self.layer_norm_epsilon = layer_norm_epsilon
self.rescale_every = rescale_every
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
)
__all__ = ["RwkvConfig"]
| RwkvConfig |
python | coleifer__peewee | peewee.py | {
"start": 45665,
"end": 46132
} | class ____(ColumnBase):
def __init__(self, *path):
self._path = [part.replace('"', '""') for part in path if part]
def __getattr__(self, attr):
return Entity(*self._path + [attr])
def get_sort_key(self, ctx):
return tuple(self._path)
def __hash__(self):
return hash((self.__class__.__name__, tuple(self._path)))
def __sql__(self, ctx):
return ctx.literal(quote(self._path, ctx.state.quote or '""'))
| Entity |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/job_backfill.py | {
"start": 2215,
"end": 24080
} | class ____:
key_or_range: Union[str, PartitionKeyRange]
run_tags: Mapping[str, str]
run_config: Mapping[str, Any]
def execute_job_backfill_iteration(
backfill: PartitionBackfill,
logger: logging.Logger,
workspace_process_context: IWorkspaceProcessContext,
debug_crash_flags: Optional[Mapping[str, int]],
instance: DagsterInstance,
submit_threadpool_executor: Optional[ThreadPoolExecutor] = None,
) -> Optional[SerializableErrorInfo]:
if not backfill.last_submitted_partition_name:
logger.info(f"Starting job backfill for {backfill.backfill_id}")
else:
logger.info(
f"Resuming job backfill for {backfill.backfill_id} from"
f" {backfill.last_submitted_partition_name}"
)
# refetch in case the backfill status has changed
backfill = cast("PartitionBackfill", instance.get_backfill(backfill.backfill_id))
if backfill.status == BulkActionStatus.CANCELING or backfill.status == BulkActionStatus.FAILING:
status_once_runs_are_complete = (
BulkActionStatus.CANCELED
if backfill.status == BulkActionStatus.CANCELING
else BulkActionStatus.FAILED
)
all_runs_canceled = cancel_backfill_runs_and_cancellation_complete(
instance=instance,
backfill_id=backfill.backfill_id,
logger=logger,
)
if all_runs_canceled:
instance.update_backfill(
backfill.with_status(status_once_runs_are_complete).with_end_timestamp(
get_current_timestamp()
)
)
return
partition_set = _get_partition_set(workspace_process_context, backfill)
has_more = True
while has_more:
if backfill.status != BulkActionStatus.REQUESTED:
break
chunk, checkpoint, has_more = _get_partitions_chunk(
instance,
logger,
backfill,
CHECKPOINT_COUNT,
partition_set,
)
check_for_debug_crash(debug_crash_flags, "BEFORE_SUBMIT")
if chunk:
list(
submit_backfill_runs(
instance,
lambda: workspace_process_context.create_request_context(),
backfill,
chunk,
submit_threadpool_executor,
)
)
# after each chunk, refetch the backfill job to check for status changes
backfill = cast("PartitionBackfill", instance.get_backfill(backfill.backfill_id))
if backfill.status != BulkActionStatus.REQUESTED:
return
check_for_debug_crash(debug_crash_flags, "AFTER_SUBMIT")
if has_more:
# refetch, in case the backfill was updated in the meantime
backfill = cast("PartitionBackfill", instance.get_backfill(backfill.backfill_id))
instance.update_backfill(backfill.with_partition_checkpoint(checkpoint))
time.sleep(CHECKPOINT_INTERVAL)
else:
unfinished_runs = instance.get_runs(
RunsFilter(
tags=DagsterRun.tags_for_backfill_id(backfill.backfill_id),
statuses=NOT_FINISHED_STATUSES,
),
limit=1,
)
if unfinished_runs:
logger.info(
f"Backfill {backfill.backfill_id} has unfinished runs. Status will be updated when all runs are finished."
)
instance.update_backfill(backfill.with_partition_checkpoint(checkpoint))
return
partition_names = cast("Sequence[str]", backfill.partition_names)
logger.info(
f"Backfill completed for {backfill.backfill_id} for"
f" {len(partition_names)} partitions"
)
if (
len(
instance.get_run_ids(
filters=RunsFilter(
tags=DagsterRun.tags_for_backfill_id(backfill.backfill_id),
statuses=[DagsterRunStatus.FAILURE, DagsterRunStatus.CANCELED],
)
)
)
> 0
):
instance.update_backfill(
backfill.with_status(BulkActionStatus.COMPLETED_FAILED).with_end_timestamp(
get_current_timestamp()
)
)
else:
instance.update_backfill(
backfill.with_status(BulkActionStatus.COMPLETED_SUCCESS).with_end_timestamp(
get_current_timestamp()
)
)
def _get_partition_set(
workspace_process_context: IWorkspaceProcessContext, backfill_job: PartitionBackfill
) -> RemotePartitionSet:
origin = cast("RemotePartitionSetOrigin", backfill_job.partition_set_origin)
location_name = origin.repository_origin.code_location_origin.location_name
workspace = workspace_process_context.create_request_context()
code_location = workspace.get_code_location(location_name)
repo_name = origin.repository_origin.repository_name
if not code_location.has_repository(repo_name):
raise DagsterBackfillFailedError(
f"Could not find repository {repo_name} in location {code_location.name} to "
f"run backfill {backfill_job.backfill_id}."
)
partition_set_name = origin.partition_set_name
remote_repo = code_location.get_repository(repo_name)
if not remote_repo.has_partition_set(partition_set_name):
raise DagsterBackfillFailedError(
f"Could not find partition set {partition_set_name} in repository {repo_name}. "
)
return remote_repo.get_partition_set(partition_set_name)
def _subdivide_partition_key_range(
partitions_def: PartitionsDefinition,
partition_key_range: PartitionKeyRange,
max_range_size: Optional[int],
) -> Sequence[PartitionKeyRange]:
"""Take a partition key range and subdivide it into smaller ranges of size max_range_size. This
is done to satisfy backfill policies that limit the maximum number of partitions that can be
materialized in a run.
"""
if max_range_size is None:
return [partition_key_range]
else:
keys = partitions_def.get_partition_keys_in_range(partition_key_range)
chunks = [keys[i : i + max_range_size] for i in range(0, len(keys), max_range_size)]
return [PartitionKeyRange(start=chunk[0], end=chunk[-1]) for chunk in chunks]
def _get_partitions_chunk(
instance: DagsterInstance,
logger: logging.Logger,
backfill_job: PartitionBackfill,
chunk_size: int,
partition_set: RemotePartitionSet,
) -> tuple[Sequence[Union[str, PartitionKeyRange]], str, bool]:
partition_names = cast("Sequence[str]", backfill_job.partition_names)
checkpoint = backfill_job.last_submitted_partition_name
backfill_policy = partition_set.backfill_policy
if (
backfill_job.last_submitted_partition_name
and backfill_job.last_submitted_partition_name in partition_names
):
index = partition_names.index(backfill_job.last_submitted_partition_name)
partition_names = partition_names[index + 1 :]
# for idempotence, fetch all runs with the current backfill id
backfill_runs = instance.get_runs(
RunsFilter(tags=DagsterRun.tags_for_backfill_id(backfill_job.backfill_id))
)
# fetching the partitions def of a legacy dynamic partitioned op-job will raise an error
# so guard against it by checking if the partitions def exists first
partitions_def = (
partition_set.get_partitions_definition()
if partition_set.has_partitions_definition()
else None
)
completed_partitions = []
for run in backfill_runs:
if (
run.tags.get(ASSET_PARTITION_RANGE_START_TAG)
and run.tags.get(ASSET_PARTITION_RANGE_END_TAG)
and run.tags.get(PARTITION_NAME_TAG) is None
and partitions_def is not None
):
if partitions_def is None:
# We should not hit this case, since all PartitionsDefinitions that can be put on
# assets are fetchable via the ExternalPartitionSet. However, we do this check so that
# we only fetch the partitions def once before the loop
raise DagsterInvariantViolationError(
f"Cannot access PartitionsDefinition for backfill {backfill_job.backfill_id}. "
)
completed_partitions.extend(
partitions_def.get_partition_keys_in_range(
PartitionKeyRange(
start=run.tags[ASSET_PARTITION_RANGE_START_TAG],
end=run.tags[ASSET_PARTITION_RANGE_END_TAG],
),
)
)
elif run.tags.get(PARTITION_NAME_TAG):
completed_partitions.append(run.tags[PARTITION_NAME_TAG])
initial_checkpoint = (
partition_names.index(checkpoint) + 1 if checkpoint and checkpoint in partition_names else 0
)
partition_names = partition_names[initial_checkpoint:]
if len(partition_names) == 0:
# no more partitions to submit, return early
return [], checkpoint or "", False
if backfill_policy and backfill_policy.max_partitions_per_run != 1:
to_submit = [
partition_name
for partition_name in partition_names
if partition_name not in completed_partitions
]
partitions_def = partition_set.get_partitions_definition()
partitions_subset = partitions_def.subset_with_partition_keys(to_submit)
partition_key_ranges = partitions_subset.get_partition_key_ranges(partitions_def)
subdivided_ranges = [
sr
for r in partition_key_ranges
for sr in _subdivide_partition_key_range(
partitions_def, r, backfill_policy.max_partitions_per_run
)
]
ranges_to_launch = subdivided_ranges[:chunk_size]
has_more = chunk_size < len(subdivided_ranges)
next_checkpoint = ranges_to_launch[-1].end if len(ranges_to_launch) > 0 else checkpoint
to_submit = ranges_to_launch
else:
has_more = chunk_size < len(partition_names)
partitions_chunk = partition_names[:chunk_size]
next_checkpoint = partitions_chunk[-1]
to_skip = set(partitions_chunk).intersection(completed_partitions)
if to_skip:
logger.info(
f"Found {len(to_skip)} existing runs for backfill {backfill_job.backfill_id}, skipping"
)
to_submit = [
partition_name
for partition_name in partitions_chunk
if partition_name not in completed_partitions
]
return to_submit, next_checkpoint or "", has_more
def submit_backfill_runs(
instance: DagsterInstance,
create_workspace: Callable[[], BaseWorkspaceRequestContext],
backfill_job: PartitionBackfill,
partition_names_or_ranges: Optional[Sequence[Union[str, PartitionKeyRange]]] = None,
submit_threadpool_executor: Optional[ThreadPoolExecutor] = None,
) -> Iterable[Optional[str]]:
"""Returns the run IDs of the submitted runs."""
origin = cast("RemotePartitionSetOrigin", backfill_job.partition_set_origin)
repository_origin = origin.repository_origin
repo_name = repository_origin.repository_name
location_name = repository_origin.code_location_origin.location_name
if not partition_names_or_ranges:
partition_names_or_ranges = cast("Sequence[str]", backfill_job.partition_names)
workspace = create_workspace()
code_location = workspace.get_code_location(location_name)
check.invariant(
code_location.has_repository(repo_name),
f"Could not find repository {repo_name} in location {code_location.name}",
)
remote_repo = code_location.get_repository(repo_name)
partition_set_name = origin.partition_set_name
partition_set = remote_repo.get_partition_set(partition_set_name)
if backfill_job.asset_selection:
# need to make another call to the user code location to properly subset
# for an asset selection
pipeline_selector = JobSubsetSelector(
location_name=code_location.name,
repository_name=repo_name,
job_name=partition_set.job_name,
op_selection=None,
asset_selection=backfill_job.asset_selection,
)
remote_job = code_location.get_job(pipeline_selector)
else:
remote_job = remote_repo.get_full_job(partition_set.job_name)
partition_data_target = check.is_list(
[partition_names_or_ranges[0].start]
if isinstance(partition_names_or_ranges[0], PartitionKeyRange)
else partition_names_or_ranges,
of_type=str,
)
partition_set_execution_data = code_location.get_partition_set_execution_params(
remote_repo.handle,
partition_set_name,
partition_data_target,
instance,
)
assert isinstance(partition_set_execution_data, PartitionSetExecutionParamSnap)
# Partition-scoped run config is prohibited at the definitions level for a jobs that materialize
# ranges, so we can assume that all partition data will have the same run config and tags as the
# first partition.
tags_by_key_or_range: Mapping[Union[str, PartitionKeyRange], Mapping[str, str]]
run_config_by_key_or_range: Mapping[Union[str, PartitionKeyRange], Mapping[str, Any]]
if isinstance(partition_names_or_ranges[0], PartitionKeyRange):
partition_set_run_config = partition_set_execution_data.partition_data[0].run_config
if partition_set_run_config and backfill_job.run_config:
raise DagsterInvariantViolationError(
"Cannot specify both partition-scoped run config and backfill-scoped run config. This can happen "
"if you explicitly set a PartitionSet on your job and also specify run config when launching a backfill.",
)
run_config = partition_set_run_config or backfill_job.run_config or {}
tags = {
k: v
for k, v in partition_set_execution_data.partition_data[0].tags.items()
if k != PARTITION_NAME_TAG
}
run_config_by_key_or_range = {r: run_config for r in partition_names_or_ranges}
tags_by_key_or_range = {
r: {
**tags,
ASSET_PARTITION_RANGE_START_TAG: r.start,
ASSET_PARTITION_RANGE_END_TAG: r.end,
}
for r in check.is_list(partition_names_or_ranges, of_type=PartitionKeyRange)
}
else:
run_config_by_key_or_range = {
pd.name: pd.run_config or backfill_job.run_config or {}
for pd in partition_set_execution_data.partition_data
}
tags_by_key_or_range = {
pd.name: pd.tags for pd in partition_set_execution_data.partition_data
}
def create_and_submit_partition_run(backfill_run_request: BackfillRunRequest) -> Optional[str]:
workspace = create_workspace()
code_location = workspace.get_code_location(location_name)
dagster_run = create_backfill_run(
workspace,
instance,
code_location,
remote_job,
partition_set,
backfill_job,
backfill_run_request.key_or_range,
backfill_run_request.run_tags,
backfill_run_request.run_config,
)
if dagster_run:
# we skip runs in certain cases, e.g. we are running a `from_failure` backfill job
# and the partition has had a successful run since the time the backfill was
# scheduled
instance.submit_run(dagster_run.run_id, workspace)
return dagster_run.run_id
return None
batch_run_requests = [
BackfillRunRequest(
key_or_range=key_or_range,
run_tags=tags_by_key_or_range[key_or_range],
run_config=run_config_by_key_or_range[key_or_range],
)
for key_or_range in partition_names_or_ranges
]
if submit_threadpool_executor:
yield from submit_threadpool_executor.map(
create_and_submit_partition_run, batch_run_requests
)
else:
yield from map(create_and_submit_partition_run, batch_run_requests)
def create_backfill_run(
request_context: BaseWorkspaceRequestContext,
instance: DagsterInstance,
code_location: CodeLocation,
remote_job: RemoteJob,
remote_partition_set: RemotePartitionSet,
backfill_job: PartitionBackfill,
partition_key_or_range: Union[str, PartitionKeyRange],
run_tags: Mapping[str, str],
run_config: Mapping[str, Any],
) -> Optional[DagsterRun]:
from dagster._daemon.daemon import get_telemetry_daemon_session_id
log_action(
instance,
BACKFILL_RUN_CREATED,
metadata={
"DAEMON_SESSION_ID": get_telemetry_daemon_session_id(),
"repo_hash": hash_name(code_location.name),
"pipeline_name_hash": hash_name(remote_job.name),
},
)
tags = merge_dicts(
remote_job.tags,
run_tags,
DagsterRun.tags_for_backfill_id(backfill_job.backfill_id),
backfill_job.tags,
)
resolved_op_selection = None
op_selection = None
if not backfill_job.from_failure and not backfill_job.reexecution_steps:
step_keys_to_execute = None
parent_run_id = None
root_run_id = None
known_state = None
if remote_partition_set.op_selection:
resolved_op_selection = frozenset(remote_partition_set.op_selection)
op_selection = remote_partition_set.op_selection
elif backfill_job.from_failure:
last_run = _fetch_last_run(instance, remote_partition_set, partition_key_or_range)
if not last_run or last_run.status != DagsterRunStatus.FAILURE:
return None
return instance.create_reexecuted_run(
parent_run=last_run,
request_context=request_context,
code_location=code_location,
remote_job=remote_job,
strategy=ReexecutionStrategy.FROM_FAILURE,
extra_tags=tags,
run_config=run_config,
use_parent_run_tags=False, # don't inherit tags from the previous run
)
else: # backfill_job.reexecution_steps
last_run = _fetch_last_run(instance, remote_partition_set, partition_key_or_range)
parent_run_id = last_run.run_id if last_run else None
root_run_id = (last_run.root_run_id or last_run.run_id) if last_run else None
if parent_run_id and root_run_id:
tags = merge_dicts(
tags, {PARENT_RUN_ID_TAG: parent_run_id, ROOT_RUN_ID_TAG: root_run_id}
)
step_keys_to_execute = backfill_job.reexecution_steps
if last_run and last_run.status == DagsterRunStatus.SUCCESS:
known_state = KnownExecutionState.build_for_reexecution(
instance,
last_run,
).update_for_step_selection(step_keys_to_execute)
else:
known_state = None
if remote_partition_set.op_selection:
resolved_op_selection = frozenset(remote_partition_set.op_selection)
op_selection = remote_partition_set.op_selection
remote_execution_plan = code_location.get_execution_plan(
remote_job,
run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=instance,
)
return instance.create_run(
job_snapshot=remote_job.job_snapshot,
execution_plan_snapshot=remote_execution_plan.execution_plan_snapshot,
parent_job_snapshot=remote_job.parent_job_snapshot,
job_name=remote_job.name,
run_id=make_new_run_id(),
resolved_op_selection=resolved_op_selection,
run_config=run_config,
step_keys_to_execute=step_keys_to_execute,
tags=tags,
root_run_id=root_run_id,
parent_run_id=parent_run_id,
status=DagsterRunStatus.NOT_STARTED,
remote_job_origin=remote_job.get_remote_origin(),
job_code_origin=remote_job.get_python_origin(),
op_selection=op_selection,
asset_selection=(
frozenset(backfill_job.asset_selection) if backfill_job.asset_selection else None
),
asset_check_selection=None,
asset_graph=code_location.get_repository(
remote_job.repository_handle.repository_name
).asset_graph,
)
def _fetch_last_run(
instance: DagsterInstance,
remote_partition_set: RemotePartitionSet,
partition_key_or_range: Union[str, PartitionKeyRange],
) -> Optional[DagsterRun]:
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(remote_partition_set, "remote_partition_set", RemotePartitionSet)
check.str_param(partition_key_or_range, "partition_name")
tags = (
{
PARTITION_NAME_TAG: partition_key_or_range,
}
if isinstance(partition_key_or_range, str)
else {
ASSET_PARTITION_RANGE_START_TAG: partition_key_or_range.start,
ASSET_PARTITION_RANGE_END_TAG: partition_key_or_range.end,
}
)
runs = instance.get_runs(
RunsFilter(
job_name=remote_partition_set.job_name,
tags={PARTITION_SET_TAG: remote_partition_set.name, **tags},
),
limit=1,
)
return runs[0] if runs else None
| BackfillRunRequest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_checks.py | {
"start": 5268,
"end": 8273
} | class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
assetKey = graphene.NonNull(GrapheneAssetKey)
description = graphene.String()
jobNames = non_null_list(graphene.String)
executionForLatestMaterialization = graphene.Field(GrapheneAssetCheckExecution)
canExecuteIndividually = graphene.NonNull(GrapheneAssetCheckCanExecuteIndividually)
blocking = graphene.NonNull(graphene.Boolean)
additionalAssetKeys = non_null_list(GrapheneAssetKey)
automationCondition = graphene.Field(GrapheneAutomationCondition)
class Meta:
name = "AssetCheck"
def __init__(
self,
remote_node: RemoteAssetCheckNode,
):
self._remote_node = remote_node
self._asset_check = remote_node.asset_check
def resolve_assetKey(self, _) -> AssetKey:
return self._asset_check.asset_key
def resolve_name(self, _) -> str:
return self._asset_check.name
def resolve_description(self, _) -> Optional[str]:
return self._asset_check.description
def resolve_jobNames(self, _) -> Sequence[str]:
return self._asset_check.job_names
async def resolve_executionForLatestMaterialization(
self, graphene_info: ResolveInfo
) -> Optional[GrapheneAssetCheckExecution]:
record = await AssetCheckExecutionRecord.gen(graphene_info.context, self._asset_check.key)
return (
GrapheneAssetCheckExecution(record)
if record and await record.targets_latest_materialization(graphene_info.context)
else None
)
def resolve_canExecuteIndividually(self, _: ResolveInfo):
return (
GrapheneAssetCheckCanExecuteIndividually.CAN_EXECUTE
if len(self._remote_node.execution_set_entity_keys) <= 1
# NOTE: once we support multi checks, we'll need to add a case for
# non subsettable multi checks
else GrapheneAssetCheckCanExecuteIndividually.REQUIRES_MATERIALIZATION
)
def resolve_blocking(self, _) -> bool:
return self._asset_check.blocking
def resolve_additionalAssetKeys(self, _) -> Sequence[GrapheneAssetKey]:
return [
GrapheneAssetKey(path=asset_key.path)
for asset_key in self._asset_check.additional_asset_keys
]
def resolve_automationCondition(
self, _graphene_info: ResolveInfo
) -> Optional[GrapheneAutoMaterializePolicy]:
automation_condition = (
self._asset_check.automation_condition_snapshot
or self._asset_check.automation_condition
)
if automation_condition:
return GrapheneAutomationCondition(
# we only store one of automation_condition or automation_condition_snapshot
automation_condition
if isinstance(automation_condition, AutomationConditionSnapshot)
else automation_condition.get_snapshot()
)
return None
| GrapheneAssetCheck |
python | django__django | tests/admin_views/admin.py | {
"start": 16718,
"end": 16792
} | class ____(admin.ModelAdmin):
readonly_fields = ("toppings",)
| PizzaAdmin |
python | django__django | django/db/models/functions/text.py | {
"start": 3776,
"end": 4834
} | class ____(Func):
"""
Concatenate text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure a non-null result.
"""
function = None
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError("Concat must take at least two expressions")
paired = self._paired(expressions, output_field=extra.get("output_field"))
super().__init__(paired, **extra)
def _paired(self, expressions, output_field):
# wrap pairs of expressions in successive concat functions
# exp = [a, b, c, d]
# -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d))))
if len(expressions) == 2:
return ConcatPair(*expressions, output_field=output_field)
return ConcatPair(
expressions[0],
self._paired(expressions[1:], output_field=output_field),
output_field=output_field,
)
| Concat |
python | scipy__scipy | scipy/integrate/_quad_vec.py | {
"start": 2186,
"end": 21663
} | class ____:
def __init__(self, **kwargs):
self.__keys = kwargs.keys()
self.__dict__.update(**kwargs)
def __repr__(self):
key_value_pairs = ', '.join(
f'{k}={repr(self.__dict__[k])}' for k in self.__keys
)
return f"_Bunch({key_value_pairs})"
@xp_capabilities(np_only=True)
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6,
limit=10000, workers=1, points=None, quadrature=None, full_output=False,
*, args=()):
r"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number of bytes to use for memoization.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using
:class:`python:multiprocessing.pool.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
:meth:`python:multiprocessing.pool.Pool.map` for evaluating the
population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'gk15' (Gauss-Kronrod 15-point rule),
'trapezoid' (composite trapezoid rule).
Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite.
full_output : bool, optional
Return an additional ``info`` object.
args : tuple, optional
Extra arguments to pass to function, if any.
.. versionadded:: 1.8.0
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : object
Returned only when ``full_output=True``.
Result object with the attributes:
success : bool
Whether integration reached target precision.
status : int
Indicator for convergence, success (0),
failure (1), and failure due to rounding error (2).
neval : int
Number of function evaluations.
intervals : ndarray, shape (num_intervals, 2)
Start and end points of subdivision intervals.
integrals : ndarray, shape (num_intervals, ...)
Integral for each interval.
Note that at most ``cache_size`` values are recorded,
and the array may contains *nan* for missing items.
errors : ndarray, shape (num_intervals,)
Estimated integration error for each interval.
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
supposed to work on vector-valued functions, in an user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
Examples
--------
We can compute integrations of a vector-valued function:
>>> from scipy.integrate import quad_vec
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> alpha = np.linspace(0.0, 2.0, num=30)
>>> f = lambda x: x**alpha
>>> x0, x1 = 0, 2
>>> y, err = quad_vec(f, x0, x1)
>>> plt.plot(alpha, y)
>>> plt.xlabel(r"$\alpha$")
>>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
>>> plt.show()
When using the argument `workers`, one should ensure
that the main module is import-safe, for instance
by rewriting the example above as:
.. code-block:: python
from scipy.integrate import quad_vec
import numpy as np
import matplotlib.pyplot as plt
alpha = np.linspace(0.0, 2.0, num=30)
x0, x1 = 0, 2
def f(x):
return x**alpha
if __name__ == "__main__":
y, err = quad_vec(f, x0, x1, workers=2)
"""
a = float(a)
b = float(b)
if args:
if not isinstance(args, tuple):
args = (args,)
# create a wrapped function to allow the use of map and Pool.map
f = _FunctionWrapper(f, args)
# Use simple transformations to deal with integrals over infinite
# intervals.
kwargs = dict(epsabs=epsabs,
epsrel=epsrel,
norm=norm,
cache_size=cache_size,
limit=limit,
workers=workers,
points=points,
quadrature='gk15' if quadrature is None else quadrature,
full_output=full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, **kwargs)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, **kwargs)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. explicitly split integral at t=0, which separates
# the positive and negative sides
f2 = DoubleInfiniteFunc(f)
if points is not None:
kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
else:
kwargs['points'] = (0,)
if a != b:
res = quad_vec(f2, -1, 1, **kwargs)
else:
res = quad_vec(f2, 1, 1, **kwargs)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError(f"invalid integration bounds a={a}, b={b}")
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
parallel_count = 128
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'gk15': _quadrature_gk15,
'trapezoid': _quadrature_trapezoid}[quadrature]
except KeyError as e:
raise ValueError(f"unknown quadrature {quadrature!r}") from e
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if not (a < p < b) or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, float | complex):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
CONVERGED = 0
NOT_CONVERGED = 1
ROUNDING_ERROR = 2
NOT_A_NUMBER = 3
status_msg = {
CONVERGED: "Target precision reached.",
NOT_CONVERGED: "Target precision not reached.",
ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
NOT_A_NUMBER: "Non-finite values encountered."
}
# Process intervals
with MapWrapper(workers) as mapwrapper:
ier = NOT_CONVERGED
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
err_sum = 0
for j in range(parallel_count):
if not intervals:
break
if j > 0 and err_sum > global_error - tol/8:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(
((-neg_old_err, a, b, old_int), f, norm_func, _quadrature)
)
err_sum += -neg_old_err
# Subdivide intervals
for parts in mapwrapper(_subdivide_interval, to_process):
dint, derr, dround_err, subint, dneval = parts
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = CONVERGED
break
if global_error < rounding_error:
ier = ROUNDING_ERROR
break
if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
ier = NOT_A_NUMBER
break
res = global_integral
err = global_error + rounding_error
if full_output:
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
errors = np.array([-z[0] for z in intervals])
intervals = np.array([[z[1], z[2]] for z in intervals])
info = _Bunch(neval=neval,
success=(ier == CONVERGED),
status=ier,
message=status_msg[ier],
intervals=intervals,
integrals=integrals,
errors=errors)
return (res, err, info)
else:
return (res, err)
def _subdivide_interval(args):
interval, f, norm_func, _quadrature = args
old_err, a, b, old_int = interval
c = 0.5 * (a + b)
# Left-hand side
if getattr(_quadrature, 'cache_size', 0) > 0:
f = functools.lru_cache(_quadrature.cache_size)(f)
s1, err1, round1 = _quadrature(a, c, f, norm_func)
dneval = _quadrature.num_eval
s2, err2, round2 = _quadrature(c, b, f, norm_func)
dneval += _quadrature.num_eval
if old_int is None:
old_int, _, _ = _quadrature(a, b, f, norm_func)
dneval += _quadrature.num_eval
if getattr(_quadrature, 'cache_size', 0) > 0:
dneval = f.cache_info().misses
dint = s1 + s2 - old_int
derr = err1 + err2 - old_err
dround_err = round1 + round2
subintervals = ((a, c, s1, err1), (c, b, s2, err2))
return dint, derr, dround_err, subintervals, dneval
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
def _quadrature_gk(a, b, f, norm_func, x, w, v):
"""
Generic Gauss-Kronrod quadrature
"""
fv = [0.0]*len(x)
c = 0.5 * (a + b)
h = 0.5 * (b - a)
# Gauss-Kronrod
s_k = 0.0
s_k_abs = 0.0
for i in range(len(x)):
ff = f(c + h*x[i])
fv[i] = ff
vv = v[i]
# \int f(x)
s_k += vv * ff
# \int |f(x)|
s_k_abs += vv * abs(ff)
# Gauss
s_g = 0.0
for i in range(len(w)):
s_g += w[i] * fv[2*i + 1]
# Quadrature of abs-deviation from average
s_k_dabs = 0.0
y0 = s_k / 2.0
for i in range(len(x)):
# \int |f(x) - y0|
s_k_dabs += v[i] * abs(fv[i] - y0)
# Use similar error estimation as quadpack
err = float(norm_func((s_k - s_g) * h))
dabs = float(norm_func(s_k_dabs * h))
if dabs != 0 and err != 0:
err = dabs * min(1.0, (200 * err / dabs)**1.5)
eps = sys.float_info.epsilon
round_err = float(norm_func(50 * eps * h * s_k_abs))
if round_err > sys.float_info.min:
err = max(err, round_err)
return h * s_k, err, round_err
def _quadrature_gk21(a, b, f, norm_func):
"""
Gauss-Kronrod 21 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.995657163025808080735527280689003,
0.973906528517171720077964012084452,
0.930157491355708226001207180059508,
0.865063366688984510732096688423493,
0.780817726586416897063717578345042,
0.679409568299024406234327365114874,
0.562757134668604683339000099272694,
0.433395394129247190799265943165784,
0.294392862701460198131126603103866,
0.148874338981631210884826001129720,
0,
-0.148874338981631210884826001129720,
-0.294392862701460198131126603103866,
-0.433395394129247190799265943165784,
-0.562757134668604683339000099272694,
-0.679409568299024406234327365114874,
-0.780817726586416897063717578345042,
-0.865063366688984510732096688423493,
-0.930157491355708226001207180059508,
-0.973906528517171720077964012084452,
-0.995657163025808080735527280689003)
# 10-point weights
w = (0.066671344308688137593568809893332,
0.149451349150580593145776339657697,
0.219086362515982043995534934228163,
0.269266719309996355091226921569469,
0.295524224714752870173892994651338,
0.295524224714752870173892994651338,
0.269266719309996355091226921569469,
0.219086362515982043995534934228163,
0.149451349150580593145776339657697,
0.066671344308688137593568809893332)
# 21-point weights
v = (0.011694638867371874278064396062192,
0.032558162307964727478818972459390,
0.054755896574351996031381300244580,
0.075039674810919952767043140916190,
0.093125454583697605535065465083366,
0.109387158802297641899210590325805,
0.123491976262065851077958109831074,
0.134709217311473325928054001771707,
0.142775938577060080797094273138717,
0.147739104901338491374841515972068,
0.149445554002916905664936468389821,
0.147739104901338491374841515972068,
0.142775938577060080797094273138717,
0.134709217311473325928054001771707,
0.123491976262065851077958109831074,
0.109387158802297641899210590325805,
0.093125454583697605535065465083366,
0.075039674810919952767043140916190,
0.054755896574351996031381300244580,
0.032558162307964727478818972459390,
0.011694638867371874278064396062192)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk21.num_eval = 21
def _quadrature_gk15(a, b, f, norm_func):
"""
Gauss-Kronrod 15 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.991455371120812639206854697526329,
0.949107912342758524526189684047851,
0.864864423359769072789712788640926,
0.741531185599394439863864773280788,
0.586087235467691130294144838258730,
0.405845151377397166906606412076961,
0.207784955007898467600689403773245,
0.000000000000000000000000000000000,
-0.207784955007898467600689403773245,
-0.405845151377397166906606412076961,
-0.586087235467691130294144838258730,
-0.741531185599394439863864773280788,
-0.864864423359769072789712788640926,
-0.949107912342758524526189684047851,
-0.991455371120812639206854697526329)
# 7-point weights
w = (0.129484966168869693270611432679082,
0.279705391489276667901467771423780,
0.381830050505118944950369775488975,
0.417959183673469387755102040816327,
0.381830050505118944950369775488975,
0.279705391489276667901467771423780,
0.129484966168869693270611432679082)
# 15-point weights
v = (0.022935322010529224963732008058970,
0.063092092629978553290700663189204,
0.104790010322250183839876322541518,
0.140653259715525918745189590510238,
0.169004726639267902826583426598550,
0.190350578064785409913256402421014,
0.204432940075298892414161999234649,
0.209482141084727828012999174891714,
0.204432940075298892414161999234649,
0.190350578064785409913256402421014,
0.169004726639267902826583426598550,
0.140653259715525918745189590510238,
0.104790010322250183839876322541518,
0.063092092629978553290700663189204,
0.022935322010529224963732008058970)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk15.num_eval = 15
| _Bunch |
python | pytorch__pytorch | torch/_inductor/dependencies.py | {
"start": 10782,
"end": 12423
} | class ____(Dep):
# pyrefly: ignore [bad-override]
name: str
mode: Optional[str] = None
# depends on the entire buffer
@property
# pyrefly: ignore [bad-override]
def index(self) -> sympy.Expr:
raise NotImplementedError("StarDep does not have an index")
def get_numel(self) -> sympy.Expr:
return V.graph.get_numel(self.name) # type: ignore[return-value]
def rename(self, renames: dict[str, str]) -> "StarDep":
if self.name in renames:
return StarDep(renames[self.name], self.mode)
return self
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def numbytes_hint(self) -> int:
try:
return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
V.graph.get_dtype(self.name)
)
except NotImplementedError:
return 0 # NoneLayout, MultiOutputLayout, etc
def has_unbacked_symbols(self) -> bool:
return len(free_unbacked_symbols(self.get_numel())) > 0
def is_contiguous(self) -> bool:
return False
def is_scalar(self) -> bool:
return False
def is_indirect(self) -> bool:
return False
# Used for tracking mutation ordering
# if A reads a buffer and B mutates it
# B must be ordered after A
#
# This is useful for a variety of reasons.
# For example, if A's read is never actually used, we can eliminate it.
# Another case is if A's buffer ends up being fused away, we never need to
# materialize that buffer
@dataclasses.dataclass(frozen=True)
| StarDep |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 36325,
"end": 37283
} | class ____(TestCase):
def test_bulk_create(self):
class BasicModelSerializer(serializers.ModelSerializer):
class Meta:
model = BulkCreateModel
fields = ('name',)
class BulkCreateSerializer(serializers.ListSerializer):
child = BasicModelSerializer()
data = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}]
serializer = BulkCreateSerializer(data=data)
assert serializer.is_valid()
# Objects are returned by save().
instances = serializer.save()
assert len(instances) == 3
assert [item.name for item in instances] == ['a', 'b', 'c']
# Objects have been created in the database.
assert BulkCreateModel.objects.count() == 3
assert list(BulkCreateModel.objects.values_list('name', flat=True)) == ['a', 'b', 'c']
# Serializer returns correct data.
assert serializer.data == data
| TestBulkCreate |
python | doocs__leetcode | solution/0000-0099/0085.Maximal Rectangle/Solution.py | {
"start": 0,
"end": 1074
} | class ____:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
heights = [0] * len(matrix[0])
ans = 0
for row in matrix:
for j, v in enumerate(row):
if v == "1":
heights[j] += 1
else:
heights[j] = 0
ans = max(ans, self.largestRectangleArea(heights))
return ans
def largestRectangleArea(self, heights: List[int]) -> int:
n = len(heights)
stk = []
left = [-1] * n
right = [n] * n
for i, h in enumerate(heights):
while stk and heights[stk[-1]] >= h:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
h = heights[i]
while stk and heights[stk[-1]] >= h:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
return max(h * (right[i] - left[i] - 1) for i, h in enumerate(heights))
| Solution |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py | {
"start": 28268,
"end": 31561
} | class ____(Wav2Vec2Model, Wav2Vec2BertPreTrainedModel):
def __init__(self, config: Wav2Vec2BertConfig):
Wav2Vec2BertPreTrainedModel.__init__(self, config)
self.config = config
self.feature_projection = Wav2Vec2BertFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = Wav2Vec2BertEncoder(config)
self.adapter = Wav2Vec2BertAdapter(config) if config.add_adapter else None
self.intermediate_ffn = None
if config.use_intermediate_ffn_before_adapter:
self.intermediate_ffn = Wav2Vec2BertFeedForward(config, act_fn="relu")
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
raise AttributeError("Not needed for Wav2Vec2Bert")
def forward(
self,
input_features: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Wav2Vec2BertBaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states, extract_features = self.feature_projection(input_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.intermediate_ffn:
expanded_hidden_states = self.intermediate_ffn(hidden_states)
hidden_states = hidden_states + 0.5 * expanded_hidden_states
if self.adapter is not None:
hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Wav2Vec2BertBaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| Wav2Vec2BertModel |
python | simplejson__simplejson | simplejson/tests/test_unicode.py | {
"start": 143,
"end": 7056
} | class ____(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEqual(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEqual(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEqual(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEqual(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEqual(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEqual(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEqual(json.dumps(u), '"\\ud834\\udd20"')
self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEqual(json.loads('"' + u + '"'), u)
self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
#s = '"\\u{0:04x}"'.format(i)
s = '"\\u%04x"' % (i,)
self.assertEqual(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEqual(type(json.loads(u'""')), text_type)
self.assertEqual(type(json.loads(u'"a"')), text_type)
self.assertEqual(type(json.loads(u'["a"]')[0]), text_type)
def test_ensure_ascii_false_returns_unicode(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type)
self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type)
def test_ensure_ascii_false_bytestring_encoding(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
doc1 = {u'quux': b('Arr\xc3\xaat sur images')}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEqual(json.dumps(doc1), doc_ascii)
self.assertEqual(json.dumps(doc2), doc_ascii)
self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode)
def test_ensure_ascii_linebreak_encoding(self):
# http://timelessrepo.com/json-isnt-a-javascript-subset
s1 = u'\u2029\u2028'
s2 = s1.encode('utf8')
expect = '"\\u2029\\u2028"'
expect_non_ascii = u'"\u2029\u2028"'
self.assertEqual(json.dumps(s1), expect)
self.assertEqual(json.dumps(s2), expect)
self.assertEqual(json.dumps(s1, ensure_ascii=False), expect_non_ascii)
self.assertEqual(json.dumps(s2, ensure_ascii=False), expect_non_ascii)
def test_invalid_escape_sequences(self):
# incomplete escape sequence
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234')
# invalid escape sequence
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
if sys.maxunicode > 65535:
# invalid escape sequence for low surrogate
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
def test_ensure_ascii_still_works(self):
# in the ascii range, ensure that everything is the same
for c in map(unichr, range(0, 127)):
self.assertEqual(
json.dumps(c, ensure_ascii=False),
json.dumps(c))
snowman = u'\N{SNOWMAN}'
self.assertEqual(
json.dumps(c, ensure_ascii=False),
'"' + c + '"')
def test_strip_bom(self):
content = u"\u3053\u3093\u306b\u3061\u308f"
json_doc = codecs.BOM_UTF8 + b(json.dumps(content))
self.assertEqual(json.load(BytesIO(json_doc)), content)
for doc in json_doc, json_doc.decode('utf8'):
self.assertEqual(json.loads(doc), content)
| TestUnicode |
python | keon__algorithms | algorithms/map/hashtable.py | {
"start": 3168,
"end": 3899
} | class ____(HashTable):
MIN_SIZE = 8
def __init__(self):
super().__init__(self.MIN_SIZE)
def put(self, key, value):
rv = super().put(key, value)
# increase size of dict * 2 if filled >= 2/3 size (like python dict)
if len(self) >= (self.size * 2) / 3:
self.__resize()
def __resize(self):
keys, values = self._keys, self._values
self.size *= 2 # this will be the new size
self._len = 0
self._keys = [self._empty] * self.size
self._values = [self._empty] * self.size
for key, value in zip(keys, values):
if key is not self._empty and key is not self._deleted:
self.put(key, value)
| ResizableHashTable |
python | huggingface__transformers | tests/models/openai/test_modeling_openai.py | {
"start": 6732,
"end": 10936
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
# special case for DoubleHeads model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
dtype=torch.long,
device=torch_device,
)
inputs_dict["input_ids"] = inputs_dict["labels"]
inputs_dict["token_type_ids"] = inputs_dict["labels"]
inputs_dict["mc_token_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices),
dtype=torch.long,
device=torch_device,
)
inputs_dict["mc_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = OpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_openai_gpt_double_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
def test_openai_gpt_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "openai-community/openai-gpt"
model = OpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip("Tied weights mapping is reversed, so this is supposed to error out")
def test_correct_missing_keys(self):
# openai defines `_tied_weights_keys = {"transformer.tokens_embed.weight": "lm_head.weight"}` instead
# of the usual `_tied_weights_keys = {"lm_head.weight": "transformer.tokens_embed.weight"}`, so removing
# the head parameters actually removes the source weight, so this test is supposed to fail
pass
@require_torch
| OpenAIGPTModelTest |
python | pikepdf__pikepdf | src/pikepdf/codec.py | {
"start": 4463,
"end": 4763
} | class ____(codecs.IncrementalEncoder):
"""Implement PdfDocEncoding incremental encoder."""
def encode(self, input: str, final: bool = False) -> bytes:
"""Implement codecs.IncrementalEncoder.encode for pdfdoc."""
return pdfdoc_encode(input, 'strict')[0]
| PdfDocIncrementalEncoder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1259794,
"end": 1260494
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a
org.update_member_repository_creation_permission event.
"""
__schema__ = github_schema
__field_names__ = ("can_create_repositories", "visibility")
can_create_repositories = sgqlc.types.Field(Boolean, graphql_name="canCreateRepositories")
"""Can members create repositories in the organization."""
visibility = sgqlc.types.Field(OrgUpdateMemberRepositoryCreationPermissionAuditEntryVisibility, graphql_name="visibility")
"""The permission for visibility level of repositories for this
organization.
"""
| OrgUpdateMemberRepositoryCreationPermissionAuditEntry |
python | walkccc__LeetCode | solutions/3266. Final Array State After K Multiplication Operations II/3266.py | {
"start": 0,
"end": 1472
} | class ____:
def getFinalState(
self,
nums: list[int],
k: int,
multiplier: int
) -> list[int]:
if multiplier == 1:
return nums
MOD = 1_000_000_007
n = len(nums)
maxNum = max(nums)
ans = [0] * n
minHeap = [(num, i) for i, num in enumerate(nums)]
heapq.heapify(minHeap)
# Keep multiplying the minimum number as close as possible to the maximum
# number in the array. After that, stop multiplying the minimum number
# because it will be greater than the maximum number in the array and break
# the circularity.
while k > 0 and minHeap[0][0] * multiplier <= maxNum:
num, i = heapq.heappop(minHeap)
heapq.heappush(minHeap, (num * multiplier, i))
k -= 1
sortedIndexedNums = sorted(minHeap)
multipliesPerNum, remainingK = divmod(k, n)
# Evenly distribute the remaining multiplications to each number by using
# fast exponentiation.
for index, (num, i) in enumerate(sortedIndexedNums):
sortedIndexedNums[index] = (
sortedIndexedNums[index][0] *
pow(multiplier, multipliesPerNum, MOD) % MOD, i)
# Distribute the remaining multiplications to the minimum `remainingK`
# numbers.
for index in range(remainingK):
sortedIndexedNums[index] = (
sortedIndexedNums[index][0] * multiplier % MOD,
sortedIndexedNums[index][1])
for num, i in sortedIndexedNums:
ans[i] = num
return ans
| Solution |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py | {
"start": 646,
"end": 4267
} | class ____:
def test_empty_graph(self):
G1 = nx.Graph()
G2 = nx.Graph()
gparams = _GraphParameters(G1, G2, None, None, None, None, None)
assert len(set(_matching_order(gparams))) == 0
def test_single_node(self):
G1 = nx.Graph()
G2 = nx.Graph()
G1.add_node(1)
G2.add_node(1)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip(G2, it.cycle(labels_many))),
"label",
)
l1, l2 = (
nx.get_node_attributes(G1, "label"),
nx.get_node_attributes(G2, "label"),
)
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
m = _matching_order(gparams)
assert m == [1]
def test_matching_order(self):
labels = [
"blue",
"blue",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"yellow",
"purple",
"purple",
"blue",
"blue",
]
G1 = nx.Graph(
[
(0, 1),
(0, 2),
(1, 2),
(2, 5),
(2, 4),
(1, 3),
(1, 4),
(3, 6),
(4, 6),
(6, 7),
(7, 8),
(9, 10),
(9, 11),
(11, 12),
(11, 13),
(12, 13),
(10, 13),
]
)
G2 = G1.copy()
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels))), "label")
nx.set_node_attributes(
G2,
dict(zip(G2, it.cycle(labels))),
"label",
)
l1, l2 = (
nx.get_node_attributes(G1, "label"),
nx.get_node_attributes(G2, "label"),
)
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
expected = [9, 11, 10, 13, 12, 1, 2, 4, 0, 3, 6, 5, 7, 8]
assert _matching_order(gparams) == expected
def test_matching_order_all_branches(self):
G1 = nx.Graph(
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4)]
)
G1.add_node(5)
G2 = G1.copy()
G1.nodes[0]["label"] = "black"
G1.nodes[1]["label"] = "blue"
G1.nodes[2]["label"] = "blue"
G1.nodes[3]["label"] = "red"
G1.nodes[4]["label"] = "red"
G1.nodes[5]["label"] = "blue"
G2.nodes[0]["label"] = "black"
G2.nodes[1]["label"] = "blue"
G2.nodes[2]["label"] = "blue"
G2.nodes[3]["label"] = "red"
G2.nodes[4]["label"] = "red"
G2.nodes[5]["label"] = "blue"
l1, l2 = (
nx.get_node_attributes(G1, "label"),
nx.get_node_attributes(G2, "label"),
)
gparams = _GraphParameters(
G1,
G2,
l1,
l2,
nx.utils.groups(l1),
nx.utils.groups(l2),
nx.utils.groups(dict(G2.degree())),
)
expected = [0, 4, 1, 3, 2, 5]
assert _matching_order(gparams) == expected
| TestNodeOrdering |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 4047,
"end": 5736
} | class ____(BaseModel):
"""
Request body for Clear Task Instances endpoint.
"""
model_config = ConfigDict(
extra="forbid",
)
dry_run: Annotated[bool | None, Field(title="Dry Run")] = True
start_date: Annotated[datetime | None, Field(title="Start Date")] = None
end_date: Annotated[datetime | None, Field(title="End Date")] = None
only_failed: Annotated[bool | None, Field(title="Only Failed")] = True
only_running: Annotated[bool | None, Field(title="Only Running")] = False
reset_dag_runs: Annotated[bool | None, Field(title="Reset Dag Runs")] = True
task_ids: Annotated[
list[str | TaskIds] | None,
Field(
description="A list of `task_id` or [`task_id`, `map_index`]. If only the `task_id` is provided for a mapped task, all of its map indices will be targeted.",
title="Task Ids",
),
] = None
dag_run_id: Annotated[str | None, Field(title="Dag Run Id")] = None
include_upstream: Annotated[bool | None, Field(title="Include Upstream")] = False
include_downstream: Annotated[bool | None, Field(title="Include Downstream")] = False
include_future: Annotated[bool | None, Field(title="Include Future")] = False
include_past: Annotated[bool | None, Field(title="Include Past")] = False
run_on_latest_version: Annotated[
bool | None,
Field(
description="(Experimental) Run on the latest bundle version of the dag after clearing the task instances.",
title="Run On Latest Version",
),
] = False
prevent_running_task: Annotated[bool | None, Field(title="Prevent Running Task")] = False
| ClearTaskInstancesBody |
python | coleifer__peewee | peewee.py | {
"start": 62596,
"end": 63202
} | class ____(BaseQuery):
def __init__(self, sql=None, params=None, **kwargs):
super(RawQuery, self).__init__(**kwargs)
self._sql = sql
self._params = params
def __sql__(self, ctx):
ctx.literal(self._sql)
if self._params:
for param in self._params:
ctx.value(param, add_param=False)
return ctx
def _execute(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
| RawQuery |
python | docker__docker-py | tests/unit/utils_test.py | {
"start": 7323,
"end": 9235
} | class ____(unittest.TestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
of 'file_content' and returns the filename.
Don't forget to unlink the file with os.unlink() after.
"""
local_tempfile = tempfile.NamedTemporaryFile(delete=False)
local_tempfile.write(file_content.encode('UTF-8'))
local_tempfile.close()
return local_tempfile.name
def test_parse_env_file_proper(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_with_equals_character(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=sec==ret')
get_parse_env_file = parse_env_file(env_file)
assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'}
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
get_parse_env_file = parse_env_file(env_file)
assert get_parse_env_file == {'USER': 'jdoe'}
os.unlink(env_file)
def test_parse_env_file_newline(self):
env_file = self.generate_tempfile(
file_content='\nUSER=jdoe\n\n\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
with pytest.raises(DockerException):
parse_env_file(env_file)
os.unlink(env_file)
| ParseEnvFileTest |
python | walkccc__LeetCode | solutions/3099. Harshad Number/3099.py | {
"start": 0,
"end": 252
} | class ____:
def sumOfTheDigitsOfHarshadNumber(self, x: int) -> int:
digitSum = self._getDigitSum(x)
return digitSum if x % digitSum == 0 else -1
def _getDigitSum(self, num: int) -> int:
return sum(int(digit) for digit in str(num))
| Solution |
python | wandb__wandb | wandb/sdk/data_types/bokeh.py | {
"start": 332,
"end": 2910
} | class ____(Media):
"""Wandb class for Bokeh plots.
Args:
val: Bokeh plot
"""
_log_type = "bokeh-file"
def __init__(
self,
data_or_path: Union[
str,
pathlib.Path,
"document.Document",
"model.Model",
],
):
super().__init__()
bokeh = util.get_module(
"bokeh",
required=f"{nameof(Bokeh)!r} requires the bokeh package. Please install it with `pip install bokeh`.",
)
if isinstance(data_or_path, (str, pathlib.Path)) and os.path.exists(
data_or_path
):
data_or_path = str(data_or_path)
with open(data_or_path) as file:
b_json = json.load(file)
self.b_obj = bokeh.document.Document.from_json(b_json)
self._set_file(data_or_path, is_tmp=False, extension=".bokeh.json")
elif isinstance(data_or_path, bokeh.model.Model):
_data = bokeh.document.Document()
_data.add_root(data_or_path)
# serialize/deserialize pairing followed by sorting attributes ensures
# that the file's sha's are equivalent in subsequent calls
self.b_obj = bokeh.document.Document.from_json(_data.to_json())
b_json = self.b_obj.to_json()
if "references" in b_json["roots"]:
b_json["roots"]["references"].sort(key=lambda x: x["id"])
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".bokeh.json")
with codecs.open(tmp_path, "w", encoding="utf-8") as fp:
util.json_dump_safer(b_json, fp)
self._set_file(tmp_path, is_tmp=True, extension=".bokeh.json")
elif not isinstance(data_or_path, bokeh.document.Document):
raise TypeError(
"Bokeh constructor accepts Bokeh document/model or path to Bokeh json file"
)
def get_media_subdir(self):
return os.path.join("media", "bokeh")
def to_json(self, run):
# TODO: (tss) this is getting redundant for all the media objects. We can probably
# pull this into Media#to_json and remove this type override for all the media types.
# There are only a few cases where the type is different between artifacts and runs.
json_dict = super().to_json(run)
json_dict["_type"] = self._log_type
return json_dict
@classmethod
def from_json(cls, json_obj, source_artifact):
return cls(source_artifact.get_entry(json_obj["path"]).download())
| Bokeh |
python | networkx__networkx | networkx/algorithms/bipartite/tests/test_centrality.py | {
"start": 81,
"end": 6362
} | class ____:
@classmethod
def setup_class(cls):
cls.P4 = nx.path_graph(4)
cls.K3 = nx.complete_bipartite_graph(3, 3)
cls.C4 = nx.cycle_graph(4)
cls.davis = nx.davis_southern_women_graph()
cls.top_nodes = [
n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0
]
def test_degree_centrality(self):
d = bipartite.degree_centrality(self.P4, [1, 3])
answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5}
assert d == answer
d = bipartite.degree_centrality(self.K3, [0, 1, 2])
answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
assert d == answer
d = bipartite.degree_centrality(self.C4, [0, 2])
answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
assert d == answer
def test_betweenness_centrality(self):
c = bipartite.betweenness_centrality(self.P4, [1, 3])
answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0}
assert c == answer
c = bipartite.betweenness_centrality(self.K3, [0, 1, 2])
answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125}
assert c == answer
c = bipartite.betweenness_centrality(self.C4, [0, 2])
answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
assert c == answer
def test_closeness_centrality(self):
c = bipartite.closeness_centrality(self.P4, [1, 3])
answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3}
assert c == answer
c = bipartite.closeness_centrality(self.K3, [0, 1, 2])
answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
assert c == answer
c = bipartite.closeness_centrality(self.C4, [0, 2])
answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
assert c == answer
G = nx.Graph()
G.add_node(0)
G.add_node(1)
c = bipartite.closeness_centrality(G, [0])
assert c == {0: 0.0, 1: 0.0}
c = bipartite.closeness_centrality(G, [1])
assert c == {0: 0.0, 1: 0.0}
def test_bipartite_closeness_centrality_unconnected(self):
G = nx.complete_bipartite_graph(3, 3)
G.add_edge(6, 7)
c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False)
answer = {
0: 10.0 / 7,
2: 10.0 / 7,
4: 10.0 / 7,
6: 10.0,
1: 10.0 / 7,
3: 10.0 / 7,
5: 10.0 / 7,
7: 10.0,
}
assert c == answer
def test_davis_degree_centrality(self):
G = self.davis
deg = bipartite.degree_centrality(G, self.top_nodes)
answer = {
"E8": 0.78,
"E9": 0.67,
"E7": 0.56,
"Nora Fayette": 0.57,
"Evelyn Jefferson": 0.57,
"Theresa Anderson": 0.57,
"E6": 0.44,
"Sylvia Avondale": 0.50,
"Laura Mandeville": 0.50,
"Brenda Rogers": 0.50,
"Katherina Rogers": 0.43,
"E5": 0.44,
"Helen Lloyd": 0.36,
"E3": 0.33,
"Ruth DeSand": 0.29,
"Verne Sanderson": 0.29,
"E12": 0.33,
"Myra Liddel": 0.29,
"E11": 0.22,
"Eleanor Nye": 0.29,
"Frances Anderson": 0.29,
"Pearl Oglethorpe": 0.21,
"E4": 0.22,
"Charlotte McDowd": 0.29,
"E10": 0.28,
"Olivia Carleton": 0.14,
"Flora Price": 0.14,
"E2": 0.17,
"E1": 0.17,
"Dorothy Murchison": 0.14,
"E13": 0.17,
"E14": 0.17,
}
for node, value in answer.items():
assert value == pytest.approx(deg[node], abs=1e-2)
def test_davis_betweenness_centrality(self):
G = self.davis
bet = bipartite.betweenness_centrality(G, self.top_nodes)
answer = {
"E8": 0.24,
"E9": 0.23,
"E7": 0.13,
"Nora Fayette": 0.11,
"Evelyn Jefferson": 0.10,
"Theresa Anderson": 0.09,
"E6": 0.07,
"Sylvia Avondale": 0.07,
"Laura Mandeville": 0.05,
"Brenda Rogers": 0.05,
"Katherina Rogers": 0.05,
"E5": 0.04,
"Helen Lloyd": 0.04,
"E3": 0.02,
"Ruth DeSand": 0.02,
"Verne Sanderson": 0.02,
"E12": 0.02,
"Myra Liddel": 0.02,
"E11": 0.02,
"Eleanor Nye": 0.01,
"Frances Anderson": 0.01,
"Pearl Oglethorpe": 0.01,
"E4": 0.01,
"Charlotte McDowd": 0.01,
"E10": 0.01,
"Olivia Carleton": 0.01,
"Flora Price": 0.01,
"E2": 0.00,
"E1": 0.00,
"Dorothy Murchison": 0.00,
"E13": 0.00,
"E14": 0.00,
}
for node, value in answer.items():
assert value == pytest.approx(bet[node], abs=1e-2)
def test_davis_closeness_centrality(self):
G = self.davis
clos = bipartite.closeness_centrality(G, self.top_nodes)
answer = {
"E8": 0.85,
"E9": 0.79,
"E7": 0.73,
"Nora Fayette": 0.80,
"Evelyn Jefferson": 0.80,
"Theresa Anderson": 0.80,
"E6": 0.69,
"Sylvia Avondale": 0.77,
"Laura Mandeville": 0.73,
"Brenda Rogers": 0.73,
"Katherina Rogers": 0.73,
"E5": 0.59,
"Helen Lloyd": 0.73,
"E3": 0.56,
"Ruth DeSand": 0.71,
"Verne Sanderson": 0.71,
"E12": 0.56,
"Myra Liddel": 0.69,
"E11": 0.54,
"Eleanor Nye": 0.67,
"Frances Anderson": 0.67,
"Pearl Oglethorpe": 0.67,
"E4": 0.54,
"Charlotte McDowd": 0.60,
"E10": 0.55,
"Olivia Carleton": 0.59,
"Flora Price": 0.59,
"E2": 0.52,
"E1": 0.52,
"Dorothy Murchison": 0.65,
"E13": 0.52,
"E14": 0.52,
}
for node, value in answer.items():
assert value == pytest.approx(clos[node], abs=1e-2)
| TestBipartiteCentrality |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/mysql.py | {
"start": 1450,
"end": 2378
} | class ____(SQLBatchTestSetup[MySQLDatasourceTestConfig]):
@property
@override
def connection_string(self) -> str:
return "mysql+pymysql://root@localhost/test_ci"
@property
@override
def use_schema(self) -> bool:
return False
@property
@override
def inferrable_types_lookup(self) -> InferrableTypesLookup:
# mysql requires a length for VARCHAR
overrides: InferrableTypesLookup = {
str: sqltypes.VARCHAR(255),
}
return super().inferrable_types_lookup | overrides
@override
def make_asset(self) -> TableAsset:
return self.context.data_sources.add_sql(
name=self._random_resource_name(), connection_string=self.connection_string
).add_table_asset(
name=self._random_resource_name(),
table_name=self.table_name,
schema_name=self.schema,
)
| MySQLBatchTestSetup |
python | doocs__leetcode | solution/0900-0999/0940.Distinct Subsequences II/Solution.py | {
"start": 0,
"end": 437
} | class ____:
def distinctSubseqII(self, s: str) -> int:
mod = 10**9 + 7
n = len(s)
dp = [[0] * 26 for _ in range(n + 1)]
for i, c in enumerate(s, 1):
k = ord(c) - ord('a')
for j in range(26):
if j == k:
dp[i][j] = sum(dp[i - 1]) % mod + 1
else:
dp[i][j] = dp[i - 1][j]
return sum(dp[-1]) % mod
| Solution |
python | ray-project__ray | python/ray/data/_internal/execution/bundle_queue/bundle_queue.py | {
"start": 141,
"end": 1883
} | class ____(abc.ABC):
@abc.abstractmethod
def __len__(self) -> int:
"""Return the number of bundles in the queue."""
...
@abc.abstractmethod
def __contains__(self, bundle: "RefBundle") -> bool:
"""Return whether the bundle is in the queue."""
...
@abc.abstractmethod
def add(self, bundle: "RefBundle") -> None:
"""Add a bundle to the queue."""
...
@abc.abstractmethod
def get_next(self) -> "RefBundle":
"""Remove and return the head of the queue.
Raises:
IndexError: If the queue is empty.
Returns:
A Refbundle if has_next() is True
"""
...
@abc.abstractmethod
def peek_next(self) -> Optional["RefBundle"]:
"""Return the head of the queue without removing it.
If the queue is empty, return `None`.
"""
...
@abc.abstractmethod
def has_next(self) -> bool:
"""Check if the queue has a valid bundle."""
...
@abc.abstractmethod
def remove(self, bundle: "RefBundle"):
"""Remove a bundle from the queue."""
...
@abc.abstractmethod
def clear(self):
"""Remove all bundles from the queue."""
...
@abc.abstractmethod
def estimate_size_bytes(self) -> int:
"""Return an estimate of the total size of objects in the queue."""
...
@abc.abstractmethod
def num_blocks(self) -> int:
"""Return the number of blocks in the queue."""
...
@abc.abstractmethod
def is_empty(self):
"""Return whether this queue and all of its internal data structures are empty.
This method is used for testing.
"""
...
| BundleQueue |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 20963,
"end": 21352
} | class ____(ChainedSource):
def __post_init__(self) -> None:
assert self.base is not None
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen(self.base)
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"{self.base.name()}._type().qualified_name()"
| ScriptObjectQualifiedNameSource |
python | huggingface__transformers | tests/models/lfm2/test_modeling_lfm2.py | {
"start": 1117,
"end": 1420
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = Lfm2Model
def __init__(
self,
parent,
layer_types=["full_attention", "conv"],
):
super().__init__(parent)
self.layer_types = layer_types
@require_torch
| Lfm2ModelTester |
python | great-expectations__great_expectations | tests/integration/fluent/test_integration_datasource.py | {
"start": 21736,
"end": 21883
} | class ____:
context: EphemeralDataContext
datasource: PandasDatasource
dataframe: pd.DataFrame
@dataclass
| ContextPandasDataSourceAndFrame |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py | {
"start": 4438,
"end": 11362
} | class ____(QueryResultsTransformer[Mapping[str, Any]]):
"""
Represents a transformer that converts the query results into a format which is known by the Metrics API.
"""
def __init__(self) -> None:
self._start: datetime | None = None
self._end: datetime | None = None
self._interval: int | None = None
def _assert_transformation_preconditions(self) -> tuple[datetime, datetime, int | None]:
assert self._start is not None and self._end is not None
return self._start, self._end, self._interval
def _build_intermediate_results(
self, query_results: list[QueryResult]
) -> tuple[list[OrderedDict[GroupKey, GroupValue]], list[list[QueryMeta]]]:
"""
Builds a tuple of intermediate groups and metadata which is used to efficiently transform the query results.
Returns:
A tuple of intermediate groups and metadata.
"""
queries_groups: list[OrderedDict[GroupKey, GroupValue]] = []
queries_meta: list[list[QueryMeta]] = []
def _add_to_query_groups(
rows: Sequence[Mapping[str, Any]],
group_bys: list[str],
query_groups: OrderedDict[GroupKey, GroupValue],
add_to_group: Callable[[Mapping[str, Any], GroupValue], None],
) -> None:
if not rows:
query_groups.setdefault(tuple(), GroupValue.empty())
return
for row in rows:
grouped_values = []
for group_by in group_bys:
# We can cast the group by to string because we know that tags must be strings.
grouped_values.append((group_by, cast(str, row.get(group_by))))
group_value = query_groups.setdefault(tuple(grouped_values), GroupValue.empty())
add_to_group(row, group_value)
return
for query_result in query_results:
# All queries must have the same timerange, so under this assumption we take the first occurrence of each.
if self._start is None:
self._start = query_result.modified_start
if self._end is None:
self._end = query_result.modified_end
if self._interval is None:
self._interval = query_result.interval
query_groups: OrderedDict[GroupKey, GroupValue] = OrderedDict()
# We obtain the group bys of the query.
group_bys = query_result.group_bys
# We group the totals data first, since we want the order to be set by the totals.
_add_to_query_groups(
query_result.totals,
group_bys,
query_groups,
lambda value, group: group.add_totals(value.get("aggregate_value")),
)
if query_result.series_query is not None:
# We group the series data second, which will use the already ordered dictionary entries added by the
# totals.
_add_to_query_groups(
query_result.series,
group_bys,
query_groups,
lambda value, group: group.add_series_entry(
cast(str, value.get("time")), value.get("aggregate_value")
),
)
query_meta = []
for meta_item in query_result.meta:
meta_name = meta_item["name"]
meta_type = meta_item["type"]
query_meta.append(QueryMeta(name=meta_name, type=meta_type))
# We add additional metadata from the query themselves to make the API more transparent.
query_meta.append(
QueryMeta(
group_bys=group_bys,
order=query_result.order.value if query_result.order else None,
limit=query_result.limit,
has_more=query_result.has_more,
unit_family=(
query_result.unit_family.value if query_result.unit_family else None
),
unit=query_result.unit,
scaling_factor=query_result.scaling_factor,
)
)
queries_groups.append(query_groups)
queries_meta.append(query_meta)
return queries_groups, queries_meta
def transform(self, query_results: list[QueryResult]) -> Mapping[str, Any]:
"""
Transforms the query results into the Metrics API format.
Returns:
A mapping containing the data transformed in the correct format.
"""
base_result: dict[str, Any] = {
"data": [],
"meta": [],
"start": None,
"end": None,
"intervals": [],
}
if not query_results:
return base_result
# We first build intermediate results that we can work efficiently with.
queries_groups, queries_meta = self._build_intermediate_results(query_results)
# We assert that all the data we require for the transformation has been found during the building of
# intermediate results.
start, end, interval = self._assert_transformation_preconditions()
intervals = None
if interval is not None:
# We build the intervals that we will return to the API user.
intervals = _build_intervals(start, end, interval)
# We build the transformed groups given the intermediate groups.
transformed_queries_groups = []
for query_groups in queries_groups:
translated_query_groups = []
for group_key, group_value in query_groups.items():
base_group: dict[str, Any] = {
"by": {name: value for name, value in group_key},
"totals": undefined_value_to_none(group_value.totals),
}
if intervals is not None and interval is not None:
base_group["series"] = _generate_full_series(
int(start.timestamp()), len(intervals), interval, group_value.series
)
translated_query_groups.append(base_group)
transformed_queries_groups.append(translated_query_groups)
# We build the transformed meta given the intermediate meta.
transformed_queries_meta = []
for query_meta in queries_meta:
transformed_queries_meta.append([meta.meta for meta in query_meta])
base_result["data"] = transformed_queries_groups
base_result["meta"] = transformed_queries_meta
base_result["start"] = start
base_result["end"] = end
if intervals is not None:
base_result["intervals"] = intervals
return base_result
| MetricsAPIQueryResultsTransformer |
python | huggingface__transformers | tests/utils/import_structures/import_structure_raw_register.py | {
"start": 937,
"end": 1095
} | class ____:
def __init__(self):
pass
@requires(
backends=("torch",)
)
def a2():
pass
@requires(
backends=(
"torch",
)
)
| A2 |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 24626,
"end": 26677
} | class ____(BaseDataset):
"""
Feature: Datasets created with gzip compression
"""
def test_gzip(self):
""" Create with explicit gzip options """
dset = self.f.create_dataset(make_name(), (20, 30), compression='gzip',
compression_opts=9)
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 9)
def test_gzip_implicit(self):
""" Create with implicit gzip level (level 4) """
dset = self.f.create_dataset(make_name(), (20, 30), compression='gzip')
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 4)
@pytest.mark.thread_unsafe(reason="monkey-patch")
def test_gzip_number(self):
""" Create with gzip level by specifying integer """
name = make_name()
dset = self.f.create_dataset(name, (20, 30), compression=7)
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 7)
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
with self.assertRaises(ValueError):
dset = self.f.create_dataset(name, (20, 30), compression=7)
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
def test_gzip_exc(self):
""" Illegal gzip level (explicit or implicit) raises ValueError """
name = make_name()
with self.assertRaises((ValueError, RuntimeError)):
self.f.create_dataset(name, (20, 30), compression=14)
with self.assertRaises(ValueError):
self.f.create_dataset(name, (20, 30), compression=-4)
with self.assertRaises(ValueError):
self.f.create_dataset(name, (20, 30), compression='gzip',
compression_opts=14)
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
| TestCreateGzip |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 5136,
"end": 5617
} | class ____(sqltypes.TIME):
"""PostgreSQL TIME type."""
__visit_name__ = "TIME"
def __init__(
self, timezone: bool = False, precision: Optional[int] = None
) -> None:
"""Construct a TIME.
:param timezone: boolean value if timezone present, default False
:param precision: optional integer precision value
.. versionadded:: 1.4
"""
super().__init__(timezone=timezone)
self.precision = precision
| TIME |
python | great-expectations__great_expectations | great_expectations/metrics/query/data_source_table.py | {
"start": 270,
"end": 440
} | class ____(QueryMetric[QueryDataSourceTableResult]):
name = "query.data_source_table"
query: NonEmptyString
data_source_name: NonEmptyString
| QueryDataSourceTable |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow_except.py | {
"start": 4868,
"end": 5333
} | class ____(Executor):
def __init__(self, *args, **kwargs):
raise Exception
@pytest.mark.repeat(10)
@pytest.mark.timeout(10)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_startup_exception_not_hanging1(protocol):
f = Flow(protocol=protocol).add(uses=ExceptionExecutor1)
from jina.excepts import RuntimeFailToStart
with pytest.raises(RuntimeFailToStart):
with f:
pass
| ExceptionExecutor1 |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 104933,
"end": 154188
} | class ____(CFEncodedBase):
DIMENSION_KEY = "_ARRAY_DIMENSIONS"
zarr_version = 2
version_kwargs: dict[str, Any] = {}
def create_zarr_target(self):
raise NotImplementedError
@contextlib.contextmanager
def create_store(self, cache_members: bool = False):
with self.create_zarr_target() as store_target:
yield backends.ZarrStore.open_group(
store_target,
mode="w",
cache_members=cache_members,
**self.version_kwargs,
)
def save(self, dataset, store_target, **kwargs): # type: ignore[override]
return dataset.to_zarr(store=store_target, **kwargs, **self.version_kwargs)
@contextlib.contextmanager
def open(self, path, **kwargs):
with xr.open_dataset(
path, engine="zarr", mode="r", **kwargs, **self.version_kwargs
) as ds:
yield ds
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with self.create_zarr_target() as store_target:
self.save(data, store_target, **save_kwargs)
with self.open(store_target, **open_kwargs) as ds:
yield ds
@pytest.mark.asyncio
@pytest.mark.skipif(
not has_zarr_v3,
reason="zarr-python <3 did not support async loading",
)
async def test_load_async(self) -> None:
await super().test_load_async()
def test_roundtrip_bytes_with_fill_value(self):
pytest.xfail("Broken by Zarr 3.0.7")
@pytest.mark.parametrize("consolidated", [False, True, None])
def test_roundtrip_consolidated(self, consolidated) -> None:
expected = create_test_data()
with self.roundtrip(
expected,
save_kwargs={"consolidated": consolidated},
open_kwargs={"backend_kwargs": {"consolidated": consolidated}},
) as actual:
self.check_dtypes_roundtripped(expected, actual)
assert_identical(expected, actual)
def test_read_non_consolidated_warning(self) -> None:
expected = create_test_data()
with self.create_zarr_target() as store:
self.save(
expected, store_target=store, consolidated=False, **self.version_kwargs
)
if getattr(store, "supports_consolidated_metadata", True):
with pytest.warns(
RuntimeWarning,
match="Failed to open Zarr store with consolidated",
):
with xr.open_zarr(store, **self.version_kwargs) as ds:
assert_identical(ds, expected)
def test_non_existent_store(self) -> None:
patterns = [
"No such file or directory",
"Unable to find group",
"No group found in store",
"does not exist",
]
with pytest.raises(FileNotFoundError, match=f"({'|'.join(patterns)})"):
xr.open_zarr(f"{uuid.uuid4()}")
@pytest.mark.skipif(has_zarr_v3, reason="chunk_store not implemented in zarr v3")
def test_with_chunkstore(self) -> None:
expected = create_test_data()
with (
self.create_zarr_target() as store_target,
self.create_zarr_target() as chunk_store,
):
save_kwargs = {"chunk_store": chunk_store}
self.save(expected, store_target, **save_kwargs)
# the chunk store must have been populated with some entries
assert len(chunk_store) > 0
open_kwargs = {"backend_kwargs": {"chunk_store": chunk_store}}
with self.open(store_target, **open_kwargs) as ds:
assert_equal(ds, expected)
@requires_dask
def test_auto_chunk(self) -> None:
original = create_test_data().chunk()
with self.roundtrip(original, open_kwargs={"chunks": None}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# there should be no chunks
assert v.chunks is None
with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# chunk size should be the same as original
assert v.chunks == original[k].chunks
    @requires_dask
    @pytest.mark.filterwarnings("ignore:The specified chunks separate:UserWarning")
    def test_manual_chunk(self) -> None:
        """Explicit ``chunks`` arguments (int or dict) control the chunking on open."""
        original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})
        # Using chunks = None should return non-chunked arrays
        open_kwargs: dict[str, Any] = {"chunks": None}
        with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                assert v._in_memory == (k in actual.dims)
                # there should be no chunks
                assert v.chunks is None
        # uniform arrays
        for i in range(2, 6):
            rechunked = original.chunk(chunks=i)
            open_kwargs = {"chunks": i}
            with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
                for k, v in actual.variables.items():
                    # only index variables should be in memory
                    assert v._in_memory == (k in actual.dims)
                    # chunk size should be the same as rechunked
                    assert v.chunks == rechunked[k].chunks
        chunks = {"dim1": 2, "dim2": 3, "dim3": 5}
        rechunked = original.chunk(chunks=chunks)
        # overwrite_encoded_chunks makes the requested chunks stick in encoding
        open_kwargs = {
            "chunks": chunks,
            "backend_kwargs": {"overwrite_encoded_chunks": True},
        }
        with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
            for k, v in actual.variables.items():
                assert v.chunks == rechunked[k].chunks
            with self.roundtrip(actual) as auto:
                # encoding should have changed
                for k, v in actual.variables.items():
                    assert v.chunks == rechunked[k].chunks
                assert_identical(actual, auto)
                assert_identical(actual.load(), auto.load())
def test_unlimited_dims_encoding_is_ignored(self) -> None:
ds = Dataset({"x": np.arange(10)})
ds.encoding = {"unlimited_dims": ["x"]}
with self.roundtrip(ds) as actual:
assert_identical(ds, actual)
    @requires_dask
    @pytest.mark.filterwarnings("ignore:.*does not have a Zarr V3 specification.*")
    def test_warning_on_bad_chunks(self) -> None:
        """Chunk specs that split stored chunks warn; aligned specs do not."""
        original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 3})
        bad_chunks = (2, {"dim2": (3, 3, 2, 1)})
        for chunks in bad_chunks:
            kwargs = {"chunks": chunks}
            with pytest.warns(UserWarning):
                with self.roundtrip(original, open_kwargs=kwargs) as actual:
                    for k, v in actual.variables.items():
                        # only index variables should be in memory
                        assert v._in_memory == (k in actual.dims)
        good_chunks: tuple[dict[str, Any], ...] = ({"dim2": 3}, {"dim3": (6, 4)}, {})
        for chunks in good_chunks:
            kwargs = {"chunks": chunks}
            with assert_no_warnings():
                # unrelated Zarr format-3 warnings are filtered out so only
                # chunk-alignment warnings would trip assert_no_warnings
                with warnings.catch_warnings():
                    warnings.filterwarnings(
                        "ignore",
                        message=".*Zarr format 3 specification.*",
                        category=UserWarning,
                    )
                    with self.roundtrip(original, open_kwargs=kwargs) as actual:
                        for k, v in actual.variables.items():
                            # only index variables should be in memory
                            assert v._in_memory == (k in actual.dims)
    @requires_dask
    def test_deprecate_auto_chunk(self) -> None:
        """The removed ``auto_chunk`` keyword raises TypeError for any value."""
        original = create_test_data().chunk()
        with pytest.raises(TypeError):
            with self.roundtrip(original, open_kwargs={"auto_chunk": True}) as actual:
                for k, v in actual.variables.items():
                    # only index variables should be in memory
                    assert v._in_memory == (k in actual.dims)
                    # chunk size should be the same as original
                    assert v.chunks == original[k].chunks
        with pytest.raises(TypeError):
            with self.roundtrip(original, open_kwargs={"auto_chunk": False}) as actual:
                for k, v in actual.variables.items():
                    # only index variables should be in memory
                    assert v._in_memory == (k in actual.dims)
                    # there should be no chunks
                    assert v.chunks is None
@requires_dask
def test_write_uneven_dask_chunks(self) -> None:
# regression for GH#2225
original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})
with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual:
for k, v in actual.data_vars.items():
assert v.chunks == actual[k].chunks
    def test_chunk_encoding(self) -> None:
        """Chunks given via encoding are honored; non-integer chunks raise."""
        # These datasets have no dask chunks. All chunking specified in
        # encoding
        data = create_test_data()
        chunks = (5, 5)
        data["var2"].encoding.update({"chunks": chunks})

        with self.roundtrip(data) as actual:
            assert chunks == actual["var2"].encoding["chunks"]

        # expect an error with non-integer chunks
        data["var2"].encoding.update({"chunks": (5, 4.5)})
        with pytest.raises(TypeError):
            with self.roundtrip(data) as actual:
                pass
    def test_shard_encoding(self) -> None:
        """Shards given via encoding round-trip (zarr v3 only); shards must be divisible by chunks."""
        # These datasets have no dask chunks. All chunking/sharding specified in
        # encoding
        if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3:
            data = create_test_data()
            chunks = (1, 1)
            shards = (5, 5)
            data["var2"].encoding.update({"chunks": chunks})
            data["var2"].encoding.update({"shards": shards})

            with self.roundtrip(data) as actual:
                assert shards == actual["var2"].encoding["shards"]

            # expect an error with shards not divisible by chunks
            data["var2"].encoding.update({"chunks": (2, 2)})
            with pytest.raises(ValueError):
                with self.roundtrip(data) as actual:
                    pass
    @requires_dask
    @pytest.mark.skipif(
        ON_WINDOWS,
        reason="Very flaky on Windows CI. Can re-enable assuming it starts consistently passing.",
    )
    def test_chunk_encoding_with_dask(self) -> None:
        """Interaction between dask chunks and zarr chunk encoding.

        Covers: automatic chunk inference from dask, irregular dask chunks,
        clashes between encoding["chunks"] and dask chunks, the
        ``safe_chunks=False`` escape hatch, and compatible multiples.
        """
        # These datasets DO have dask chunks. Need to check for various
        # interactions between dask and zarr chunks
        ds = xr.DataArray((np.arange(12)), dims="x", name="var1").to_dataset()

        # - no encoding specified -
        # zarr automatically gets chunk information from dask chunks
        ds_chunk4 = ds.chunk({"x": 4})
        with self.roundtrip(ds_chunk4) as actual:
            assert (4,) == actual["var1"].encoding["chunks"]

        # should fail if dask_chunks are irregular...
        ds_chunk_irreg = ds.chunk({"x": (5, 4, 3)})
        with pytest.raises(ValueError, match=r"uniform chunk sizes."):
            with self.roundtrip(ds_chunk_irreg) as actual:
                pass

        # should fail if encoding["chunks"] clashes with dask_chunks
        badenc = ds.chunk({"x": 4})
        badenc.var1.encoding["chunks"] = (6,)
        with pytest.raises(ValueError, match=r"named 'var1' would overlap"):
            with self.roundtrip(badenc) as actual:
                pass

        # unless...
        with self.roundtrip(badenc, save_kwargs={"safe_chunks": False}) as actual:
            # don't actually check equality because the data could be corrupted
            pass

        # if dask chunks (4) are an integer multiple of zarr chunks (2) it should not fail...
        goodenc = ds.chunk({"x": 4})
        goodenc.var1.encoding["chunks"] = (2,)
        with self.roundtrip(goodenc) as actual:
            pass

        # if initial dask chunks are aligned, size of last dask chunk doesn't matter
        goodenc = ds.chunk({"x": (3, 3, 6)})
        goodenc.var1.encoding["chunks"] = (3,)
        with self.roundtrip(goodenc) as actual:
            pass

        goodenc = ds.chunk({"x": (3, 6, 3)})
        goodenc.var1.encoding["chunks"] = (3,)
        with self.roundtrip(goodenc) as actual:
            pass

        # ... also if the last chunk is irregular
        ds_chunk_irreg = ds.chunk({"x": (5, 5, 2)})
        with self.roundtrip(ds_chunk_irreg) as actual:
            assert (5,) == actual["var1"].encoding["chunks"]
        # re-save Zarr arrays
        with self.roundtrip(ds_chunk_irreg) as original:
            with self.roundtrip(original) as actual:
                assert_identical(original, actual)

        # but intermediate unaligned chunks are bad
        badenc = ds.chunk({"x": (3, 5, 3, 1)})
        badenc.var1.encoding["chunks"] = (3,)
        with pytest.raises(ValueError, match=r"would overlap multiple Dask chunks"):
            with self.roundtrip(badenc) as actual:
                pass

        # - encoding specified -
        # specify compatible encodings
        for chunk_enc in 4, (4,):
            ds_chunk4["var1"].encoding.update({"chunks": chunk_enc})
            with self.roundtrip(ds_chunk4) as actual:
                assert (4,) == actual["var1"].encoding["chunks"]

        # TODO: remove this failure once synchronized overlapping writes are
        # supported by xarray
        ds_chunk4["var1"].encoding.update({"chunks": 5})
        with pytest.raises(ValueError, match=r"named 'var1' would overlap"):
            with self.roundtrip(ds_chunk4) as actual:
                pass
        # override option
        with self.roundtrip(ds_chunk4, save_kwargs={"safe_chunks": False}) as actual:
            # don't actually check equality because the data could be corrupted
            pass
@requires_netcdf
def test_drop_encoding(self):
with open_example_dataset("example_1.nc") as ds:
encodings = {v: {**ds[v].encoding} for v in ds.data_vars}
with self.create_zarr_target() as store:
ds.to_zarr(store, encoding=encodings)
    def test_hidden_zarr_keys(self) -> None:
        """The hidden dimension attribute is written, hidden on decode, and required."""
        skip_if_zarr_format_3("This test is unnecessary; no hidden Zarr keys")

        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            zarr_group = store.ds

            # check that a variable hidden attribute is present and correct
            # JSON only has a single array type, which maps to list in Python.
            # In contrast, dims in xarray is always a tuple.
            for var in expected.variables.keys():
                dims = zarr_group[var].attrs[self.DIMENSION_KEY]
                assert dims == list(expected[var].dims)

            with xr.decode_cf(store):
                # make sure it is hidden
                for var in expected.variables.keys():
                    assert self.DIMENSION_KEY not in expected[var].attrs

            # put it back and try removing from a variable
            attrs = dict(zarr_group["var2"].attrs)
            del attrs[self.DIMENSION_KEY]
            zarr_group["var2"].attrs.put(attrs)

            # without the dimension attribute, decoding must fail
            with pytest.raises(KeyError):
                with xr.decode_cf(store):
                    pass
    def test_dimension_names(self) -> None:
        """Zarr v3 stores record dimension names in array metadata."""
        skip_if_zarr_format_2("No dimension names in V2")

        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            zarr_group = store.ds
            for var in zarr_group:
                assert expected[var].dims == zarr_group[var].metadata.dimension_names
    @pytest.mark.parametrize("group", [None, "group1"])
    def test_write_persistence_modes(self, group) -> None:
        """Write modes 'w', 'w-', and 'a' behave as documented, with and without a group."""
        original = create_test_data()

        # overwrite mode
        with self.roundtrip(
            original,
            save_kwargs={"mode": "w", "group": group},
            open_kwargs={"group": group},
        ) as actual:
            assert_identical(original, actual)

        # don't overwrite mode
        with self.roundtrip(
            original,
            save_kwargs={"mode": "w-", "group": group},
            open_kwargs={"group": group},
        ) as actual:
            assert_identical(original, actual)

        # make sure overwriting works as expected
        with self.create_zarr_target() as store:
            self.save(original, store)
            # should overwrite with no error
            self.save(original, store, mode="w", group=group)
            with self.open(store, group=group) as actual:
                assert_identical(original, actual)
                # 'w-' must refuse to overwrite an existing store
                with pytest.raises((ValueError, FileExistsError)):
                    self.save(original, store, mode="w-")

        # check append mode for normal write
        with self.roundtrip(
            original,
            save_kwargs={"mode": "a", "group": group},
            open_kwargs={"group": group},
        ) as actual:
            assert_identical(original, actual)

        # check append mode for append write
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", group=group, **self.version_kwargs)
            ds_to_append.to_zarr(
                store_target, append_dim="time", group=group, **self.version_kwargs
            )
            original = xr.concat([ds, ds_to_append], dim="time")
            actual = xr.open_dataset(
                store_target, group=group, engine="zarr", **self.version_kwargs
            )
            assert_identical(original, actual)
    def test_compressor_encoding(self) -> None:
        """A custom compressor given via encoding survives a round trip."""
        # specify a custom compressor
        original = create_test_data()

        if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3:
            encoding_key = "compressors"
            # all parameters need to be explicitly specified in order for the comparison to pass below
            encoding = {
                "serializer": zarr.codecs.BytesCodec(endian="little"),
                encoding_key: (
                    zarr.codecs.BloscCodec(
                        cname="zstd",
                        clevel=3,
                        shuffle="shuffle",
                        typesize=8,
                        blocksize=0,
                    ),
                ),
            }
        else:
            from numcodecs.blosc import Blosc

            # zarr v2 uses the singular "compressor" key and a bare codec object
            encoding_key = "compressors" if has_zarr_v3 else "compressor"
            comp = Blosc(cname="zstd", clevel=3, shuffle=2)
            encoding = {encoding_key: (comp,) if has_zarr_v3 else comp}

        save_kwargs = dict(encoding={"var1": encoding})

        with self.roundtrip(original, save_kwargs=save_kwargs) as ds:
            enc = ds["var1"].encoding[encoding_key]
            assert enc == encoding[encoding_key]
def test_group(self) -> None:
original = create_test_data()
group = "some/random/path"
with self.roundtrip(
original, save_kwargs={"group": group}, open_kwargs={"group": group}
) as actual:
assert_identical(original, actual)
    def test_zarr_mode_w_overwrites_encoding(self) -> None:
        """mode='w' replaces previously written encoding (here: add_offset)."""
        data = Dataset({"foo": ("x", [1.0, 1.0, 1.0])})
        with self.create_zarr_target() as store:
            data.to_zarr(
                store, **self.version_kwargs, encoding={"foo": {"add_offset": 1}}
            )
            # on-disk values are offset-encoded (value - add_offset)
            np.testing.assert_equal(
                zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data - 1
            )
            data.to_zarr(
                store,
                **self.version_kwargs,
                encoding={"foo": {"add_offset": 0}},
                mode="w",
            )
            # after overwrite, raw values reflect the new zero offset
            np.testing.assert_equal(
                zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data
            )
def test_encoding_kwarg_fixed_width_string(self) -> None:
# not relevant for zarr, since we don't use EncodedStringCoder
pass
    def test_dataset_caching(self) -> None:
        # No-op override: delegates to the inherited test unchanged
        # (kept, presumably, as an explicit override point — verify before removing).
        super().test_dataset_caching()
    def test_append_write(self) -> None:
        # No-op override: delegates to the inherited test unchanged
        # (kept, presumably, as an explicit override point — verify before removing).
        super().test_append_write()
def test_append_with_mode_rplus_success(self) -> None:
original = Dataset({"foo": ("x", [1])})
modified = Dataset({"foo": ("x", [2])})
with self.create_zarr_target() as store:
original.to_zarr(store, **self.version_kwargs)
modified.to_zarr(store, mode="r+", **self.version_kwargs)
with self.open(store) as actual:
assert_identical(actual, modified)
    def test_append_with_mode_rplus_fails(self) -> None:
        """mode='r+' rejects variables that do not already exist in the store."""
        original = Dataset({"foo": ("x", [1])})
        modified = Dataset({"bar": ("x", [2])})
        with self.create_zarr_target() as store:
            original.to_zarr(store, **self.version_kwargs)
            with pytest.raises(
                ValueError, match="dataset contains non-pre-existing variables"
            ):
                modified.to_zarr(store, mode="r+", **self.version_kwargs)
    def test_append_with_invalid_dim_raises(self) -> None:
        """Appending along a dimension absent from the store raises ValueError."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            with pytest.raises(
                ValueError, match="does not match any existing dataset dimensions"
            ):
                ds_to_append.to_zarr(
                    store_target, append_dim="notvalid", **self.version_kwargs
                )
    def test_append_with_no_dims_raises(self) -> None:
        """Appending a variable with mismatched dimension names raises ValueError."""
        with self.create_zarr_target() as store_target:
            Dataset({"foo": ("x", [1])}).to_zarr(
                store_target, mode="w", **self.version_kwargs
            )
            with pytest.raises(ValueError, match="different dimension names"):
                Dataset({"foo": ("y", [2])}).to_zarr(
                    store_target, mode="a", **self.version_kwargs
                )
    def test_append_with_append_dim_not_set_raises(self) -> None:
        """mode='a' without append_dim raises when dimension sizes differ."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            with pytest.raises(ValueError, match="different dimension sizes"):
                ds_to_append.to_zarr(store_target, mode="a", **self.version_kwargs)
    def test_append_with_mode_not_a_raises(self) -> None:
        """Passing append_dim together with mode='w' raises ValueError."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            with pytest.raises(ValueError, match="cannot set append_dim unless"):
                ds_to_append.to_zarr(
                    store_target, mode="w", append_dim="time", **self.version_kwargs
                )
    def test_append_with_existing_encoding_raises(self) -> None:
        """Supplying encoding when appending to an existing store raises ValueError."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            with pytest.raises(ValueError, match="but encoding was provided"):
                ds_to_append.to_zarr(
                    store_target,
                    append_dim="time",
                    encoding={"da": {"compressor": None}},
                    **self.version_kwargs,
                )
    @pytest.mark.parametrize("dtype", ["U", "S"])
    def test_append_string_length_mismatch_raises(self, dtype) -> None:
        """Appending strings of a different fixed width raises a dtype mismatch."""
        if has_zarr_v3 and not has_zarr_v3_dtypes:
            skip_if_zarr_format_3("This actually works fine with Zarr format 3")
        ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype)
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            with pytest.raises(ValueError, match="Mismatched dtypes for variable"):
                ds_to_append.to_zarr(
                    store_target, append_dim="time", **self.version_kwargs
                )
    @pytest.mark.parametrize("dtype", ["U", "S"])
    def test_append_string_length_mismatch_works(self, dtype) -> None:
        """With Zarr format 3 (pre-ZDtype), differing string widths append cleanly."""
        skip_if_zarr_format_2("This doesn't work with Zarr format 2")
        # ...but it probably would if we used object dtype
        if has_zarr_v3_dtypes:
            pytest.skip("This works on pre ZDtype Zarr-Python, but fails after.")

        ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype)
        expected = xr.concat([ds, ds_to_append], dim="time")

        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs)
            actual = xr.open_dataset(store_target, engine="zarr")
            xr.testing.assert_identical(expected, actual)
    def test_check_encoding_is_consistent_after_append(self) -> None:
        """Compressor encoding set at creation is preserved after an append."""
        ds, ds_to_append, _ = create_append_test_data()

        # check encoding consistency
        with self.create_zarr_target() as store_target:
            import numcodecs

            encoding_value: Any
            if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3:
                compressor = zarr.codecs.BloscCodec()
            else:
                compressor = numcodecs.Blosc()

            # zarr v3 uses plural "compressors" holding a tuple of codecs
            encoding_key = "compressors" if has_zarr_v3 else "compressor"
            encoding_value = (compressor,) if has_zarr_v3 else compressor

            encoding = {"da": {encoding_key: encoding_value}}
            ds.to_zarr(store_target, mode="w", encoding=encoding, **self.version_kwargs)
            original_ds = xr.open_dataset(
                store_target, engine="zarr", **self.version_kwargs
            )
            original_encoding = original_ds["da"].encoding[encoding_key]
            ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs)
            actual_ds = xr.open_dataset(
                store_target, engine="zarr", **self.version_kwargs
            )

            actual_encoding = actual_ds["da"].encoding[encoding_key]
            assert original_encoding == actual_encoding
            # the appended data must equal the concatenation of both inputs
            assert_identical(
                xr.open_dataset(
                    store_target, engine="zarr", **self.version_kwargs
                ).compute(),
                xr.concat([ds, ds_to_append], dim="time"),
            )
    def test_append_with_new_variable(self) -> None:
        """mode='a' can add a brand-new variable to an existing store."""
        ds, ds_to_append, ds_with_new_var = create_append_test_data()

        # check append mode for new variable
        with self.create_zarr_target() as store_target:
            combined = xr.concat([ds, ds_to_append], dim="time")
            combined.to_zarr(store_target, mode="w", **self.version_kwargs)
            assert_identical(
                combined,
                xr.open_dataset(store_target, engine="zarr", **self.version_kwargs),
            )
            ds_with_new_var.to_zarr(store_target, mode="a", **self.version_kwargs)
            combined = xr.concat([ds, ds_to_append], dim="time")
            combined["new_var"] = ds_with_new_var["new_var"]
            assert_identical(
                combined,
                xr.open_dataset(store_target, engine="zarr", **self.version_kwargs),
            )
    def test_append_with_append_dim_no_overwrite(self) -> None:
        """mode='a-' skips coordinates lacking the append_dim; mode='a' overwrites them."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w", **self.version_kwargs)
            original = xr.concat([ds, ds_to_append], dim="time")
            original2 = xr.concat([original, ds_to_append], dim="time")

            # overwrite a coordinate;
            # for mode='a-', this will not get written to the store
            # because it does not have the append_dim as a dim
            lon = ds_to_append.lon.to_numpy().copy()
            lon[:] = -999
            ds_to_append["lon"] = lon
            ds_to_append.to_zarr(
                store_target, mode="a-", append_dim="time", **self.version_kwargs
            )
            actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs)
            assert_identical(original, actual)

            # by default, mode="a" will overwrite all coordinates.
            ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs)
            actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs)
            lon = original2.lon.to_numpy().copy()
            lon[:] = -999
            original2["lon"] = lon
            assert_identical(original2, actual)
    @requires_dask
    def test_to_zarr_compute_false_roundtrip(self) -> None:
        """compute=False returns a Delayed; data appears only after .compute()."""
        from dask.delayed import Delayed

        original = create_test_data().chunk()

        with self.create_zarr_target() as store:
            delayed_obj = self.save(original, store, compute=False)
            assert isinstance(delayed_obj, Delayed)

            # make sure target store has not been written to yet
            with pytest.raises(AssertionError):
                with self.open(store) as actual:
                    assert_identical(original, actual)

            delayed_obj.compute()

            with self.open(store) as actual:
                assert_identical(original, actual)
    @requires_dask
    def test_to_zarr_append_compute_false_roundtrip(self) -> None:
        """Lazy (compute=False) writes work for both initial write and append."""
        from dask.delayed import Delayed

        ds, ds_to_append, _ = create_append_test_data()
        ds, ds_to_append = ds.chunk(), ds_to_append.chunk()

        with pytest.warns(SerializationWarning):
            with self.create_zarr_target() as store:
                delayed_obj = self.save(ds, store, compute=False, mode="w")
                assert isinstance(delayed_obj, Delayed)

                # nothing written until the delayed object is computed
                with pytest.raises(AssertionError):
                    with self.open(store) as actual:
                        assert_identical(ds, actual)

                delayed_obj.compute()

                with self.open(store) as actual:
                    assert_identical(ds, actual)

                delayed_obj = self.save(
                    ds_to_append, store, compute=False, append_dim="time"
                )
                assert isinstance(delayed_obj, Delayed)

                # likewise, the append is not materialized until computed
                with pytest.raises(AssertionError):
                    with self.open(store) as actual:
                        assert_identical(
                            xr.concat([ds, ds_to_append], dim="time"), actual
                        )

                delayed_obj.compute()

                with self.open(store) as actual:
                    assert_identical(xr.concat([ds, ds_to_append], dim="time"), actual)
    @pytest.mark.parametrize("chunk", [False, True])
    def test_save_emptydim(self, chunk) -> None:
        """Datasets containing a zero-length dimension round-trip correctly."""
        if chunk and not has_dask:
            pytest.skip("requires dask")
        ds = Dataset({"x": (("a", "b"), np.empty((5, 0))), "y": ("a", [1, 2, 5, 8, 9])})
        if chunk:
            ds = ds.chunk({})  # chunk dataset to save dask array
        with self.roundtrip(ds) as ds_reload:
            assert_identical(ds, ds_reload)
    @requires_dask
    def test_no_warning_from_open_emptydim_with_chunks(self) -> None:
        """Opening an empty-dimension dataset with chunks emits no warnings."""
        ds = Dataset({"x": (("a", "b"), np.empty((5, 0)))}).chunk({"a": 1})
        with assert_no_warnings():
            # filter out unrelated Zarr format-3 warnings
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message=".*Zarr format 3 specification.*",
                    category=UserWarning,
                )
                with self.roundtrip(ds, open_kwargs=dict(chunks={"a": 1})) as ds_reload:
                    assert_identical(ds, ds_reload)
    @pytest.mark.parametrize("consolidated", [False, True, None])
    @pytest.mark.parametrize("compute", [False, True])
    @pytest.mark.parametrize("use_dask", [False, True])
    @pytest.mark.parametrize("write_empty", [False, True, None])
    def test_write_region(self, consolidated, compute, use_dask, write_empty) -> None:
        """Region writes fill an initialized store slice-by-slice to the expected result."""
        if (use_dask or not compute) and not has_dask:
            pytest.skip("requires dask")

        zeros = Dataset({"u": (("x",), np.zeros(10))})
        nonzeros = Dataset({"u": (("x",), np.arange(1, 11))})

        if use_dask:
            zeros = zeros.chunk(2)
            nonzeros = nonzeros.chunk(2)

        with self.create_zarr_target() as store:
            # initialize the store with zeros (metadata only when compute=False)
            zeros.to_zarr(
                store,
                consolidated=consolidated,
                compute=compute,
                encoding={"u": dict(chunks=2)},
                **self.version_kwargs,
            )
            if compute:
                with xr.open_zarr(
                    store, consolidated=consolidated, **self.version_kwargs
                ) as actual:
                    assert_identical(actual, zeros)
            # write the real data two elements at a time via region writes
            for i in range(0, 10, 2):
                region = {"x": slice(i, i + 2)}
                nonzeros.isel(region).to_zarr(
                    store,
                    region=region,
                    consolidated=consolidated,
                    write_empty_chunks=write_empty,
                    **self.version_kwargs,
                )
            with xr.open_zarr(
                store, consolidated=consolidated, **self.version_kwargs
            ) as actual:
                assert_identical(actual, nonzeros)
def test_region_scalar(self) -> None:
ds = Dataset({"x": 0})
with self.create_zarr_target() as store:
ds.to_zarr(store)
ds.to_zarr(store, region={}, mode="r+")
with xr.open_zarr(store) as actual:
assert_identical(actual, ds)
    @pytest.mark.parametrize("mode", [None, "r+", "a"])
    def test_write_region_mode(self, mode) -> None:
        """Region writes produce identical results for mode None, 'r+', and 'a'."""
        zeros = Dataset({"u": (("x",), np.zeros(10))})
        nonzeros = Dataset({"u": (("x",), np.arange(1, 11))})
        with self.create_zarr_target() as store:
            zeros.to_zarr(store, **self.version_kwargs)
            for region in [{"x": slice(5)}, {"x": slice(5, 10)}]:
                nonzeros.isel(region).to_zarr(
                    store, region=region, mode=mode, **self.version_kwargs
                )
            with xr.open_zarr(store, **self.version_kwargs) as actual:
                assert_identical(actual, nonzeros)
    @requires_dask
    def test_write_preexisting_override_metadata(self) -> None:
        """Metadata should be overridden if mode="a" but not in mode="r+"."""
        original = Dataset(
            {"u": (("x",), np.zeros(10), {"variable": "original"})},
            attrs={"global": "original"},
        )
        both_modified = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "modified"})},
            attrs={"global": "modified"},
        )
        global_modified = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "original"})},
            attrs={"global": "modified"},
        )
        only_new_data = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "original"})},
            attrs={"global": "original"},
        )

        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False, **self.version_kwargs)
            both_modified.to_zarr(store, mode="a", **self.version_kwargs)
            with self.open(store) as actual:
                # NOTE: this arguably incorrect -- we should probably be
                # overriding the variable metadata, too. See the TODO note in
                # ZarrStore.set_variables.
                assert_identical(actual, global_modified)

        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False, **self.version_kwargs)
            both_modified.to_zarr(store, mode="r+", **self.version_kwargs)
            with self.open(store) as actual:
                assert_identical(actual, only_new_data)

        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False, **self.version_kwargs)
            # with region, the default mode becomes r+
            both_modified.to_zarr(
                store, region={"x": slice(None)}, **self.version_kwargs
            )
            with self.open(store) as actual:
                assert_identical(actual, only_new_data)
    def test_write_region_errors(self) -> None:
        """Invalid ``region`` arguments raise clear ValueError/TypeError messages."""
        data = Dataset({"u": (("x",), np.arange(5))})
        data2 = Dataset({"u": (("x",), np.array([10, 11]))})

        @contextlib.contextmanager
        def setup_and_verify_store(expected=data):
            # Write `data` to a fresh store, yield it, and on exit verify the
            # store's contents match `expected`.
            with self.create_zarr_target() as store:
                data.to_zarr(store, **self.version_kwargs)
                yield store
                with self.open(store) as actual:
                    assert_identical(actual, expected)

        # verify the base case works
        expected = Dataset({"u": (("x",), np.array([10, 11, 2, 3, 4]))})
        with setup_and_verify_store(expected) as store:
            data2.to_zarr(store, region={"x": slice(2)}, **self.version_kwargs)

        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=re.escape(
                    "cannot set region unless mode='a', mode='a-', mode='r+' or mode=None"
                ),
            ):
                data.to_zarr(
                    store, region={"x": slice(None)}, mode="w", **self.version_kwargs
                )

        with setup_and_verify_store() as store:
            with pytest.raises(TypeError, match=r"must be a dict"):
                data.to_zarr(store, region=slice(None), **self.version_kwargs)  # type: ignore[call-overload]

        with setup_and_verify_store() as store:
            with pytest.raises(TypeError, match=r"must be slice objects"):
                data2.to_zarr(store, region={"x": [0, 1]}, **self.version_kwargs)  # type: ignore[dict-item]

        with setup_and_verify_store() as store:
            with pytest.raises(ValueError, match=r"step on all slices"):
                data2.to_zarr(
                    store, region={"x": slice(None, None, 2)}, **self.version_kwargs
                )

        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"all keys in ``region`` are not in Dataset dimensions",
            ):
                data.to_zarr(store, region={"y": slice(None)}, **self.version_kwargs)

        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"all variables in the dataset to write must have at least one dimension in common",
            ):
                data2.assign(v=2).to_zarr(
                    store, region={"x": slice(2)}, **self.version_kwargs
                )

        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError, match=r"cannot list the same dimension in both"
            ):
                data.to_zarr(
                    store,
                    region={"x": slice(None)},
                    append_dim="x",
                    **self.version_kwargs,
                )

        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"variable 'u' already exists with different dimension sizes",
            ):
                data2.to_zarr(store, region={"x": slice(3)}, **self.version_kwargs)
    @requires_dask
    def test_encoding_chunksizes(self) -> None:
        """Chunked round-trip of a sliced dataset keeps values equal (GH2278)."""
        # regression test for GH2278
        # see also test_encoding_chunksizes_unlimited
        nx, ny, nt = 4, 4, 5
        original = xr.Dataset(
            {},
            coords={
                "x": np.arange(nx),
                "y": np.arange(ny),
                "t": np.arange(nt),
            },
        )
        original["v"] = xr.Variable(("x", "y", "t"), np.zeros((nx, ny, nt)))
        original = original.chunk({"t": 1, "x": 2, "y": 2})

        with self.roundtrip(original) as ds1:
            assert_equal(ds1, original)
            # round-tripping a selection must also preserve values
            with self.roundtrip(ds1.isel(t=0)) as ds2:
                assert_equal(ds2, original.isel(t=0))
@requires_dask
def test_chunk_encoding_with_partial_dask_chunks(self) -> None:
original = xr.Dataset(
{"x": xr.DataArray(np.random.random(size=(6, 8)), dims=("a", "b"))}
).chunk({"a": 3})
with self.roundtrip(
original, save_kwargs={"encoding": {"x": {"chunks": [3, 2]}}}
) as ds1:
assert_equal(ds1, original)
@requires_dask
def test_chunk_encoding_with_larger_dask_chunks(self) -> None:
original = xr.Dataset({"a": ("x", [1, 2, 3, 4])}).chunk({"x": 2})
with self.roundtrip(
original, save_kwargs={"encoding": {"a": {"chunks": [1]}}}
) as ds1:
assert_equal(ds1, original)
    @requires_cftime
    def test_open_zarr_use_cftime(self) -> None:
        """decode_times with use_cftime=True yields cftime datetimes on open."""
        ds = create_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, **self.version_kwargs)
            # default decoding round-trips identically
            ds_a = xr.open_zarr(store_target, **self.version_kwargs)
            assert_identical(ds, ds_a)
            decoder = CFDatetimeCoder(use_cftime=True)
            ds_b = xr.open_zarr(
                store_target, decode_times=decoder, **self.version_kwargs
            )
            assert xr.coding.times.contains_cftime_datetimes(ds_b.time.variable)
    def test_write_read_select_write(self) -> None:
        """Write, reopen via open_zarr, select, and write again (GH4084 regression)."""
        # Test for https://github.com/pydata/xarray/issues/4084
        ds = create_test_data()

        # NOTE: using self.roundtrip, which uses open_dataset, will not trigger the bug.
        with self.create_zarr_target() as initial_store:
            ds.to_zarr(initial_store, mode="w", **self.version_kwargs)
            ds1 = xr.open_zarr(initial_store, **self.version_kwargs)

            # Combination of where+squeeze triggers error on write.
            ds_sel = ds1.where(ds1.coords["dim3"] == "a", drop=True).squeeze("dim3")
            with self.create_zarr_target() as final_store:
                ds_sel.to_zarr(final_store, mode="w", **self.version_kwargs)
    @pytest.mark.parametrize("obj", [Dataset(), DataArray(name="foo")])
    def test_attributes(self, obj) -> None:
        """Serializable attrs round-trip; unserializable attrs raise TypeError."""
        obj = obj.copy()

        obj.attrs["good"] = {"key": "value"}
        ds = obj if isinstance(obj, Dataset) else obj.to_dataset()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, **self.version_kwargs)
            assert_identical(ds, xr.open_zarr(store_target, **self.version_kwargs))

        # a DataArray is not a valid attribute value
        obj.attrs["bad"] = DataArray()
        ds = obj if isinstance(obj, Dataset) else obj.to_dataset()
        with self.create_zarr_target() as store_target:
            with pytest.raises(TypeError, match=r"Invalid attribute in Dataset.attrs."):
                ds.to_zarr(store_target, **self.version_kwargs)
    @requires_dask
    @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
    def test_chunked_datetime64_or_timedelta64(self, dtype) -> None:
        """Chunked datetime64/timedelta64 variables keep their chunking on open."""
        # Generalized from @malmans2's test in PR #8253
        original = create_test_data().astype(dtype).chunk(1)
        with self.roundtrip(
            original,
            open_kwargs={
                "chunks": {},
                "decode_timedelta": CFTimedeltaCoder(time_unit="ns"),
            },
        ) as actual:
            for name, actual_var in actual.variables.items():
                assert original[name].chunks == actual_var.chunks
            assert original.chunks == actual.chunks
    @requires_cftime
    @requires_dask
    def test_chunked_cftime_datetime(self) -> None:
        """Chunked cftime datetime variables keep their chunking on open."""
        # Based on @malmans2's test in PR #8253
        times = date_range("2000", freq="D", periods=3, use_cftime=True)
        original = xr.Dataset(data_vars={"chunked_times": (["time"], times)})
        original = original.chunk({"time": 1})
        with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual:
            for name, actual_var in actual.variables.items():
                assert original[name].chunks == actual_var.chunks
            assert original.chunks == actual.chunks
    def test_cache_members(self) -> None:
        """
        Ensure that if `ZarrStore` is created with `cache_members` set to `True`,
        a `ZarrStore` only inspects the underlying zarr group once,
        and that the results of that inspection are cached.

        Otherwise, `ZarrStore.members` should inspect the underlying zarr group each time it is
        invoked
        """
        with self.create_zarr_target() as store_target:
            # writable store with caching disabled -- always sees live state
            zstore_mut = backends.ZarrStore.open_group(
                store_target, mode="w", cache_members=False
            )

            # ensure that the keys are sorted
            array_keys = sorted(("foo", "bar"))

            # create some arrays
            for ak in array_keys:
                zstore_mut.zarr_group.create(name=ak, shape=(1,), dtype="uint8")

            # read-only store with caching enabled -- snapshots current state
            zstore_stat = backends.ZarrStore.open_group(
                store_target, mode="r", cache_members=True
            )

            observed_keys_0 = sorted(zstore_stat.array_keys())
            assert observed_keys_0 == array_keys

            # create a new array
            new_key = "baz"
            zstore_mut.zarr_group.create(name=new_key, shape=(1,), dtype="uint8")

            # cached store must NOT see the new array...
            observed_keys_1 = sorted(zstore_stat.array_keys())
            assert observed_keys_1 == array_keys

            # ...but the uncached store must
            observed_keys_2 = sorted(zstore_mut.array_keys())
            assert observed_keys_2 == sorted(array_keys + [new_key])
@requires_dask
@pytest.mark.parametrize("dtype", [int, float])
def test_zarr_fill_value_setting(self, dtype):
# When zarr_format=2, _FillValue sets fill_value
# When zarr_format=3, fill_value is set independently
# We test this by writing a dask array with compute=False,
# on read we should receive chunks filled with `fill_value`
fv = -1
ds = xr.Dataset(
{"foo": ("x", dask.array.from_array(np.array([0, 0, 0], dtype=dtype)))}
)
expected = xr.Dataset({"foo": ("x", [fv] * 3)})
zarr_format_2 = (
has_zarr_v3 and zarr.config.get("default_zarr_format") == 2
) or not has_zarr_v3
if zarr_format_2:
attr = "_FillValue"
expected.foo.attrs[attr] = fv
else:
attr = "fill_value"
if dtype is float:
# for floats, Xarray inserts a default `np.nan`
expected.foo.attrs["_FillValue"] = np.nan
# turn off all decoding so we see what Zarr returns to us.
# Since chunks, are not written, we should receive on `fill_value`
open_kwargs = {
"mask_and_scale": False,
"consolidated": False,
"use_zarr_fill_value_as_mask": False,
}
save_kwargs = dict(compute=False, consolidated=False)
with self.roundtrip(
ds,
save_kwargs=ChainMap(save_kwargs, dict(encoding={"foo": {attr: fv}})),
open_kwargs=open_kwargs,
) as actual:
assert_identical(actual, expected)
ds.foo.encoding[attr] = fv
with self.roundtrip(
ds, save_kwargs=save_kwargs, open_kwargs=open_kwargs
) as actual:
assert_identical(actual, expected)
if zarr_format_2:
ds = ds.drop_encoding()
with pytest.raises(ValueError, match="_FillValue"):
with self.roundtrip(
ds,
save_kwargs=ChainMap(
save_kwargs, dict(encoding={"foo": {"fill_value": fv}})
),
open_kwargs=open_kwargs,
):
pass
# TODO: this doesn't fail because of the
# ``raise_on_invalid=vn in check_encoding_set`` line in zarr.py
# ds.foo.encoding["fill_value"] = fv
@requires_zarr
@pytest.mark.skipif(
KVStore is None, reason="zarr-python 2.x or ZARR_V3_EXPERIMENTAL_API is unset."
)
| ZarrBase |
python | nedbat__coveragepy | tests/test_html.py | {
"start": 6370,
"end": 6869
} | class ____:
"""A fake object to track how `open` is used to write files."""
def __init__(self, written: set[str]) -> None:
self.written = written
def open(self, filename: str, mode: str = "r", encoding: str | None = None) -> IO[str]:
"""Be just like `open`, but write written file names to `self.written`."""
if mode.startswith("w"):
self.written.add(filename.replace("\\", "/"))
return open(filename, mode, encoding=encoding)
| FileWriteTracker |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/fileassociations.py | {
"start": 1076,
"end": 3274
} | class ____(QDialog):
"""Input text dialog with regex validation."""
def __init__(self, parent=None, title='', label=''):
"""Input text dialog with regex validation."""
super().__init__(parent=parent)
self._reg = None
self._regex = None
# Widgets
self.label = QLabel()
self.lineedit = QLineEdit()
self.button_box = SpyderDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel
)
self.button_ok = self.button_box.button(QDialogButtonBox.Ok)
self.button_cancel = self.button_box.button(QDialogButtonBox.Cancel)
# Widget setup
self.setWindowTitle(title)
self.setMinimumWidth(500) # FIXME: use metrics
self.label.setText(label)
# Layout
layout = QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.lineedit)
layout.addSpacing(24) # FIXME: use metrics
layout.addWidget(self.button_box)
self.setLayout(layout)
# Signals
self.button_ok.clicked.connect(self.accept)
self.button_cancel.clicked.connect(self.reject)
self.lineedit.textChanged.connect(self.validate)
self.validate()
def validate(self):
"""Validate content."""
text = self.text().strip()
is_valid = bool(text)
if self._reg:
res = self._reg.match(text)
if res:
text_matched = res.group(0)
is_valid = is_valid and text_matched == text
else:
is_valid = False
self.button_ok.setEnabled(is_valid)
def set_regex_validation(self, regex):
"""Set the regular expression to validate content."""
self._regex = regex
self._reg = re.compile(regex, re.IGNORECASE)
validator = QRegularExpressionValidator(QRegularExpression(regex))
self.lineedit.setValidator(validator)
def text(self):
"""Return the text of the lineedit."""
return self.lineedit.text()
def set_text(self, text):
"""Set the text of the lineedit."""
self.lineedit.setText(text)
self.validate()
| InputTextDialog |
python | django__django | tests/model_formsets/models.py | {
"start": 1461,
"end": 1611
} | class ____(Book):
notes = models.CharField(max_length=100)
def __str__(self):
return "%s - %s" % (self.title, self.notes)
| AlternateBook |
python | anthropics__anthropic-sdk-python | tests/test_utils/test_proxy.py | {
"start": 120,
"end": 1007
} | class ____(LazyProxy[Any]):
@override
def __load__(self) -> Any:
return self
def __call__(self, *_args: Any, **_kwds: Any) -> Any:
raise RuntimeError("This should never be called!")
def test_recursive_proxy() -> None:
proxy = RecursiveLazyProxy()
assert repr(proxy) == "RecursiveLazyProxy"
assert str(proxy) == "RecursiveLazyProxy"
assert dir(proxy) == []
assert type(proxy).__name__ == "RecursiveLazyProxy"
assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy"
def test_isinstance_does_not_error() -> None:
class AlwaysErrorProxy(LazyProxy[Any]):
@override
def __load__(self) -> Any:
raise RuntimeError("Mocking missing dependency")
proxy = AlwaysErrorProxy()
assert not isinstance(proxy, dict)
assert isinstance(proxy, LazyProxy)
| RecursiveLazyProxy |
python | viewflow__viewflow | viewflow/workflow/flow/nodes.py | {
"start": 10097,
"end": 14367
} | class ____(
mixins.NodeDetailMixin,
mixins.NodeReviveMixin,
nodes.SplitFirst,
):
"""
Parallel split, as soon as the first task is completed, the remaining tasks
are cancelled.
The `SplitFirst` class is useful in workflows where you want to initiate
multiple parallel tasks but only require the first task to complete,
cancelling the rest once the first task finishes.
Example:
.. code-block:: python
class MyFlow(flow.Flow):
split_first = SplitFirst().Next(this.task_a).Next(this.task_b)
task_a = flow.View(views.UserView).Next(this.join)
task_b = celery.Timer(delay=timedelata(minutes=10)).Next(this.join)
join = flow.Join()
"""
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
undo_view_class = views.UndoTaskView
revive_view_class = views.ReviveTaskView
try:
class StartSubprocess(
mixins.NodeDetailMixin,
mixins.NodeCancelMixin,
mixins.NodeUndoMixin,
nodes.StartSubprocess,
):
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
cancel_view_class = views.CancelTaskView
undo_view_class = views.UndoTaskView
class Subprocess(
mixins.NodeDetailMixin,
mixins.NodeCancelMixin,
mixins.NodeUndoMixin,
mixins.NodeReviveMixin,
nodes.Subprocess,
):
"""
The ``Subprocess`` node in a flow **(PRO-only)**
This node is used to start a subprocess flow within a parent flow. The
subprocess must be completed before the parent flow can proceed.
.. code-block:: python
class ExampleSubFlow(flow.Flow):
start = flow.StartHandle(this.start_func).Next(this.task)
task = flow.Handle(this.task_func).Next(this.end)
end = flow.End()
def start_func(self, activation):
# get access to parent process and data
activation.process.parent_task.process.data
def task_func(self, activation):
pass
class MainFlowWithSubprocess(flow.Flow):
start = flow.StartHandle().Next(this.subprocess)
subprocess = flow.Subprocess(ExampleSubFlow.start).Next(this.end)
end = flow.End()
"""
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
cancel_view_class = views.CancelTaskView
undo_view_class = views.UndoTaskView
revive_view_class = views.ReviveTaskView
class NSubprocess(
mixins.NodeDetailMixin,
mixins.NodeCancelMixin,
mixins.NodeUndoMixin,
mixins.NodeReviveMixin,
nodes.NSubprocess,
):
"""
The ``NSubprocess`` node in a flow **(PRO-only)**
This node is used to start multiple instances of a subprocess flow within a
parent flow. Each instance processes a different item, and all subprocesses
must be completed before the parent flow can proceed.
.. code-block:: python
class ExampleSubFlow(flow.Flow):
start = flow.StartHandle(this.start_func).Next(this.task) task =
flow.Handle(this.task_func).Next(this.end)
end = flow.End()
def start_func(self, activation, item=0):
# instantialed with one of 1, 2, 3, 4 as item
activation.process.data = item
def task_func(self, activation):
activation.process.data += 100
class MainFlowWithNSubprocess(flow.Flow):
start = flow.StartFunction().Next(this.nsubprocess) nsubprocess =
flow.NSubprocess(ExampleSubFlow.start, lambda p: [1, 2, 3, 4]).Next(this.end)
end = flow.End()
"""
index_view_class = views.IndexTaskView
detail_view_class = views.DetailTaskView
cancel_view_class = views.CancelTaskView
undo_view_class = views.UndoTaskView
revive_view_class = views.ReviveTaskView
except AttributeError:
"""Pro-only functionality"""
pass
| SplitFirst |
python | realpython__materials | python-guitar-synthesizer/source_code_final/src/digitar/temporal.py | {
"start": 224,
"end": 2095
} | class ____:
seconds: Decimal
@classmethod
def from_milliseconds(cls, milliseconds: Numeric) -> Self:
return cls(Decimal(str(float(milliseconds))) / 1000)
def __init__(self, seconds: Numeric) -> None:
match seconds:
case int() | float():
object.__setattr__(self, "seconds", Decimal(str(seconds)))
case Decimal():
object.__setattr__(self, "seconds", seconds)
case Fraction():
object.__setattr__(
self, "seconds", Decimal(str(float(seconds)))
)
case _:
raise TypeError(f"unsupported type '{type(seconds).__name__}'")
def __add__(self, seconds: Numeric | Self) -> Self:
match seconds:
case Time() as time:
return Time(self.seconds + time.seconds)
case int() | Decimal():
return Time(self.seconds + seconds)
case float():
return Time(self.seconds + Decimal(str(seconds)))
case Fraction():
return Time(Fraction.from_decimal(self.seconds) + seconds)
case _:
raise TypeError(f"can't add '{type(seconds).__name__}'")
def __mul__(self, seconds: Numeric) -> Self:
match seconds:
case int() | Decimal():
return Time(self.seconds * seconds)
case float():
return Time(self.seconds * Decimal(str(seconds)))
case Fraction():
return Time(Fraction.from_decimal(self.seconds) * seconds)
case _:
raise TypeError(
f"can't multiply by '{type(seconds).__name__}'"
)
def get_num_samples(self, sampling_rate: Hertz) -> int:
return round(self.seconds * round(sampling_rate))
@dataclass
| Time |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_linked_artifacts.py | {
"start": 552,
"end": 690
} | class ____(GQLResult):
edges: List[FetchLinkedArtifactsArtifactArtifactMembershipsEdges]
| FetchLinkedArtifactsArtifactArtifactMemberships |
python | doocs__leetcode | solution/0600-0699/0667.Beautiful Arrangement II/Solution.py | {
"start": 0,
"end": 490
} | class ____:
def constructArray(self, n: int, k: int) -> List[int]:
l, r = 1, n
ans = []
for i in range(k):
if i % 2 == 0:
ans.append(l)
l += 1
else:
ans.append(r)
r -= 1
for i in range(k, n):
if k % 2 == 0:
ans.append(r)
r -= 1
else:
ans.append(l)
l += 1
return ans
| Solution |
python | walkccc__LeetCode | solutions/1493. Longest Subarray of 1's After Deleting One Element/1493.py | {
"start": 0,
"end": 308
} | class ____:
def longestSubarray(self, nums: list[int]) -> int:
ans = 0
zeros = 0
l = 0
for r, num in enumerate(nums):
if num == 0:
zeros += 1
while zeros == 2:
if nums[l] == 0:
zeros -= 1
l += 1
ans = max(ans, r - l)
return ans
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 2026,
"end": 2153
} | class ____(int, builtins.object):
... # Y040 Do not inherit from "object" explicitly, as it is redundant in Python 3
| AlsoBad |
python | PyCQA__pylint | tests/functional/m/missing/missing_docstring.py | {
"start": 313,
"end": 517
} | class ____: # [missing-class-docstring]
pass
def public_undocumented(): # [missing-function-docstring]
pass
def __sizeof__():
# Special
pass
def __mangled():
pass
| ClassUndocumented |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataform.py | {
"start": 3155,
"end": 4153
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(COMPILATION_RESULT_STR)
def test_execute(self, compilation_result_mock, hook_mock):
compilation_result = {
"git_commitish": "main",
"workspace": WORKSPACE,
}
op = DataformCreateCompilationResultOperator(
task_id="create_compilation_result",
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
compilation_result=compilation_result,
)
compilation_result_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.return_value.create_compilation_result.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
compilation_result=compilation_result,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataformCreateCompilationResult |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.