language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | django/db/models/functions/datetime.py | {
"start": 5254,
"end": 6285
} | class ____(Extract):
lookup_name = "second"
DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractIsoWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractIsoYear)
DateField.register_lookup(ExtractQuarter)
TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)
DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)
ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)
ExtractIsoYear.register_lookup(YearExact)
ExtractIsoYear.register_lookup(YearGt)
ExtractIsoYear.register_lookup(YearGte)
ExtractIsoYear.register_lookup(YearLt)
ExtractIsoYear.register_lookup(YearLte)
| ExtractSecond |
python | Pylons__pyramid | tests/test_config/pkgs/scannable/__init__.py | {
"start": 1442,
"end": 1662
} | class ____:
def __call__(self, context, request):
return 'grokked_instance'
grokked_instance = Foo()
grokked_instance = view_config(
name='grokked_instance', renderer=null_renderer
)(grokked_instance)
| Foo |
python | PyCQA__pylint | doc/data/messages/i/invalid-getnewargs-returned/good.py | {
"start": 0,
"end": 125
} | class ____:
"""__getnewargs__ returns <type 'tuple'>"""
def __getnewargs__(self):
return (1, 2)
| CustomGetNewArgs |
python | bokeh__bokeh | tests/unit/bokeh/util/test_token.py | {
"start": 1936,
"end": 8295
} | class ____:
def test_base64_roundtrip(self) -> None:
for s in [ "", "a", "ab", "abc", "abcd", "abcde", "abcdef", "abcdefg",
"abcdefgh", "abcdefghi",
"abcdefghijklmnopqrstuvwxyz" ]:
assert s == _b64_to_utf8(_base64_encode(s))
def test_reseed_if_needed(self) -> None:
# we have to set a seed in order to be able to get state
random.seed(codecs.encode("abcdefg", "utf-8"))
state = random.getstate()
_reseed_if_needed(using_sysrandom=True, secret_key=None)
# did NOT reseed
assert state == random.getstate()
# monkeypatch
saved = bokeh.util.token.random
try:
bokeh.util.token.random = random
_reseed_if_needed(using_sysrandom=False, secret_key="abc")
# DID reseed
assert state != random.getstate()
finally:
bokeh.util.token.random = saved
def test_signature(self) -> None:
sig = _signature("xyz", secret_key="abc")
with_same_key = _signature("xyz", secret_key="abc")
assert sig == with_same_key
with_different_key = _signature("xyz", secret_key="qrs")
assert sig != with_different_key
def test_generate_unsigned(self) -> None:
token = generate_jwt_token(generate_session_id(), signed=False)
assert '.' not in token
assert 123 == len(token)
assert "session_id" in json.loads(_b64_to_utf8(token))
another_token = generate_jwt_token(generate_session_id(), signed=False)
assert '.' not in another_token
assert 123 == len(another_token)
assert "session_id" in json.loads(_b64_to_utf8(another_token))
assert token != another_token
def test_payload_unsigned(self):
token = generate_jwt_token(generate_session_id(), signed=False, extra_payload=dict(foo=10))
assert '.' not in token
assert _TOKEN_ZLIB_KEY in json.loads(_b64_to_utf8(token))
payload = get_token_payload(token)
assert _TOKEN_ZLIB_KEY not in payload
assert payload['foo'] == 10
def test_payload_with_zlib_key(self):
token = generate_jwt_token(generate_session_id(), signed=False, extra_payload=dict([(_TOKEN_ZLIB_KEY, 10)]))
assert '.' not in token
assert _TOKEN_ZLIB_KEY in json.loads(_b64_to_utf8(token))
payload = get_token_payload(token)
assert _TOKEN_ZLIB_KEY in payload
assert payload[_TOKEN_ZLIB_KEY] == 10
def test_payload_error_unsigned(self):
session_id = generate_session_id()
with pytest.raises(RuntimeError):
generate_jwt_token(session_id, extra_payload=dict(session_id=10))
def test_generate_signed(self) -> None:
session_id = generate_session_id(signed=True, secret_key="abc")
token = generate_jwt_token(session_id, signed=True, secret_key="abc")
assert '.' in token
decoded = json.loads(_b64_to_utf8(token.split('.')[0]))
assert "session_id" in decoded
assert decoded['session_id'] == session_id
assert check_token_signature(token, secret_key="abc", signed=True)
assert not check_token_signature(token, secret_key="qrs", signed=True)
def test_payload_signed(self):
session_id = generate_session_id(signed=True, secret_key="abc")
token = generate_jwt_token(session_id, signed=True, secret_key="abc", extra_payload=dict(foo=10))
assert '.' in token
decoded = json.loads(_b64_to_utf8(token.split('.')[0]))
assert _TOKEN_ZLIB_KEY in decoded
assert 'session_id' in decoded
session_id = get_session_id(token)
assert check_token_signature(token, secret_key="abc", signed=True)
assert not check_token_signature(token, secret_key="qrs", signed=True)
payload = get_token_payload(token)
assert _TOKEN_ZLIB_KEY not in payload
assert payload['foo'] == 10
def test_payload_error(self):
session_id = generate_session_id()
with pytest.raises(RuntimeError):
generate_jwt_token(session_id, extra_payload=dict(session_id=10))
def test_check_signature_of_unsigned(self) -> None:
# secret shouldn't be used
token = generate_jwt_token(generate_session_id(), signed=False, secret_key="abc")
assert not check_token_signature(token, secret_key="abc", signed=True)
def test_check_signature_of_empty_string(self) -> None:
assert not check_token_signature("", secret_key="abc", signed=True)
def test_check_signature_of_junk_with_hyphen_in_it(self) -> None:
assert not check_token_signature("foo-bar-baz", secret_key="abc", signed=True)
def test_check_signature_with_signing_disabled(self) -> None:
assert check_token_signature("gobbledygook", secret_key="abc", signed=False)
def test_generate_secret_key(self) -> None:
key = generate_secret_key()
assert 44 == len(key)
key2 = generate_secret_key()
assert 44 == len(key2)
assert key != key2
def test_string_encoding_does_not_affect_session_id_check(self) -> None:
# originates from #6653
session_id = generate_session_id(signed=True, secret_key="abc")
token = generate_jwt_token(session_id, signed=True, secret_key="abc")
assert check_token_signature(token, secret_key="abc", signed=True)
def test_jwt_token_uses_utc_time(self) -> None:
# django server generates token using UTC timezone
token = generate_jwt_token("foo", expiration=0)
with patch.object(dt, "datetime", Mock(wraps=dt.datetime)) as patched_dt:
# mock bokeh server localtime to be UTC + 10
patched_dt.now.return_value = dt.datetime.now(tz=dt.timezone.utc) + dt.timedelta(hours=10)
payload = get_token_payload(token)
utcnow = calendar.timegm(dt.datetime.now(tz=dt.timezone.utc).timetuple())
assert utcnow -1 <= payload['session_expiry'] <= utcnow + 1
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| TestSessionId |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 69317,
"end": 75917
} | class ____(Response):
"""
Response of datasets.commit_version endpoint.
:param version: Committed version ID
:type version: str
:param parent: Committed version parent version ID
:type parent: str
:param dataset: Dataset ID
:type dataset: str
:param merged: Number of merged frames
:type merged: int
:param saved_and_updated: Number of saved and updated frames
:type saved_and_updated: int
:param deleted: Number of deleted frames
:type deleted: int
:param total: Total number of processed frames
:type total: int
:param failed: Number of failures
:type failed: int
:param errors: Failure details
:type errors: Sequence[dict]
"""
_service = "datasets"
_action = "commit_version"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"dataset": {"description": "Dataset ID", "type": ["string", "null"]},
"deleted": {
"description": "Number of deleted frames",
"type": ["integer", "null"],
},
"errors": {
"description": "Failure details",
"items": {
"additionalProperties": True,
"description": "Json object describing an update error",
"type": "object",
},
"type": ["array", "null"],
},
"failed": {
"description": "Number of failures",
"type": ["integer", "null"],
},
"merged": {
"description": "Number of merged frames",
"type": ["integer", "null"],
},
"parent": {
"description": "Committed version parent version ID",
"type": ["string", "null"],
},
"saved_and_updated": {
"description": "Number of saved and updated frames",
"type": ["integer", "null"],
},
"total": {
"description": "Total number of processed frames",
"type": ["integer", "null"],
},
"version": {
"description": "Committed version ID",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
version=None,
parent=None,
dataset=None,
merged=None,
saved_and_updated=None,
deleted=None,
total=None,
failed=None,
errors=None,
**kwargs
):
super(CommitVersionResponse, self).__init__(**kwargs)
self.version = version
self.parent = parent
self.dataset = dataset
self.merged = merged
self.saved_and_updated = saved_and_updated
self.deleted = deleted
self.total = total
self.failed = failed
self.errors = errors
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("parent")
def parent(self):
return self._property_parent
@parent.setter
def parent(self, value):
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("merged")
def merged(self):
return self._property_merged
@merged.setter
def merged(self, value):
if value is None:
self._property_merged = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "merged", six.integer_types)
self._property_merged = value
@schema_property("saved_and_updated")
def saved_and_updated(self):
return self._property_saved_and_updated
@saved_and_updated.setter
def saved_and_updated(self, value):
if value is None:
self._property_saved_and_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "saved_and_updated", six.integer_types)
self._property_saved_and_updated = value
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("total")
def total(self):
return self._property_total
@total.setter
def total(self, value):
if value is None:
self._property_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total", six.integer_types)
self._property_total = value
@schema_property("failed")
def failed(self):
return self._property_failed
@failed.setter
def failed(self, value):
if value is None:
self._property_failed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "failed", six.integer_types)
self._property_failed = value
@schema_property("errors")
def errors(self):
return self._property_errors
@errors.setter
def errors(self, value):
if value is None:
self._property_errors = None
return
self.assert_isinstance(value, "errors", (list, tuple))
self.assert_isinstance(value, "errors", (dict,), is_array=True)
self._property_errors = value
| CommitVersionResponse |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/_internal/expandinput.py | {
"start": 2995,
"end": 3877
} | class ____(ResolveMixin):
"""
Stand-in stub for task-group-mapping arguments.
This is very similar to an XComArg, but resolved differently. Declared here
(instead of in the task group module) to avoid import cycles.
"""
_input: ExpandInput = attrs.field()
_key: str
@_input.validator
def _validate_input(self, _, input):
if isinstance(input, DictOfListsExpandInput):
for value in input.value.values():
if isinstance(value, MappedArgument):
raise ValueError("Nested Mapped TaskGroups are not yet supported")
def iter_references(self) -> Iterable[tuple[Operator, str]]:
yield from self._input.iter_references()
def resolve(self, context: Mapping[str, Any]) -> Any:
data, _ = self._input.resolve(context)
return data[self._key]
@attrs.define()
| MappedArgument |
python | spyder-ide__spyder | spyder/plugins/completion/tests/test_configdialog.py | {
"start": 634,
"end": 1370
} | class ____(QMainWindow):
sig_setup_finished = Signal()
def __init__(self, parent):
super().__init__(parent)
self.statusbar = Mock()
self.console = Mock()
@pytest.mark.parametrize(
'config_dialog',
[[MainWindowMock, [], [CompletionPlugin]]],
indirect=True)
def test_config_dialog(config_dialog):
expected_titles = {'General', 'Snippets', 'Linting', 'Introspection',
'Code formatting', 'Advanced', 'Other languages'}
configpage = config_dialog.get_page()
assert configpage
tabs = configpage.tabs
for i in range(0, tabs.count()):
tab_text = tabs.tabText(i)
assert tab_text in expected_titles
configpage.save_to_conf()
| MainWindowMock |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 21544,
"end": 23755
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the server understood the request, but is
refusing to fulfill it.
code: 403, title: Forbidden
Raise this exception within :term:`view` code to immediately return the
:term:`forbidden view` to the invoking user. Usually this is a basic
``403`` page, but the forbidden view can be customized as necessary. See
:ref:`changing_the_forbidden_view`. A ``Forbidden`` exception will be
the ``context`` of a :term:`Forbidden View`.
This exception's constructor treats two arguments specially. The first
argument, ``detail``, should be a string. The value of this string will
be used as the ``message`` attribute of the exception object. The second
special keyword argument, ``result`` is usually an instance of
:class:`pyramid.security.Denied` or
:class:`pyramid.authorization.ACLDenied` each of which indicates a reason
for the forbidden error. However, ``result`` is also permitted to be just
a plain boolean ``False`` object or ``None``. The ``result`` value will
be used as the ``result`` attribute of the exception object.
It defaults to ``None``.
The :term:`Forbidden View` can use the attributes of a Forbidden
exception as necessary to provide extended information in an error
report shown to a user.
"""
# differences from webob.exc.HTTPForbidden:
#
# - accepts a ``result`` keyword argument
#
# - overrides constructor to set ``self.result``
#
# differences from older ``pyramid.exceptions.Forbidden``:
#
# - ``result`` must be passed as a keyword argument.
#
code = 403
title = 'Forbidden'
explanation = 'Access was denied to this resource.'
def __init__(
self,
detail=None,
headers=None,
comment=None,
body_template=None,
result=None,
**kw,
):
HTTPClientError.__init__(
self,
detail=detail,
headers=headers,
comment=comment,
body_template=body_template,
**kw,
)
self.result = result
| HTTPForbidden |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 55030,
"end": 55555
} | class ____(fixtures.TestBase):
__only_on__ = "postgresql"
__sparse_driver_backend__ = True
def test_reflection(self, connection, metadata):
Table(
"table",
metadata,
Column("x", Integer),
Column("y", postgresql.OID),
)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table(
"table",
m2,
autoload_with=connection,
)
assert isinstance(t2.c.y.type, postgresql.OID)
| OIDTest |
python | django__django | tests/migrations/test_migrations_squashed/0001_squashed_0002.py | {
"start": 43,
"end": 903
} | class ____(migrations.Migration):
replaces = [
("migrations", "0001_initial"),
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("rating", models.IntegerField(default=0)),
],
),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
(
"author",
models.ForeignKey("migrations.Author", models.SET_NULL, null=True),
),
],
),
]
| Migration |
python | python-openxml__python-docx | src/docx/oxml/shape.py | {
"start": 6126,
"end": 6364
} | class ____(BaseOxmlElement):
"""Used for ``<a:off>`` element, and perhaps others.
Specifies an x, y coordinate (point).
"""
x = RequiredAttribute("x", ST_Coordinate)
y = RequiredAttribute("y", ST_Coordinate)
| CT_Point2D |
python | PrefectHQ__prefect | src/prefect/server/events/ordering/__init__.py | {
"start": 1452,
"end": 1599
} | class ____(Protocol):
async def __call__(
self, event: ReceivedEvent, depth: int = 0
) -> None: ... # pragma: no cover
| event_handler |
python | scrapy__scrapy | scrapy/downloadermiddlewares/stats.py | {
"start": 1095,
"end": 2876
} | class ____:
def __init__(self, stats: StatsCollector):
self.stats: StatsCollector = stats
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
if not crawler.settings.getbool("DOWNLOADER_STATS"):
raise NotConfigured
assert crawler.stats
return cls(crawler.stats)
@_warn_spider_arg
def process_request(
self, request: Request, spider: Spider | None = None
) -> Request | Response | None:
self.stats.inc_value("downloader/request_count")
self.stats.inc_value(f"downloader/request_method_count/{request.method}")
reqlen = len(request_httprepr(request))
self.stats.inc_value("downloader/request_bytes", reqlen)
return None
@_warn_spider_arg
def process_response(
self, request: Request, response: Response, spider: Spider | None = None
) -> Request | Response:
self.stats.inc_value("downloader/response_count")
self.stats.inc_value(f"downloader/response_status_count/{response.status}")
reslen = (
len(response.body)
+ get_header_size(response.headers)
+ get_status_size(response.status)
+ 4
)
# response.body + b"\r\n"+ response.header + b"\r\n" + response.status
self.stats.inc_value("downloader/response_bytes", reslen)
return response
@_warn_spider_arg
def process_exception(
self, request: Request, exception: Exception, spider: Spider | None = None
) -> Request | Response | None:
ex_class = global_object_name(exception.__class__)
self.stats.inc_value("downloader/exception_count")
self.stats.inc_value(f"downloader/exception_type_count/{ex_class}")
return None
| DownloaderStats |
python | facebook__pyre-check | pyre_extensions/tests/safe_json_test.py | {
"start": 457,
"end": 507
} | class ____(Movie):
rating: float
| MovieWithRating |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 20295,
"end": 21374
} | class ____(Operation):
def __init__(self, threshold=0.5, *, name=None):
super().__init__(name=name)
self.threshold = threshold
def call(self, x):
return backend.nn.hard_shrink(x, self.threshold)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.hard_shrink", "keras.ops.nn.hard_shrink"])
def hard_shrink(x, threshold=0.5):
"""Hard Shrink activation function.
The Hard Shrink function is a thresholding operation defined as:
`f(x) = x` if `|x| > threshold`,
`f(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-0.5, 0., 1.])
>>> x_hard_shrink = keras.ops.hard_shrink(x)
>>> print(x_hard_shrink)
array([0. 0. 1.], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardShrink(threshold).symbolic_call(x)
return backend.nn.hard_shrink(x, threshold)
| HardShrink |
python | numpy__numpy | numpy/_typing/_nbit_base.py | {
"start": 2694,
"end": 2828
} | class ____(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
pass
@final
@set_module("numpy._typing")
| _32Bit |
python | plotly__plotly.py | plotly/graph_objs/splom/_unselected.py | {
"start": 233,
"end": 2420
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom"
_path_str = "splom.unselected"
_valid_props = {"marker"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.splom.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.splom.unselected.Marker`
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.Unselected`
marker
:class:`plotly.graph_objects.splom.unselected.Marker`
instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | celery__celery | celery/platforms.py | {
"start": 17822,
"end": 25610
} | class ____:
"""Convenience interface to :mod:`signals`.
If the requested signal isn't supported on the current platform,
the operation will be ignored.
Example:
>>> from celery.platforms import signals
>>> from proj.handlers import my_handler
>>> signals['INT'] = my_handler
>>> signals['INT']
my_handler
>>> signals.supported('INT')
True
>>> signals.signum('INT')
2
>>> signals.ignore('USR1')
>>> signals['USR1'] == signals.ignored
True
>>> signals.reset('USR1')
>>> signals['USR1'] == signals.default
True
>>> from proj.handlers import exit_handler, hup_handler
>>> signals.update(INT=exit_handler,
... TERM=exit_handler,
... HUP=hup_handler)
"""
ignored = _signal.SIG_IGN
default = _signal.SIG_DFL
def arm_alarm(self, seconds):
return _arm_alarm(seconds)
def reset_alarm(self):
return _signal.alarm(0)
def supported(self, name):
"""Return true value if signal by ``name`` exists on this platform."""
try:
self.signum(name)
except AttributeError:
return False
else:
return True
def signum(self, name):
"""Get signal number by name."""
if isinstance(name, numbers.Integral):
return name
if not isinstance(name, str) \
or not name.isupper():
raise TypeError('signal name must be uppercase string.')
if not name.startswith('SIG'):
name = 'SIG' + name
return getattr(_signal, name)
def reset(self, *signal_names):
"""Reset signals to the default signal handler.
Does nothing if the platform has no support for signals,
or the specified signal in particular.
"""
self.update((sig, self.default) for sig in signal_names)
def ignore(self, *names):
"""Ignore signal using :const:`SIG_IGN`.
Does nothing if the platform has no support for signals,
or the specified signal in particular.
"""
self.update((sig, self.ignored) for sig in names)
def __getitem__(self, name):
return _signal.getsignal(self.signum(name))
def __setitem__(self, name, handler):
"""Install signal handler.
Does nothing if the current platform has no support for signals,
or the specified signal in particular.
"""
try:
_signal.signal(self.signum(name), handler)
except (AttributeError, ValueError):
pass
def update(self, _d_=None, **sigmap):
"""Set signal handlers from a mapping."""
for name, handler in dict(_d_ or {}, **sigmap).items():
self[name] = handler
signals = Signals()
get_signal = signals.signum # compat
install_signal_handler = signals.__setitem__ # compat
reset_signal = signals.reset # compat
ignore_signal = signals.ignore # compat
def signal_name(signum):
"""Return name of signal from signal number."""
return SIGMAP[signum][3:]
def strargv(argv):
arg_start = 2 if 'manage' in argv[0] else 1
if len(argv) > arg_start:
return ' '.join(argv[arg_start:])
return ''
def set_pdeathsig(name):
"""Sends signal ``name`` to process when parent process terminates."""
if signals.supported('SIGKILL'):
try:
_set_pdeathsig(signals.signum('SIGKILL'))
except OSError:
# We ignore when OS does not support set_pdeathsig
pass
def set_process_title(progname, info=None):
"""Set the :command:`ps` name for the currently running process.
Only works if :pypi:`setproctitle` is installed.
"""
proctitle = f'[{progname}]'
proctitle = f'{proctitle} {info}' if info else proctitle
if _setproctitle:
_setproctitle.setproctitle(safe_str(proctitle))
return proctitle
if os.environ.get('NOSETPS'): # pragma: no cover
def set_mp_process_title(*a, **k):
"""Disabled feature."""
else:
def set_mp_process_title(progname, info=None, hostname=None):
"""Set the :command:`ps` name from the current process name.
Only works if :pypi:`setproctitle` is installed.
"""
if hostname:
progname = f'{progname}: {hostname}'
name = current_process().name if current_process else 'MainProcess'
return set_process_title(f'{progname}:{name}', info=info)
def get_errno_name(n):
"""Get errno for string (e.g., ``ENOENT``)."""
if isinstance(n, str):
return getattr(errno, n)
return n
@contextmanager
def ignore_errno(*errnos, **kwargs):
"""Context manager to ignore specific POSIX error codes.
Takes a list of error codes to ignore: this can be either
the name of the code, or the code integer itself::
>>> with ignore_errno('ENOENT'):
... with open('foo', 'r') as fh:
... return fh.read()
>>> with ignore_errno(errno.ENOENT, errno.EPERM):
... pass
Arguments:
types (Tuple[Exception]): A tuple of exceptions to ignore
(when the errno matches). Defaults to :exc:`Exception`.
"""
types = kwargs.get('types') or (Exception,)
errnos = [get_errno_name(errno) for errno in errnos]
try:
yield
except types as exc:
if not hasattr(exc, 'errno'):
raise
if exc.errno not in errnos:
raise
def check_privileges(accept_content):
if grp is None or pwd is None:
return
pickle_or_serialize = ('pickle' in accept_content
or 'application/group-python-serialize' in accept_content)
uid = os.getuid() if hasattr(os, 'getuid') else 65535
gid = os.getgid() if hasattr(os, 'getgid') else 65535
euid = os.geteuid() if hasattr(os, 'geteuid') else 65535
egid = os.getegid() if hasattr(os, 'getegid') else 65535
if hasattr(os, 'fchown'):
if not all(hasattr(os, attr)
for attr in ('getuid', 'getgid', 'geteuid', 'getegid')):
raise SecurityError('suspicious platform, contact support')
# Get the group database entry for the current user's group and effective
# group id using grp.getgrgid() method
# We must handle the case where either the gid or the egid are not found.
try:
gid_entry = grp.getgrgid(gid)
egid_entry = grp.getgrgid(egid)
except KeyError:
warnings.warn(SecurityWarning(ASSUMING_ROOT))
_warn_or_raise_security_error(egid, euid, gid, uid,
pickle_or_serialize)
return
# Get the group and effective group name based on gid
gid_grp_name = gid_entry[0]
egid_grp_name = egid_entry[0]
# Create lists to use in validation step later.
gids_in_use = (gid_grp_name, egid_grp_name)
groups_with_security_risk = ('sudo', 'wheel')
is_root = uid == 0 or euid == 0
# Confirm that the gid and egid are not one that
# can be used to escalate privileges.
if is_root or any(group in gids_in_use
for group in groups_with_security_risk):
_warn_or_raise_security_error(egid, euid, gid, uid,
pickle_or_serialize)
def _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize):
c_force_root = os.environ.get('C_FORCE_ROOT', False)
if pickle_or_serialize and not c_force_root:
raise SecurityError(ROOT_DISALLOWED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
))
warnings.warn(SecurityWarning(ROOT_DISCOURAGED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
)))
| Signals |
python | Pylons__pyramid | tests/test_scripts/test_proutes.py | {
"start": 25691,
"end": 25949
} | class ____(unittest.TestCase):
def _callFUT(self, argv):
from pyramid.scripts.proutes import main
return main(argv, quiet=True)
def test_it(self):
result = self._callFUT(['proutes'])
self.assertEqual(result, 2)
| Test_main |
python | agronholm__apscheduler | src/apscheduler/triggers/cron/expressions.py | {
"start": 4626,
"end": 5563
} | class ____(RangeExpression):
value_re: ClassVar[Pattern] = re.compile(
r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE
)
def __init__(self, first: str, last: str | None = None):
try:
first_num = MONTHS.index(first.lower()) + 1
except ValueError:
raise ValueError(f"Invalid month name {first!r}") from None
if last:
try:
last_num = MONTHS.index(last.lower()) + 1
except ValueError:
raise ValueError(f"Invalid month name {last!r}") from None
else:
last_num = None
super().__init__(first=first_num, last=last_num)
def __str__(self) -> str:
if self.last != self.first and self.last is not None:
return f"{MONTHS[self.first - 1]}-{MONTHS[self.last - 1]}"
return MONTHS[self.first - 1]
@attrs.define(kw_only=True, init=False)
| MonthRangeExpression |
python | sqlalchemy__sqlalchemy | test/dialect/test_all.py | {
"start": 112,
"end": 555
} | class ____(fixtures.TestBase):
def _all_dialect_packages(self):
return [
getattr(__import__("sqlalchemy.dialects.%s" % d).dialects, d)
for d in dialects.__all__
if not d.startswith("_")
]
def test_all_import(self):
for package in self._all_dialect_packages():
for item_name in package.__all__:
is_not(None, getattr(package, item_name))
| ImportStarTest |
python | django__django | tests/shortcuts/tests.py | {
"start": 1850,
"end": 2598
} | class ____(SimpleTestCase):
def test_redirect_response_status_code(self):
tests = [
(True, False, 301),
(False, False, 302),
(False, True, 307),
(True, True, 308),
]
for permanent, preserve_request, expected_status_code in tests:
with self.subTest(permanent=permanent, preserve_request=preserve_request):
response = redirect(
"/path/is/irrelevant/",
permanent=permanent,
preserve_request=preserve_request,
)
self.assertIsInstance(response, HttpResponseRedirectBase)
self.assertEqual(response.status_code, expected_status_code)
| RedirectTests |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 36211,
"end": 39231
} | class ____:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
rshft_2 = rshft_0[[2, 3, 0, 1]]
rshft_3 = rshft_0[[1, 2, 3, 0]]
rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
stacked = np.block([[[rshft_0]]] * 2)
# FIXME the 'e' dtype might work in future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_one(self, dt):
def tz(mat):
mz = matrix_power(mat, 1)
assert_equal(mz, mat)
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_two(self, dt):
def tz(mat):
mz = matrix_power(mat, 2)
mmul = matmul if mat.dtype != object else dot
assert_equal(mz, mmul(mat, mat))
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_minus_one(self, dt):
def tz(mat):
invmat = matrix_power(mat, -1)
mmul = matmul if mat.dtype != object else dot
assert_almost_equal(
mmul(invmat, mat), identity_like_generalized(mat))
for mat in self.rshft_all:
if dt not in self.dtnoinv:
tz(mat.astype(dt))
def test_exceptions_bad_power(self, dt):
mat = self.rshft_0.astype(dt)
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_exceptions_not_invertible(self, dt):
if dt in self.dtnoinv:
return
mat = self.noninv.astype(dt)
assert_raises(LinAlgError, matrix_power, mat, -1)
| TestMatrixPower |
python | redis__redis-py | redis/commands/search/__init__.py | {
"start": 318,
"end": 3872
} | class ____(SearchCommands):
"""
Create a client for talking to search.
It abstracts the API of the module and lets you just use the engine.
"""
class BatchIndexer:
"""
A batch indexer allows you to automatically batch
document indexing in pipelines, flushing it every N documents.
"""
def __init__(self, client, chunk_size=1000):
self.client = client
self.execute_command = client.execute_command
self._pipeline = client.pipeline(transaction=False, shard_hint=None)
self.total = 0
self.chunk_size = chunk_size
self.current_chunk = 0
def __del__(self):
if self.current_chunk:
self.commit()
def add_document(
self,
doc_id,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
no_create=False,
**fields,
):
"""
Add a document to the batch query
"""
self.client._add_document(
doc_id,
conn=self._pipeline,
nosave=nosave,
score=score,
payload=payload,
replace=replace,
partial=partial,
no_create=no_create,
**fields,
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
self.commit()
def add_document_hash(self, doc_id, score=1.0, replace=False):
"""
Add a hash to the batch query
"""
self.client._add_document_hash(
doc_id, conn=self._pipeline, score=score, replace=replace
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
self.commit()
def commit(self):
"""
Manually commit and flush the batch indexing query
"""
self._pipeline.execute()
self.current_chunk = 0
def __init__(self, client, index_name="idx"):
"""
Create a new Client for the given index_name.
The default name is `idx`
If conn is not None, we employ an already existing redis connection
"""
self._MODULE_CALLBACKS = {}
self.client = client
self.index_name = index_name
self.execute_command = client.execute_command
self._pipeline = client.pipeline
self._RESP2_MODULE_CALLBACKS = {
INFO_CMD: self._parse_info,
SEARCH_CMD: self._parse_search,
HYBRID_CMD: self._parse_hybrid_search,
AGGREGATE_CMD: self._parse_aggregate,
PROFILE_CMD: self._parse_profile,
SPELLCHECK_CMD: self._parse_spellcheck,
CONFIG_CMD: self._parse_config_get,
SYNDUMP_CMD: self._parse_syndump,
}
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the SEARCH module, that can be used for executing
SEARCH commands, as well as classic core commands.
"""
p = Pipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self._MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p.index_name = self.index_name
return p
| Search |
python | optuna__optuna | optuna/visualization/_edf.py | {
"start": 727,
"end": 4482
} | class ____(NamedTuple):
lines: list[_EDFLineInfo]
x_values: np.ndarray
def plot_edf(
study: Study | Sequence[Study],
*,
target: Callable[[FrozenTrial], float] | None = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot the objective value EDF (empirical distribution function) of a study.
Note that only the complete trials are considered when plotting the EDF.
.. note::
EDF is useful to analyze and improve search spaces.
For instance, you can see a practical use case of EDF in the paper
`Designing Network Design Spaces
<https://doi.ieeecomputersociety.org/10.1109/CVPR42600.2020.01044>`__.
.. note::
The plotted EDF assumes that the value of the objective function is in
accordance with the uniform distribution over the objective space.
Args:
study:
A target :class:`~optuna.study.Study` object.
You can pass multiple studies if you want to compare those EDFs.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective optimization.
target_name:
Target's name to display on the axis label.
Returns:
A :class:`plotly.graph_objects.Figure` object.
"""
_imports.check()
layout = go.Layout(
title="Empirical Distribution Function Plot",
xaxis={"title": target_name},
yaxis={"title": "Cumulative Probability"},
)
info = _get_edf_info(study, target, target_name)
edf_lines = info.lines
if len(edf_lines) == 0:
return go.Figure(data=[], layout=layout)
traces = []
for study_name, y_values in edf_lines:
traces.append(go.Scatter(x=info.x_values, y=y_values, name=study_name, mode="lines"))
figure = go.Figure(data=traces, layout=layout)
figure.update_yaxes(range=[0, 1])
return figure
def _get_edf_info(
study: Study | Sequence[Study],
target: Callable[[FrozenTrial], float] | None = None,
target_name: str = "Objective Value",
) -> _EDFInfo:
if isinstance(study, Study):
studies = [study]
else:
studies = list(study)
_check_plot_args(studies, target, target_name)
if len(studies) == 0:
_logger.warning("There are no studies.")
return _EDFInfo(lines=[], x_values=np.array([]))
if target is None:
def _target(t: FrozenTrial) -> float:
return cast(float, t.value)
target = _target
study_names = []
all_values: list[np.ndarray] = []
for study in studies:
trials = _filter_nonfinite(
study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target
)
values = np.array([target(trial) for trial in trials])
all_values.append(values)
study_names.append(study.study_name)
if all(len(values) == 0 for values in all_values):
_logger.warning("There are no complete trials.")
return _EDFInfo(lines=[], x_values=np.array([]))
min_x_value = np.min(np.concatenate(all_values))
max_x_value = np.max(np.concatenate(all_values))
x_values = np.linspace(min_x_value, max_x_value, NUM_SAMPLES_X_AXIS)
edf_line_info_list = []
for study_name, values in zip(study_names, all_values):
y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size
edf_line_info_list.append(_EDFLineInfo(study_name=study_name, y_values=y_values))
return _EDFInfo(lines=edf_line_info_list, x_values=x_values)
| _EDFInfo |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/utils.py | {
"start": 9995,
"end": 14268
} | class ____:
pretty_printer = PrettyPrinter()
def __init__(self):
self.filename_cache = {}
def get_source_file(self, frame):
frame_filename = frame.f_code.co_filename
value = self.filename_cache.get(frame_filename)
if value is None:
filename = inspect.getsourcefile(frame)
if filename is None:
is_source = False
filename = frame_filename
else:
is_source = True
# Ensure linecache validity the first time this recorder
# encounters the filename in this frame.
linecache.checkcache(filename)
value = (filename, is_source)
self.filename_cache[frame_filename] = value
return value
def get_stack_trace(
self,
*,
excluded_modules: Sequence[str] | None = None,
include_locals: bool = False,
skip: int = 0,
):
trace = []
skip += 1 # Skip the frame for this method.
for frame in _stack_frames(skip=skip):
if _is_excluded_frame(frame, excluded_modules):
continue
filename, is_source = self.get_source_file(frame)
line_no = frame.f_lineno
func_name = frame.f_code.co_name
if is_source:
module = inspect.getmodule(frame, filename)
module_globals = module.__dict__ if module is not None else None
source_line = linecache.getline(
filename, line_no, module_globals
).strip()
else:
source_line = ""
if include_locals:
frame_locals = self.pretty_printer.pformat(frame.f_locals)
else:
frame_locals = None
trace.append((filename, line_no, func_name, source_line, frame_locals))
trace.reverse()
return trace
def get_stack_trace(*, skip=0):
"""
Return a processed stack trace for the current call stack.
If the ``ENABLE_STACKTRACES`` setting is False, return an empty :class:`list`.
Otherwise return a :class:`list` of processed stack frame tuples (file name, line
number, function name, source line, frame locals) for the current call stack. The
first entry in the list will be for the bottom of the stack and the last entry will
be for the top of the stack.
``skip`` is an :class:`int` indicating the number of stack frames above the frame
for this function to omit from the stack trace. The default value of ``0`` means
that the entry for the caller of this function will be the last entry in the
returned stack trace.
"""
config = dt_settings.get_config()
if not config["ENABLE_STACKTRACES"]:
return []
skip += 1 # Skip the frame for this function.
stack_trace_recorder = getattr(_local_data, "stack_trace_recorder", None)
if stack_trace_recorder is None:
stack_trace_recorder = _StackTraceRecorder()
_local_data.stack_trace_recorder = stack_trace_recorder
return stack_trace_recorder.get_stack_trace(
excluded_modules=config["HIDE_IN_STACKTRACES"],
include_locals=config["ENABLE_STACKTRACES_LOCALS"],
skip=skip,
)
def clear_stack_trace_caches():
if hasattr(_local_data, "stack_trace_recorder"):
del _local_data.stack_trace_recorder
_HTML_TYPES = ("text/html", "application/xhtml+xml")
def is_processable_html_response(response):
content_encoding = response.get("Content-Encoding", "")
content_type = response.get("Content-Type", "").split(";")[0]
return (
not getattr(response, "streaming", False)
and content_encoding == ""
and content_type in _HTML_TYPES
)
def get_csp_nonce(request) -> str | None:
"""
Retrieve the Content Security Policy nonce from a request if there is one.
This supports both the django-csp and the built-in Django implementations.
"""
# django-csp uses request.csp_nonce
csp_nonce = getattr(request, "csp_nonce", None)
if csp_nonce is not None:
return csp_nonce
# Django's built-in CSP support uses get_nonce(request)
return compat.get_nonce(request)
| _StackTraceRecorder |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py | {
"start": 97,
"end": 288
} | class ____(dg.ConfigurableIOManager):
def handle_output(self, context: dg.OutputContext, obj):
pass
def load_input(self, context: dg.InputContext):
pass
| PandasIOManager |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 135693,
"end": 140895
} | class ____(TorchHigherOrderOperatorVariable):
@staticmethod
def normalize_to_args(args, kwargs):
# input signature is (query, key, value, score_mod, block_mask, *other_buffers),
# block_mask is a tuple, and we don't want to flatten it.
# only flatten kwargs into lists
flat_kwargs = pytree.tree_flatten(kwargs)[0]
# Combine the flattened lists
all_args = args + flat_kwargs
return all_args
def create_wrapped_node(
self,
tx: "InstructionTranslator",
query: "VariableTracker",
fn: "VariableTracker",
fn_name: str,
):
from .._trace_wrapped_higher_order_op import TransformGetItemToIndex
def create_scalar():
return query.call_method(
tx,
"new_empty",
(VariableTracker.build(tx, []),),
{
"dtype": VariableTracker.build(tx, torch.int32),
},
)
with discard_graph_changes(tx):
bhmn = [create_scalar() for _ in range(4)]
if fn_name == "score_mod":
scores_require_grad: bool = query.requires_grad
score = query.call_method(
tx,
"new_empty",
(VariableTracker.build(tx, []),),
{"requires_grad": VariableTracker.build(tx, scores_require_grad)},
)
new_args = [score, *bhmn]
else:
assert fn_name == "mask_fn", "Illegal function name: " + fn_name
new_args = [*bhmn]
with TransformGetItemToIndex():
(
(_body_output, _body_spec),
body_graph,
body_lifted_freevars,
) = speculate_subgraph(
tx,
fn,
new_args,
{}, # expect only args no kwargs for now
description=fn_name,
source_target=self.value,
set_subgraph_inputs="flatten_manual",
)
body_name = tx.output.install_subgraph(
fn_name,
torch.fx.GraphModule(tx.output.nn_modules, body_graph),
)
body_node = make_attr(tx, body_name)
# It is possible that the score-mod function captures some free variables that are not
# passed in as arguments. In this case, we need to lift them, which is handled by speculate_subgraph.
# We then need to create proxies for this + the inputs.
lifted_args = tuple(arg for arg in body_lifted_freevars)
proxy_args = (body_node, lifted_args)
return proxy_args
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from .builder import wrap_fx_proxy
(
query,
key,
value,
score_mod,
block_mask,
scale,
kernel_options,
) = self.normalize_to_args(args, kwargs)
score_mod_node, score_mod_lifted_args = self.create_wrapped_node(
tx, query, score_mod, "score_mod"
)
mask_fn = block_mask.items[-1]
if isinstance(mask_fn, ConstantVariable):
mask_fn = UserFunctionVariable(torch.nn.attention._flex_attention._no_mask)
mask_fn_node, mask_fn_lifted_args = self.create_wrapped_node(
tx, query, mask_fn, "mask_fn"
)
proxied_args = [
query,
key,
value,
TupleVariable(block_mask.items[:-1], source=block_mask.source),
scale,
kernel_options,
]
# Store the invocation as a call
# Norm_kwargs contains the score_function and we dont want to proxy this because
# Proxying user defined functions is not supported.
inp_args, _ = proxy_args_kwargs(proxied_args, {})
# Compose the ordered HOO args:
# - inp_args: [query, key, value, block_mask, scale, kernel_options]
# - subgraph node: [score_mod, mask_fn_node]
# - lifted args from tracing subgraph: [score_mod_other_buffers, mask_fn_other_buffers]
_, _, _, inp_arg_block_mask, inp_arg_scale, inp_arg_kernel_options = inp_args
block_mask = tuple(inp_arg_block_mask + (mask_fn_node,))
with torch.fx.experimental.proxy_tensor.set_original_aten_op(self.value):
proxy = wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
self.value,
args=inp_args[:3]
+ (
score_mod_node,
block_mask,
inp_arg_scale,
inp_arg_kernel_options,
score_mod_lifted_args,
mask_fn_lifted_args,
),
kwargs={},
),
example_value=None,
)
return proxy
| FlexAttentionHigherOrderVariable |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/grid_finder.py | {
"start": 3748,
"end": 4644
} | class ____(Transform):
"""A transform defined by two user-set functions."""
input_dims = output_dims = 2
def __init__(self, forward, backward):
"""
Parameters
----------
forward, backward : callable
The forward and backward transforms, taking ``x`` and ``y`` as
separate arguments and returning ``(tr_x, tr_y)``.
"""
# The normal Matplotlib convention would be to take and return an
# (N, 2) array but axisartist uses the transposed version.
super().__init__()
self._forward = forward
self._backward = backward
def transform_non_affine(self, values):
# docstring inherited
return np.transpose(self._forward(*np.transpose(values)))
def inverted(self):
# docstring inherited
return type(self)(self._backward, self._forward)
| _User2DTransform |
python | huggingface__transformers | src/transformers/models/mobilebert/modeling_mobilebert.py | {
"start": 34141,
"end": 34617
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
@auto_docstring(
custom_intro="""
MobileBert Model with a `next sentence prediction (classification)` head on top.
"""
)
| MobileBertOnlyNSPHead |
python | django__django | tests/admin_views/models.py | {
"start": 7374,
"end": 7781
} | class ____(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, models.CASCADE, related_name="accounts")
servicename = "generic service"
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
| Account |
python | dask__distributed | distributed/diagnostics/progress.py | {
"start": 10707,
"end": 13908
} | class ____(SchedulerPlugin):
"""Keep track of high-level timing information for task group progress"""
name: ClassVar[str] = "group-timing"
time: list[float]
compute: dict[str, list[float]]
nthreads: list[float]
def __init__(self, scheduler):
self.scheduler = scheduler
# Time bin size (in seconds). TODO: make this configurable?
self.dt = 1.0
# Initialize our data structures.
self._init()
def _init(self) -> None:
"""Shared initializatoin code between __init__ and restart"""
now = time()
# Timestamps for tracking compute durations by task group.
# Start with length 2 so that we always can compute a valid dt later.
self.time = [now] * 2
# The amount of compute since the last timestamp
self.compute = {}
# The number of threads at the time
self.nthreads = [self.scheduler.total_nthreads] * 2
def transition(self, key, start, finish, *args, **kwargs):
# We are mostly interested in when tasks complete for now, so just look
# for when processing transitions to memory. Later we could also extend
# this if we can come up with useful visual channels to show it in.
if start == "processing" and finish == "memory":
startstops = kwargs.get("startstops")
if not startstops:
logger.warning(
f"Task {key} finished processing, but timing information seems to "
"be missing"
)
return
# Possibly extend the timeseries if another dt has passed
now = time()
self.time[-1] = now
while self.time[-1] - self.time[-2] > self.dt:
self.time[-1] = self.time[-2] + self.dt
self.time.append(now)
self.nthreads.append(self.scheduler.total_nthreads)
for g in self.compute.values():
g.append(0.0)
# Get the task
task = self.scheduler.tasks[key]
group = task.group
# If the group is new, add it to the timeseries as if it has been
# here the whole time
if group.name not in self.compute:
self.compute[group.name] = [0.0] * len(self.time)
for startstop in startstops:
if startstop["action"] != "compute":
continue
stop = startstop["stop"]
start = startstop["start"]
idx = len(self.time) - 1
# If the stop time is after the most recent bin,
# roll back the current index. Not clear how often this happens.
while idx > 0 and self.time[idx - 1] > stop:
idx -= 1
# Allocate the timing information of the task to the time bins.
while idx > 0 and stop > start:
delta = stop - max(self.time[idx - 1], start)
self.compute[group.name][idx] += delta
stop -= delta
idx -= 1
def restart(self, scheduler):
self._init()
| GroupTiming |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image04.py | {
"start": 339,
"end": 2851
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image04.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_footer(
"&L&G&C&G&R&G",
{
"image_left": self.image_dir + "red.jpg",
"image_center": self.image_dir + "blue.jpg",
"image_right": self.image_dir + "yellow.jpg",
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_picture(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_footer(
"&L&[Picture]&C&G&R&[Picture]",
{
"image_left": self.image_dir + "red.jpg",
"image_center": self.image_dir + "blue.jpg",
"image_right": self.image_dir + "yellow.jpg",
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_from_bytesio(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.jpg", "rb") as image_file_left:
image_data_left = BytesIO(image_file_left.read())
with open(self.image_dir + "blue.jpg", "rb") as image_file_center:
image_data_center = BytesIO(image_file_center.read())
with open(self.image_dir + "yellow.jpg", "rb") as image_file_right:
image_data_right = BytesIO(image_file_right.read())
worksheet.set_footer(
"&L&G&C&G&R&G",
{
"image_left": "red.jpg",
"image_center": "blue.jpg",
"image_right": "yellow.jpg",
"image_data_left": image_data_left,
"image_data_center": image_data_center,
"image_data_right": image_data_right,
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/apache/hive/src/airflow/providers/apache/hive/hooks/hive.py | {
"start": 33561,
"end": 44786
} | class ____(DbApiHook):
"""
Wrapper around the pyhive library.
Notes:
* the default auth_mechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
:param hiveserver2_conn_id: Reference to the
:ref: `Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
:param schema: Hive database name.
"""
conn_name_attr = "hiveserver2_conn_id"
default_conn_name = "hiveserver2_default"
conn_type = "hiveserver2"
hook_name = "Hive Server 2 Thrift"
supports_autocommit = False
def get_conn(self, schema: str | None = None) -> Any:
"""Return a Hive connection object."""
username: str | None = None
password: str | None = None
db = self.get_connection(self.get_conn_id())
auth_mechanism = db.extra_dejson.get("auth_mechanism", "NONE")
if auth_mechanism == "NONE" and db.login is None:
# we need to give a username
username = "airflow"
kerberos_service_name = None
if conf.get("core", "security") == "kerberos":
auth_mechanism = db.extra_dejson.get("auth_mechanism", "KERBEROS")
kerberos_service_name = db.extra_dejson.get("kerberos_service_name", "hive")
# Password should be set if and only if in LDAP or CUSTOM mode
if auth_mechanism in ("LDAP", "CUSTOM"):
password = db.password
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=password,
database=schema or db.schema or "default",
)
def _get_results(
self,
sql: str | list[str],
schema: str = "default",
fetch_size: int | None = None,
hive_conf: Iterable | Mapping | None = None,
) -> Any:
from pyhive.exc import ProgrammingError
if isinstance(sql, str):
sql = [sql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
db = self.get_connection(self.get_conn_id())
# Not all query services (e.g. impala) support the set command
if db.extra_dejson.get("run_set_variable_statements", True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute(f"set {k}={v}")
for statement in sql:
cur.execute(statement)
# we only get results of statements that returns
lowered_statement = statement.lower().strip()
if lowered_statement.startswith(("select", "with", "show")) or (
lowered_statement.startswith("set") and "=" not in lowered_statement
):
description = cur.description
if previous_description and previous_description != description:
message = f"""The statements are producing different descriptions:
Current: {description!r}
Previous: {previous_description!r}"""
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(
self,
sql: str | list[str],
schema: str = "default",
fetch_size: int | None = None,
hive_conf: Iterable | Mapping | None = None,
) -> dict[str, Any]:
"""
Get results of the provided hql in target schema.
:param sql: hql to be executed.
:param schema: target schema, default to 'default'.
:param fetch_size: max size of result to fetch.
:param hive_conf: hive_conf to execute alone with the hql.
:return: results of hql execution, dict with data (list of results) and header
"""
results_iter = self._get_results(sql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {"data": list(results_iter), "header": header}
return results
def to_csv(
self,
sql: str,
csv_filepath: str,
schema: str = "default",
delimiter: str = ",",
lineterminator: str = "\r\n",
output_header: bool = True,
fetch_size: int = 1000,
hive_conf: dict[Any, Any] | None = None,
) -> None:
"""
Execute hql in target schema and write results to a csv file.
:param sql: hql to be executed.
:param csv_filepath: filepath of csv to write results into.
:param schema: target schema, default to 'default'.
:param delimiter: delimiter of the csv file, default to ','.
:param lineterminator: lineterminator of the csv file.
:param output_header: header of the csv file, default to True.
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:param hive_conf: hive_conf to execute alone with the hql.
"""
results_iter = self._get_results(sql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=delimiter, lineterminator=lineterminator)
try:
if output_header:
self.log.debug("Cursor description is %s", header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(
self, sql: str | list[str], parameters: Iterable | Mapping[str, Any] | None = None, **kwargs
) -> Any:
"""
Get a set of records from a Hive query; optionally pass a 'schema' kwarg to specify target schema.
:param sql: hql to be executed.
:param parameters: optional configuration passed to get_results
:return: result of hive execution
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
schema = kwargs["schema"] if "schema" in kwargs else "default"
return self.get_results(sql, schema=schema, hive_conf=parameters)["data"]
def _get_pandas_df(
self,
sql,
parameters: list[Any] | tuple[Any, ...] | Mapping[str, Any] | None = None,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
**kwargs,
) -> pd.DataFrame:
try:
import pandas as pd
except ImportError as e:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(e)
res = self.get_results(sql, schema=schema, hive_conf=hive_conf)
df = pd.DataFrame(res["data"], columns=[c[0] for c in res["header"]], **kwargs)
return df
def _get_polars_df(
self,
sql,
parameters: list[Any] | tuple[Any, ...] | Mapping[str, Any] | None = None,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
**kwargs,
) -> pl.DataFrame:
try:
import polars as pl
except ImportError as e:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(e)
res = self.get_results(sql, schema=schema, hive_conf=hive_conf)
df = pl.DataFrame(res["data"], schema=[c[0] for c in res["header"]], orient="row", **kwargs)
return df
@overload # type: ignore[override]
def get_df(
self,
sql: str,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
*,
df_type: Literal["pandas"] = "pandas",
**kwargs: Any,
) -> pd.DataFrame: ...
@overload
def get_df(
self,
sql: str,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
*,
df_type: Literal["polars"],
**kwargs: Any,
) -> pl.DataFrame: ...
def get_df(
self,
sql: str,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
*,
df_type: Literal["pandas", "polars"] = "pandas",
**kwargs,
) -> pd.DataFrame | pl.DataFrame:
"""
Get a pandas / polars dataframe from a Hive query.
:param sql: hql to be executed.
:param schema: target schema, default to 'default'.
:param hive_conf: hive_conf to execute alone with the hql.
:param df_type: type of dataframe to return, either 'pandas' or 'polars'
:param kwargs: (optional) passed into pandas.DataFrame constructor
:return: result of hive execution
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_df(sql, df_type="pandas")
>>> len(df.index)
100
:return: pandas.DateFrame | polars.DataFrame
"""
if df_type == "pandas":
return self._get_pandas_df(sql, schema=schema, hive_conf=hive_conf, **kwargs)
if df_type == "polars":
return self._get_polars_df(sql, schema=schema, hive_conf=hive_conf, **kwargs)
@deprecated(
reason="Replaced by function `get_df`.",
category=AirflowProviderDeprecationWarning,
action="ignore",
)
def get_pandas_df( # type: ignore
self,
sql: str,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
**kwargs,
) -> pd.DataFrame:
return self._get_pandas_df(sql, schema=schema, hive_conf=hive_conf, **kwargs)
| HiveServer2Hook |
python | ray-project__ray | python/ray/llm/_internal/serve/observability/metrics/middleware.py | {
"start": 716,
"end": 5169
} | class ____:
"""Measures and stores HTTP request metrics."""
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
if scope["type"] not in ("http", "websocket"):
await self.app(scope, receive, send)
# If the status_code isn't set by send_wrapper,
# we should consider that an error.
status_code = 500
send_wrapper_failed_exc_info = "Status code was never set by send_wrapper."
exception_info = send_wrapper_failed_exc_info
async def send_wrapper(message: Message) -> None:
"""Wraps the send message.
Enables this middleware to access the response headers.
"""
nonlocal status_code, exception_info
if message["type"] == "http.response.start":
status_code = message.get("status", 500)
# Clear the send_wrapper_failed_exc_info.
if exception_info == send_wrapper_failed_exc_info:
exception_info = None
await send(message)
request = Request(scope)
req_id = get_request_id(request)
now = time.monotonic()
try:
logger.info(f"Starting handling of the request {req_id}")
await self.app(scope, receive, send_wrapper)
except CancelledError as ce:
status_code = -1
exception_info = ce
raise
except BaseException as e:
status_code = 500
exception_info = e
raise
finally:
duration_s = time.monotonic() - now
tags = _get_tags(request, status_code, request.app)
# NOTE: Custom decorators are not applied to histogram-based metrics
# to make sure we can keep cardinality of those in check
truncated_tags = {
**tags,
FASTAPI_HTTP_USER_ID_TAG_KEY: "truncated",
}
request.app.state.http_requests_metrics.inc(1, tags)
request.app.state.http_requests_latency_metrics.observe(
duration_s, truncated_tags
)
extra_context = {
"status_code": status_code,
"duration_ms": duration_s * 1000,
}
if status_code >= 400:
log = logger.error if status_code >= 500 else logger.warning
log(
f"Handling of the request {req_id} failed",
exc_info=exception_info,
extra={"ray_serve_extra_fields": extra_context},
)
elif status_code == -1:
logger.info(
f"Handling of the request {req_id} have been cancelled",
extra={"ray_serve_extra_fields": extra_context},
)
else:
logger.info(
f"Handling of the request {req_id} successfully completed",
extra={"ray_serve_extra_fields": extra_context},
)
def _get_route_details(scope):
"""
Function to retrieve Starlette route from scope.
TODO: there is currently no way to retrieve http.route from
a starlette application from scope.
See: https://github.com/encode/starlette/pull/804
Args:
scope: A Starlette scope
Returns:
A string containing the route or None
"""
app = scope["app"]
route = None
for starlette_route in app.routes:
match, _ = starlette_route.matches(scope)
if match == Match.FULL:
route = starlette_route.path
break
if match == Match.PARTIAL:
route = starlette_route.path
return route
def _get_tags(request: Request, status_code: int, app: FastAPI) -> Dict[str, str]:
"""Generates tags for the request's metrics."""
route = str(_get_route_details(request.scope)) or "unknown"
path = str(request.url.path) or "unknown"
method = str(request.method) or "unknown"
user_id = str(get_user_id(request) or "unknown")
return {
FASTAPI_API_SERVER_TAG_KEY: get_app_name(app),
FASTAPI_HTTP_RESPONSE_CODE_TAG_KEY: str(status_code),
FASTAPI_HTTP_PATH_TAG_KEY: path,
FASTAPI_HTTP_HANDLER_TAG_KEY: route,
FASTAPI_HTTP_METHOD_TAG_KEY: method,
FASTAPI_HTTP_USER_ID_TAG_KEY: user_id,
}
| MeasureHTTPRequestMetricsMiddleware |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/inotify.py | {
"start": 7207,
"end": 8049
} | class ____(InotifyEmitter):
"""
inotify(7)-based event emitter. By default this class produces move events even if they are not matched
Such move events will have a ``None`` value for the unmatched part.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
| InotifyFullEmitter |
python | boto__boto3 | boto3/dynamodb/types.py | {
"start": 2006,
"end": 7082
} | class ____:
"""This class serializes Python data types to DynamoDB types."""
def serialize(self, value):
"""The method to serialize the Python data types.
:param value: A python value to be serialized to DynamoDB. Here are
the various conversions:
Python DynamoDB
------ --------
None {'NULL': True}
True/False {'BOOL': True/False}
int/Decimal {'N': str(value)}
string {'S': string}
Binary/bytearray/bytes (py3 only) {'B': bytes}
set([int/Decimal]) {'NS': [str(value)]}
set([string]) {'SS': [string])
set([Binary/bytearray/bytes]) {'BS': [bytes]}
list {'L': list}
dict {'M': dict}
For types that involve numbers, it is recommended that ``Decimal``
objects are used to be able to round-trip the Python type.
For types that involve binary, it is recommended that ``Binary``
objects are used to be able to round-trip the Python type.
:rtype: dict
:returns: A dictionary that represents a dynamoDB data type. These
dictionaries can be directly passed to botocore methods.
"""
dynamodb_type = self._get_dynamodb_type(value)
serializer = getattr(self, f'_serialize_{dynamodb_type}'.lower())
return {dynamodb_type: serializer(value)}
def _get_dynamodb_type(self, value):
dynamodb_type = None
if self._is_null(value):
dynamodb_type = NULL
elif self._is_boolean(value):
dynamodb_type = BOOLEAN
elif self._is_number(value):
dynamodb_type = NUMBER
elif self._is_string(value):
dynamodb_type = STRING
elif self._is_binary(value):
dynamodb_type = BINARY
elif self._is_type_set(value, self._is_number):
dynamodb_type = NUMBER_SET
elif self._is_type_set(value, self._is_string):
dynamodb_type = STRING_SET
elif self._is_type_set(value, self._is_binary):
dynamodb_type = BINARY_SET
elif self._is_map(value):
dynamodb_type = MAP
elif self._is_listlike(value):
dynamodb_type = LIST
else:
msg = f'Unsupported type "{type(value)}" for value "{value}"'
raise TypeError(msg)
return dynamodb_type
def _is_null(self, value):
if value is None:
return True
return False
def _is_boolean(self, value):
if isinstance(value, bool):
return True
return False
def _is_number(self, value):
if isinstance(value, (int, Decimal)):
return True
elif isinstance(value, float):
raise TypeError(
'Float types are not supported. Use Decimal types instead.'
)
return False
def _is_string(self, value):
if isinstance(value, str):
return True
return False
def _is_binary(self, value):
if isinstance(value, (Binary, bytearray, bytes)):
return True
return False
def _is_set(self, value):
if isinstance(value, collections_abc.Set):
return True
return False
def _is_type_set(self, value, type_validator):
if self._is_set(value):
if False not in map(type_validator, value):
return True
return False
def _is_map(self, value):
if isinstance(value, collections_abc.Mapping):
return True
return False
def _is_listlike(self, value):
if isinstance(value, (list, tuple)):
return True
return False
def _serialize_null(self, value):
return True
def _serialize_bool(self, value):
return value
def _serialize_n(self, value):
number = str(DYNAMODB_CONTEXT.create_decimal(value))
if number in ['Infinity', 'NaN']:
raise TypeError('Infinity and NaN not supported')
return number
def _serialize_s(self, value):
return value
def _serialize_b(self, value):
if isinstance(value, Binary):
value = value.value
return value
def _serialize_ss(self, value):
return [self._serialize_s(s) for s in value]
def _serialize_ns(self, value):
return [self._serialize_n(n) for n in value]
def _serialize_bs(self, value):
return [self._serialize_b(b) for b in value]
def _serialize_l(self, value):
return [self.serialize(v) for v in value]
def _serialize_m(self, value):
return {k: self.serialize(v) for k, v in value.items()}
| TypeSerializer |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 162784,
"end": 166778
} | class ____(Request):
"""
Add or update task configuration
:param task: Task ID
:type task: str
:param configuration: Task configuration items. The new ones will be added and
the already existing ones will be updated
:type configuration: Sequence[ConfigurationItem]
:param replace_configuration: If set then the all the configuration items will
be replaced with the provided ones. Otherwise only the provided configuration
items will be updated or added
:type replace_configuration: bool
"""
_service = "tasks"
_action = "edit_configuration"
_version = "2.9"
_schema = {
"definitions": {
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"configuration": {
"description": "Task configuration items. The new ones will be added and the already existing ones will be updated",
"items": {"$ref": "#/definitions/configuration_item"},
"type": "array",
},
"replace_configuration": {
"description": "If set then the all the configuration items will be replaced with the provided ones. Otherwise only the provided configuration items will be updated or added",
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "configuration"],
"type": "object",
}
def __init__(
self, task: str, configuration: List[Any], replace_configuration: Optional[bool] = None, **kwargs: Any
) -> None:
super(EditConfigurationRequest, self).__init__(**kwargs)
self.task = task
self.configuration = configuration
self.replace_configuration = replace_configuration
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("configuration")
def configuration(self) -> List[Any]:
return self._property_configuration
@configuration.setter
def configuration(self, value: List[Any]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", (dict, ConfigurationItem), is_array=True)
value = [ConfigurationItem(**v) if isinstance(v, dict) else v for v in value]
self._property_configuration = value
@schema_property("replace_configuration")
def replace_configuration(self) -> Optional[bool]:
return self._property_replace_configuration
@replace_configuration.setter
def replace_configuration(self, value: Optional[bool]) -> None:
if value is None:
self._property_replace_configuration = None
return
self.assert_isinstance(value, "replace_configuration", (bool,))
self._property_replace_configuration = value
| EditConfigurationRequest |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 1531,
"end": 2031
} | class ____(resource.Resource):
"""
A testing resource which renders itself as the contents of the request body
as long as the request body is 100 bytes long, otherwise which renders
itself as C{"ERROR"}.
"""
def render(self, request):
data = request.content.read()
contentLength = request.requestHeaders.getRawHeaders(b"content-length")[0]
if len(data) != 100 or int(contentLength) != 100:
return b"ERROR"
return data
| PayloadResource |
python | getsentry__sentry | src/sentry/api/endpoints/organization_trace_logs.py | {
"start": 1034,
"end": 5893
} | class ____(OrganizationEventsV2EndpointBase):
"""Replaces a call to events that isn't possible for team plans because of projects restrictions"""
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get_projects(
self,
request: HttpRequest,
organization: Organization | RpcOrganization,
force_global_perms: bool = False,
include_all_accessible: bool = False,
project_ids: set[int] | None = None,
project_slugs: set[str] | None = None,
) -> list[Project]:
"""The trace endpoint always wants to get all projects regardless of what's passed into the API
This is because a trace can span any number of projects in an organization. But we still want to
use the get_projects function to check for any permissions. So we'll just pass project_ids=-1 everytime
which is what would be sent if we wanted all projects"""
return super().get_projects(
request,
organization,
project_ids={-1},
project_slugs=None,
include_all_accessible=True,
)
@sentry_sdk.tracing.trace
def query_logs_data(
self,
snuba_params: SnubaParams,
trace_ids: list[str],
replay_id: str | None,
orderby: list[str],
additional_query: str | None,
offset: int,
limit: int,
) -> EventsResponse:
"""Queries log data for a given trace"""
required_keys = [
"id",
"project.id",
constants.TRACE_ALIAS,
"severity_number",
"severity",
constants.TIMESTAMP_ALIAS,
constants.TIMESTAMP_PRECISE_ALIAS,
"message",
]
# Validate that orderby values are also in required_keys
for column in orderby:
stripped_orderby = column.lstrip("-")
if stripped_orderby not in required_keys:
raise ParseError(
f"{stripped_orderby} must be one of {','.join(sorted(required_keys))}"
)
base_query_parts = []
# Create the query based on trace id and/or replay id
if trace_ids:
trace_query = (
f"{constants.TRACE_ALIAS}:{trace_ids[0]}"
if len(trace_ids) == 1
else f"{constants.TRACE_ALIAS}:[{','.join(trace_ids)}]"
)
base_query_parts.append(trace_query)
if replay_id:
replay_query = f"replay_id:{replay_id}"
base_query_parts.append(replay_query)
if len(base_query_parts) > 1:
base_query = f"({' OR '.join(base_query_parts)})"
else:
base_query = base_query_parts[0]
if additional_query is not None:
query = f"{base_query} and {additional_query}"
else:
query = base_query
results = OurLogs.run_table_query(
params=snuba_params,
query_string=query,
selected_columns=required_keys,
orderby=orderby,
offset=offset,
limit=limit,
referrer=Referrer.API_TRACE_VIEW_LOGS.value,
config=SearchResolverConfig(use_aggregate_conditions=False),
# Since we're getting all logs for a given trace we always want highest accuracy
sampling_mode=constants.SAMPLING_MODE_HIGHEST_ACCURACY,
)
return results
def get(self, request: Request, organization: Organization) -> HttpResponse:
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response(status=404)
trace_ids = request.GET.getlist("traceId", [])
replay_id = request.GET.get("replayId")
for trace_id in trace_ids:
if not is_event_id(trace_id):
raise ParseError(INVALID_ID_DETAILS.format(trace_id))
if replay_id and not is_event_id(replay_id):
raise ParseError(INVALID_ID_DETAILS.format(replay_id))
if len(trace_ids) == 0 and not replay_id:
raise ParseError("Need to pass at least one traceId or replayId")
orderby = request.GET.getlist("sort", ["-timestamp", "-timestamp_precise"])
additional_query = request.GET.get("query")
update_snuba_params_with_timestamp(request, snuba_params)
def data_fn(offset: int, limit: int) -> EventsResponse:
with handle_query_errors():
return self.query_logs_data(
snuba_params, trace_ids, replay_id, orderby, additional_query, offset, limit
)
return self.paginate(
request=request,
paginator=GenericOffsetPaginator(data_fn=data_fn),
max_per_page=9999,
)
| OrganizationTraceLogsEndpoint |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 3379,
"end": 4351
} | class ____(unittest.TestCase):
def test_indexflat_l2_sync_norms_1(self):
d = 32
nb = 10000
nt = 0
nq = 16
k = 10
(xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
# instantiate IndexHNSWFlat
index = faiss.IndexHNSWFlat(d, 32)
index.hnsw.efConstruction = 40
index.add(xb)
D1, I1 = index.search(xq, k)
index_l2 = faiss.downcast_index(index.storage)
index_l2.sync_l2norms()
D2, I2 = index.search(xq, k)
index_l2.clear_l2norms()
D3, I3 = index.search(xq, k)
# not too many elements are off.
self.assertLessEqual((I2 != I1).sum(), 1)
# np.testing.assert_equal(Iref, I1)
np.testing.assert_almost_equal(D2, D1, decimal=5)
# not too many elements are off.
self.assertLessEqual((I3 != I1).sum(), 0)
# np.testing.assert_equal(Iref, I1)
np.testing.assert_equal(D3, D1)
| TestIndexFlatL2 |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 56318,
"end": 56392
} | class ____(atlas_3_10_threads_info):
pass
| lapack_atlas_3_10_threads_info |
python | huggingface__transformers | src/transformers/models/vitdet/modeling_vitdet.py | {
"start": 12536,
"end": 13445
} | class ____(nn.Module):
"""
A LayerNorm variant, popularized by Transformers, that performs point-wise mean and variance normalization over the
channel dimension for inputs that have shape (batch_size, channels, height, width).
https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119
"""
def __init__(self, normalized_shape, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.normalized_shape = (normalized_shape,)
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
| VitDetLayerNorm |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 51994,
"end": 52077
} | class ____(TestMaskedArrayMethods, QuantitySetup):
pass
| TestMaskedQuantityMethods |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py | {
"start": 705,
"end": 1202
} | class ____(BuilderWithDefaults):
phases = ("install",)
package_methods = ("test_imports",)
package_attributes = ("archive_files", "build_directory", "install_time_test_callbacks")
install_time_test_callbacks = ["test_imports"]
@property
def build_directory(self) -> str:
return self.pkg.stage.source_path
def install(self, pkg: PythonPackage, spec: Spec, prefix: Prefix) -> None:
pass
run_after("install")(execute_install_time_tests)
| PythonPipBuilder |
python | getsentry__sentry-python | sentry_sdk/integrations/asgi.py | {
"start": 2048,
"end": 12800
} | class ____:
__slots__ = (
"app",
"__call__",
"transaction_style",
"mechanism_type",
"span_origin",
"http_methods_to_capture",
)
def __init__(
self,
app, # type: Any
unsafe_context_data=False, # type: bool
transaction_style="endpoint", # type: str
mechanism_type="asgi", # type: str
span_origin="manual", # type: str
http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE, # type: Tuple[str, ...]
asgi_version=None, # type: Optional[int]
):
# type: (...) -> None
"""
Instrument an ASGI application with Sentry. Provides HTTP/websocket
data to sent events and basic handling for exceptions bubbling up
through the middleware.
:param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
"""
if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
raise RuntimeError(
"The ASGI middleware for Sentry requires Python 3.7+ "
"or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
asgi_middleware_while_using_starlette_or_fastapi = (
mechanism_type == "asgi" and "starlette" in _get_installed_modules()
)
if asgi_middleware_while_using_starlette_or_fastapi:
logger.warning(
"The Sentry Python SDK can now automatically support ASGI frameworks like Starlette and FastAPI. "
"Please remove 'SentryAsgiMiddleware' from your project. "
"See https://docs.sentry.io/platforms/python/guides/asgi/ for more information."
)
self.transaction_style = transaction_style
self.mechanism_type = mechanism_type
self.span_origin = span_origin
self.app = app
self.http_methods_to_capture = http_methods_to_capture
if asgi_version is None:
if _looks_like_asgi3(app):
asgi_version = 3
else:
asgi_version = 2
if asgi_version == 3:
self.__call__ = self._run_asgi3
elif asgi_version == 2:
self.__call__ = self._run_asgi2 # type: ignore
def _capture_lifespan_exception(self, exc):
# type: (Exception) -> None
"""Capture exceptions raise in application lifespan handlers.
The separate function is needed to support overriding in derived integrations that use different catching mechanisms.
"""
return _capture_exception(exc=exc, mechanism_type=self.mechanism_type)
def _capture_request_exception(self, exc):
# type: (Exception) -> None
"""Capture exceptions raised in incoming request handlers.
The separate function is needed to support overriding in derived integrations that use different catching mechanisms.
"""
return _capture_exception(exc=exc, mechanism_type=self.mechanism_type)
def _run_asgi2(self, scope):
# type: (Any) -> Any
async def inner(receive, send):
# type: (Any, Any) -> Any
return await self._run_app(scope, receive, send, asgi_version=2)
return inner
async def _run_asgi3(self, scope, receive, send):
# type: (Any, Any, Any) -> Any
return await self._run_app(scope, receive, send, asgi_version=3)
async def _run_app(self, scope, receive, send, asgi_version):
# type: (Any, Any, Any, int) -> Any
is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)
is_lifespan = scope["type"] == "lifespan"
if is_recursive_asgi_middleware or is_lifespan:
try:
if asgi_version == 2:
return await self.app(scope)(receive, send)
else:
return await self.app(scope, receive, send)
except Exception as exc:
self._capture_lifespan_exception(exc)
raise exc from None
_asgi_middleware_applied.set(True)
try:
with sentry_sdk.isolation_scope() as sentry_scope:
with track_session(sentry_scope, session_mode="request"):
sentry_scope.clear_breadcrumbs()
sentry_scope._name = "asgi"
processor = partial(self.event_processor, asgi_scope=scope)
sentry_scope.add_event_processor(processor)
ty = scope["type"]
(
transaction_name,
transaction_source,
) = self._get_transaction_name_and_source(
self.transaction_style,
scope,
)
method = scope.get("method", "").upper()
transaction = None
if ty in ("http", "websocket"):
if ty == "websocket" or method in self.http_methods_to_capture:
transaction = continue_trace(
_get_headers(scope),
op="{}.server".format(ty),
name=transaction_name,
source=transaction_source,
origin=self.span_origin,
)
else:
transaction = Transaction(
op=OP.HTTP_SERVER,
name=transaction_name,
source=transaction_source,
origin=self.span_origin,
)
if transaction:
transaction.set_tag("asgi.type", ty)
transaction_context = (
sentry_sdk.start_transaction(
transaction,
custom_sampling_context={"asgi_scope": scope},
)
if transaction is not None
else nullcontext()
)
with transaction_context:
try:
async def _sentry_wrapped_send(event):
# type: (Dict[str, Any]) -> Any
if transaction is not None:
is_http_response = (
event.get("type") == "http.response.start"
and "status" in event
)
if is_http_response:
transaction.set_http_status(event["status"])
return await send(event)
if asgi_version == 2:
return await self.app(scope)(
receive, _sentry_wrapped_send
)
else:
return await self.app(
scope, receive, _sentry_wrapped_send
)
except Exception as exc:
self._capture_request_exception(exc)
raise exc from None
finally:
_asgi_middleware_applied.set(False)
def event_processor(self, event, hint, asgi_scope):
# type: (Event, Hint, Any) -> Optional[Event]
request_data = event.get("request", {})
request_data.update(_get_request_data(asgi_scope))
event["request"] = deepcopy(request_data)
# Only set transaction name if not already set by Starlette or FastAPI (or other frameworks)
transaction = event.get("transaction")
transaction_source = (event.get("transaction_info") or {}).get("source")
already_set = (
transaction is not None
and transaction != _DEFAULT_TRANSACTION_NAME
and transaction_source
in [
TransactionSource.COMPONENT,
TransactionSource.ROUTE,
TransactionSource.CUSTOM,
]
)
if not already_set:
name, source = self._get_transaction_name_and_source(
self.transaction_style, asgi_scope
)
event["transaction"] = name
event["transaction_info"] = {"source": source}
return event
# Helper functions.
#
# Note: Those functions are not public API. If you want to mutate request
# data to your liking it's recommended to use the `before_send` callback
# for that.
def _get_transaction_name_and_source(self, transaction_style, asgi_scope):
# type: (SentryAsgiMiddleware, str, Any) -> Tuple[str, str]
name = None
source = SOURCE_FOR_STYLE[transaction_style]
ty = asgi_scope.get("type")
if transaction_style == "endpoint":
endpoint = asgi_scope.get("endpoint")
# Webframeworks like Starlette mutate the ASGI env once routing is
# done, which is sometime after the request has started. If we have
# an endpoint, overwrite our generic transaction name.
if endpoint:
name = transaction_from_function(endpoint) or ""
else:
name = _get_url(asgi_scope, "http" if ty == "http" else "ws", host=None)
source = TransactionSource.URL
elif transaction_style == "url":
# FastAPI includes the route object in the scope to let Sentry extract the
# path from it for the transaction name
route = asgi_scope.get("route")
if route:
path = getattr(route, "path", None)
if path is not None:
name = path
else:
name = _get_url(asgi_scope, "http" if ty == "http" else "ws", host=None)
source = TransactionSource.URL
if name is None:
name = _DEFAULT_TRANSACTION_NAME
source = TransactionSource.ROUTE
return name, source
return name, source
| SentryAsgiMiddleware |
python | celery__celery | t/smoke/tests/test_control.py | {
"start": 44,
"end": 688
} | class ____:
def test_sanity(self, celery_setup: CeleryTestSetup):
r = celery_setup.app.control.ping()
assert all(
[
all([res["ok"] == "pong" for _, res in response.items()])
for response in r
]
)
def test_shutdown_exit_with_zero(self, celery_setup: CeleryTestSetup):
celery_setup.app.control.shutdown(destination=[celery_setup.worker.hostname()])
while celery_setup.worker.container.status != "exited":
celery_setup.worker.container.reload()
assert celery_setup.worker.container.attrs["State"]["ExitCode"] == 0
| test_control |
python | facebook__pyre-check | client/log/tests/log_test.py | {
"start": 233,
"end": 509
} | class ____(unittest.TestCase):
def test_truncate(self) -> None:
self.assertEqual(log.truncate("a", 10), "a")
self.assertEqual(log.truncate("a" * 10, 10), "a" * 10)
self.assertEqual(log.truncate("123456789", 4), "1234..[truncated 5 characters]")
| LogTest |
python | weaviate__weaviate-python-client | profiling/test_refs.py | {
"start": 1235,
"end": 1310
} | class ____:
to_class: str
to_uuid: uuid_lib.UUID
@dataclass
| Reference |
python | ray-project__ray | python/ray/serve/_private/deployment_scheduler.py | {
"start": 910,
"end": 1048
} | class ____:
"""A scheduling policy that spreads replicas with best effort."""
pass
@total_ordering
| SpreadDeploymentSchedulingPolicy |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_barcode.py | {
"start": 1968,
"end": 5048
} | class ____(ColumnMapExpectation):
"""Expect the provided barcodes are valid (barcode type passed in parameter).
Barcode types: code39, ean, ean8, ean13, gtin, gtin14, gs1_datamatrix, isbn, isbn10, isbn13, upc
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"9788478290222",
"9780201379624",
"1010202030308",
"1010202030315",
"1010202030322",
],
"some_other": [
"9788478290222a",
"9788478290222b",
"9788478290222c",
"9788478290222_",
"9788478290222d",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_valid",
"barcode_type": "ean13",
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "some_other",
"barcode_type": "ean13",
"mostly": 0.9,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_barcode"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"barcode_type",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["barcodenumber"],
}
success_keys = (
"barcode_type",
"mostly",
)
if __name__ == "__main__":
ExpectColumnValuesToBeValidBarcode().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidBarcode |
python | kamyu104__LeetCode-Solutions | Python/maximum-alternating-subarray-sum.py | {
"start": 29,
"end": 543
} | class ____(object):
def maximumAlternatingSubarraySum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def kadane(nums, start):
result = float("-inf")
curr = odd = 0
for i in xrange(start, len(nums)):
curr = (curr+nums[i]) if not odd else max(curr-nums[i], 0)
result = max(result, curr)
odd ^= 1
return result
return max(kadane(nums, 0), kadane(nums, 1))
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 28682,
"end": 30946
} | class ____(MapPartitions):
_parameters = [
"frame",
"func",
"before",
"after",
"meta",
"enforce_metadata",
"transform_divisions",
"clear_divisions",
"align_dataframes",
"token",
"kwargs",
]
_defaults: dict = {
"meta": None,
"enfore_metadata": True,
"transform_divisions": True,
"kwargs": None,
"clear_divisions": False,
"align_dataframes": False,
"token": None,
}
@functools.cached_property
def _kwargs(self) -> dict:
kwargs = self.kwargs
if kwargs is None:
kwargs = {}
return kwargs
@property
def args(self):
return (
[self.frame]
+ [self.func, self.before, self.after]
+ self.operands[len(self._parameters) :]
)
@functools.cached_property
def _meta(self):
meta = self.operand("meta")
args = [self.frame._meta] + [
arg._meta if isinstance(arg, Expr) else arg
for arg in self.operands[len(self._parameters) :]
]
return _get_meta_map_partitions(
args,
[self.dependencies()[0]],
self.func,
self.kwargs,
meta,
self.kwargs.pop("parent_meta", None),
)
@functools.cached_property
def before(self):
before = self.operand("before")
if isinstance(before, str):
return pd.to_timedelta(before)
return before
@functools.cached_property
def after(self):
after = self.operand("after")
if isinstance(after, str):
return pd.to_timedelta(after)
return after
def _lower(self):
overlapped = CreateOverlappingPartitions(self.frame, self.before, self.after)
return MapPartitions(
overlapped,
_overlap_chunk,
self._meta,
self.enforce_metadata,
self.transform_divisions,
self.clear_divisions,
self.align_dataframes,
None,
None,
self.token,
self._kwargs,
len(self.args[1:]),
*self.args[1:],
)
| MapOverlap |
python | doocs__leetcode | solution/0300-0399/0309.Best Time to Buy and Sell Stock with Cooldown/Solution2.py | {
"start": 0,
"end": 345
} | class ____:
def maxProfit(self, prices: List[int]) -> int:
n = len(prices)
f = [[0] * 2 for _ in range(n)]
f[0][1] = -prices[0]
for i in range(1, n):
f[i][0] = max(f[i - 1][0], f[i - 1][1] + prices[i])
f[i][1] = max(f[i - 1][1], f[i - 2][0] - prices[i])
return f[n - 1][0]
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/constructors.py | {
"start": 1826,
"end": 1957
} | class ____(BaseConstructor):
def __init__(self, y: int) -> None:
super().__init__()
self.y = y
| DerivedConstructor |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/api/test_operations.py | {
"start": 18258,
"end": 19876
} | class ____:
section: str = "core"
option: str = "config"
def test_get(self):
response_config = Config(
sections=[
ConfigSection(
name=self.section,
options=[
ConfigOption(
key=self.option,
value="config",
)
],
)
]
)
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/config/section/{self.section}/option/{self.option}"
return httpx.Response(200, json=response_config.model_dump())
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.configs.get(section=self.section, option=self.option)
assert response == response_config
def test_list(self):
response_config = Config(
sections=[
ConfigSection(name="section-1", options=[ConfigOption(key="option-1", value="value-1")]),
ConfigSection(name="section-2", options=[ConfigOption(key="option-2", value="value-2")]),
]
)
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == "/api/v2/config"
return httpx.Response(200, json=response_config.model_dump())
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.configs.list()
assert response == response_config
| TestConfigOperations |
python | getsentry__sentry | tests/sentry/integrations/msteams/webhook/test_ms_teams_webhook_endpoint.py | {
"start": 157,
"end": 2978
} | class ____(TestCase):
def setUp(self) -> None:
self._example_request_data = {
"entities": [{"type": "clientInfo", "locale": "en-US"}],
"timestamp": "2024-03-21T18:41:30.088Z",
"action": "add",
"recipient": {"name": "Sentry", "id": "28:8922afe2-d747-4ae9-9bce-fa2e6f4631f6"},
"locale": "en-US",
"channelId": "msteams",
"from": {
"aadObjectId": "8a9a85f5-748b-4d75-baa5-b8d2f6bfe209",
"id": "29:1OG0nX1xCYfjz1_OSjsk4d5Ix51njAv7AMuc3fq18b0URfOSHBQs58aGFgsVJm4f--gX-EQSV8o_pbHXc-gZ9dA",
},
"type": "installationUpdate",
"conversation": {
"tenantId": "ce067f64-338d-44a0-89fb-7fc8973e254f",
"isGroup": "True",
"id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2",
"conversationType": "channel",
},
"channelData": {
"channel": {"id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2"},
"team": {
"name": "Sales and Marketing",
"aadGroupId": "3d5d4c90-1ae9-41c7-9471-7ccd37ddb7d4",
"id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2",
},
"settings": {
"selectedChannel": {"id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2"}
},
"source": {"name": "message"},
"tenant": {"id": "ce067f64-338d-44a0-89fb-7fc8973e254f"},
},
"serviceUrl": "https://smba.trafficmanager.net/amer/",
"id": "f:1af81a4d-ed72-647d-c803-1681b91a7fa4",
}
def test_with_example_request(self) -> None:
response = MsTeamsWebhookEndpoint._get_team_installation_request_data(
self._example_request_data
)
assert response == {
"conversation_id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2",
"external_id": "19:7c8cd8b4b4ad4e73a2957e6daad706ef@thread.tacv2",
"external_name": "Sales and Marketing",
"installation_type": "team",
"service_url": "https://smba.trafficmanager.net/amer/",
"tenant_id": "ce067f64-338d-44a0-89fb-7fc8973e254f",
"user_id": "29:1OG0nX1xCYfjz1_OSjsk4d5Ix51njAv7AMuc3fq18b0URfOSHBQs58aGFgsVJm4f--gX-EQSV8o_pbHXc-gZ9dA",
}
def test_raises_error_with_missing_data(self) -> None:
bad_request_data: dict[str, Any] = self._example_request_data.copy()
bad_request_data["channelData"].pop("tenant", None) # Remove "tenant" key
with pytest.raises(KeyError):
MsTeamsWebhookEndpoint._get_team_installation_request_data(bad_request_data)
| TestGeTeamInstallationRequestData |
python | python__mypy | test-data/unit/plugins/common_api_incremental.py | {
"start": 235,
"end": 1613
} | class ____(Plugin):
def get_dynamic_class_hook(
self, fullname: str
) -> Callable[[DynamicClassDefContext], None] | None:
if fullname == "lib.declarative_base":
return add_info_hook
return None
def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
sym = self.lookup_fully_qualified(fullname)
if sym and isinstance(sym.node, TypeInfo):
if sym.node.metadata.get("magic"):
return add_magic_hook
return None
def add_info_hook(ctx: DynamicClassDefContext) -> None:
class_def = ClassDef(ctx.name, Block([]))
class_def.fullname = ctx.api.qualified_name(ctx.name)
info = TypeInfo(SymbolTable(), class_def, ctx.api.cur_mod_id)
class_def.info = info
obj = ctx.api.named_type("builtins.object", [])
info.mro = [info, obj.type]
info.bases = [obj]
ctx.api.add_symbol_table_node(ctx.name, SymbolTableNode(GDEF, info))
info.metadata["magic"] = {"value": True}
def add_magic_hook(ctx: ClassDefContext) -> None:
info = ctx.cls.info
str_type = ctx.api.named_type_or_none("builtins.str", [])
assert str_type is not None
var = Var("__magic__", str_type)
var.info = info
info.names["__magic__"] = SymbolTableNode(MDEF, var)
def plugin(version: str) -> type[DynPlugin]:
return DynPlugin
| DynPlugin |
python | django__django | tests/auth_tests/test_mixins.py | {
"start": 434,
"end": 525
} | class ____(UserPassesTestMixin):
def test_func(self):
return True
| AlwaysTrueMixin |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 95288,
"end": 95467
} | class ____(Structure):
_fields_ = [("sessionsCount", c_uint),
("averageFPS", c_uint),
("averageLatency", c_uint)
]
| c_nvmlFBCStats_t |
python | facebookresearch__faiss | benchs/bench_fw/benchmark.py | {
"start": 6335,
"end": 6896
} | class ____:
num_threads: int
distance_metric: str
def __post_init__(self):
if self.distance_metric == "IP":
self.distance_metric_type = faiss.METRIC_INNER_PRODUCT
elif self.distance_metric == "L2":
self.distance_metric_type = faiss.METRIC_L2
else:
raise ValueError
def set_io(self, benchmark_io: BenchmarkIO):
self.io = benchmark_io
self.io.distance_metric = self.distance_metric
self.io.distance_metric_type = self.distance_metric_type
@dataclass
| IndexOperator |
python | lxml__lxml | src/lxml/tests/test_incremental_xmlfile.py | {
"start": 14288,
"end": 15063
} | class ____(_XmlFileTestCaseBase):
def setUp(self):
self._tmpfile = tempfile.NamedTemporaryFile()
self._file = self._tmpfile.name
def tearDown(self):
try:
self._tmpfile.close()
finally:
if os.path.exists(self._tmpfile.name):
os.unlink(self._tmpfile.name)
def _read_file(self):
self._tmpfile.seek(0)
return self._tmpfile.read()
def _parse_file(self):
self._tmpfile.seek(0)
return etree.parse(self._tmpfile)
@skipIf(True, "temp file behaviour is too platform specific here")
def test_buffering(self):
pass
@skipIf(True, "temp file behaviour is too platform specific here")
def test_flush(self):
pass
| TempPathXmlFileTestCase |
python | django__django | tests/queries/tests.py | {
"start": 77051,
"end": 77630
} | class ____(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
| SubclassFKTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias7.py | {
"start": 326,
"end": 383
} | class ____(Generic[TResult]):
Response: TResult
| Context |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 220200,
"end": 221048
} | class ____(TestCase):
def test_concurrent_calls(self):
result = 0
result_lock = Lock()
def producer(limit):
'Non-concurrent producer. A generator version of range(limit).'
for x in range(limit):
yield x
def consumer(counter):
'Concurrent data consumer'
nonlocal result
total = 0
for x in counter:
total += x
with result_lock:
result += total
limit = 10**6
counter = mi.serialize(producer(limit))
workers = [Thread(target=consumer, args=[counter]) for _ in range(10)]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
self.assertEqual(result, limit * (limit - 1) // 2)
| TestSerialize |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/encoders.py | {
"start": 3608,
"end": 4392
} | class ____(nn.Module):
def __init__(self, input_size: int, normalize: bool = False):
super().__init__()
self.normalizer: Optional[Normalizer] = None
if normalize:
self.normalizer = Normalizer(input_size)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
if self.normalizer is not None:
inputs = self.normalizer(inputs)
return inputs
def copy_normalization(self, other_input: "VectorInput") -> None:
if self.normalizer is not None and other_input.normalizer is not None:
self.normalizer.copy_from(other_input.normalizer)
def update_normalization(self, inputs: torch.Tensor) -> None:
if self.normalizer is not None:
self.normalizer.update(inputs)
| VectorInput |
python | numpy__numpy | numpy/f2py/tests/test_character.py | {
"start": 5196,
"end": 14990
} | class ____(util.F2PyTest):
# options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py']
suffix = '.f90'
fprefix = 'test_character'
code = textwrap.dedent(f"""
subroutine {fprefix}_input(c, o)
character, intent(in) :: c
integer*1 o
!f2py intent(out) o
o = transfer(c, o)
end subroutine {fprefix}_input
subroutine {fprefix}_output(c, o)
character :: c
integer*1, intent(in) :: o
!f2py intent(out) c
c = transfer(o, c)
end subroutine {fprefix}_output
subroutine {fprefix}_input_output(c, o)
character, intent(in) :: c
character o
!f2py intent(out) o
o = c
end subroutine {fprefix}_input_output
subroutine {fprefix}_inout(c, n)
character :: c, n
!f2py intent(in) n
!f2py intent(inout) c
c = n
end subroutine {fprefix}_inout
function {fprefix}_return(o) result (c)
character :: c
character, intent(in) :: o
c = transfer(o, c)
end function {fprefix}_return
subroutine {fprefix}_array_input(c, o)
character, intent(in) :: c(3)
integer*1 o(3)
!f2py intent(out) o
integer i
do i=1,3
o(i) = transfer(c(i), o(i))
end do
end subroutine {fprefix}_array_input
subroutine {fprefix}_2d_array_input(c, o)
character, intent(in) :: c(2, 3)
integer*1 o(2, 3)
!f2py intent(out) o
integer i, j
do i=1,2
do j=1,3
o(i, j) = transfer(c(i, j), o(i, j))
end do
end do
end subroutine {fprefix}_2d_array_input
subroutine {fprefix}_array_output(c, o)
character :: c(3)
integer*1, intent(in) :: o(3)
!f2py intent(out) c
do i=1,3
c(i) = transfer(o(i), c(i))
end do
end subroutine {fprefix}_array_output
subroutine {fprefix}_array_inout(c, n)
character :: c(3), n(3)
!f2py intent(in) n(3)
!f2py intent(inout) c(3)
do i=1,3
c(i) = n(i)
end do
end subroutine {fprefix}_array_inout
subroutine {fprefix}_2d_array_inout(c, n)
character :: c(2, 3), n(2, 3)
!f2py intent(in) n(2, 3)
!f2py intent(inout) c(2. 3)
integer i, j
do i=1,2
do j=1,3
c(i, j) = n(i, j)
end do
end do
end subroutine {fprefix}_2d_array_inout
function {fprefix}_array_return(o) result (c)
character, dimension(3) :: c
character, intent(in) :: o(3)
do i=1,3
c(i) = o(i)
end do
end function {fprefix}_array_return
function {fprefix}_optional(o) result (c)
character, intent(in) :: o
!f2py character o = "a"
character :: c
c = o
end function {fprefix}_optional
""")
@pytest.mark.parametrize("dtype", ['c', 'S1'])
def test_input(self, dtype):
f = getattr(self.module, self.fprefix + '_input')
assert_equal(f(np.array('a', dtype=dtype)), ord('a'))
assert_equal(f(np.array(b'a', dtype=dtype)), ord('a'))
assert_equal(f(np.array(['a'], dtype=dtype)), ord('a'))
assert_equal(f(np.array('abc', dtype=dtype)), ord('a'))
assert_equal(f(np.array([['a']], dtype=dtype)), ord('a'))
def test_input_varia(self):
f = getattr(self.module, self.fprefix + '_input')
assert_equal(f('a'), ord('a'))
assert_equal(f(b'a'), ord(b'a'))
assert_equal(f(''), 0)
assert_equal(f(b''), 0)
assert_equal(f(b'\0'), 0)
assert_equal(f('ab'), ord('a'))
assert_equal(f(b'ab'), ord('a'))
assert_equal(f(['a']), ord('a'))
assert_equal(f(np.array(b'a')), ord('a'))
assert_equal(f(np.array([b'a'])), ord('a'))
a = np.array('a')
assert_equal(f(a), ord('a'))
a = np.array(['a'])
assert_equal(f(a), ord('a'))
try:
f([])
except IndexError as msg:
if not str(msg).endswith(' got 0-list'):
raise
else:
raise SystemError(f'{f.__name__} should have failed on empty list')
try:
f(97)
except TypeError as msg:
if not str(msg).endswith(' got int instance'):
raise
else:
raise SystemError(f'{f.__name__} should have failed on int value')
@pytest.mark.parametrize("dtype", ['c', 'S1', 'U1'])
def test_array_input(self, dtype):
f = getattr(self.module, self.fprefix + '_array_input')
assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)),
np.array(list(map(ord, 'abc')), dtype='i1'))
assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)),
np.array(list(map(ord, 'abc')), dtype='i1'))
def test_array_input_varia(self):
f = getattr(self.module, self.fprefix + '_array_input')
assert_array_equal(f(['a', 'b', 'c']),
np.array(list(map(ord, 'abc')), dtype='i1'))
assert_array_equal(f([b'a', b'b', b'c']),
np.array(list(map(ord, 'abc')), dtype='i1'))
try:
f(['a', 'b', 'c', 'd'])
except ValueError as msg:
if not str(msg).endswith(
'th dimension must be fixed to 3 but got 4'):
raise
else:
raise SystemError(
f'{f.__name__} should have failed on wrong input')
@pytest.mark.parametrize("dtype", ['c', 'S1', 'U1'])
def test_2d_array_input(self, dtype):
f = getattr(self.module, self.fprefix + '_2d_array_input')
a = np.array([['a', 'b', 'c'],
['d', 'e', 'f']], dtype=dtype, order='F')
expected = a.view(np.uint32 if dtype == 'U1' else np.uint8)
assert_array_equal(f(a), expected)
def test_output(self):
f = getattr(self.module, self.fprefix + '_output')
assert_equal(f(ord(b'a')), b'a')
assert_equal(f(0), b'\0')
def test_array_output(self):
f = getattr(self.module, self.fprefix + '_array_output')
assert_array_equal(f(list(map(ord, 'abc'))),
np.array(list('abc'), dtype='S1'))
def test_input_output(self):
f = getattr(self.module, self.fprefix + '_input_output')
assert_equal(f(b'a'), b'a')
assert_equal(f('a'), b'a')
assert_equal(f(''), b'\0')
@pytest.mark.parametrize("dtype", ['c', 'S1'])
def test_inout(self, dtype):
f = getattr(self.module, self.fprefix + '_inout')
a = np.array(list('abc'), dtype=dtype)
f(a, 'A')
assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype))
f(a[1:], 'B')
assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype))
a = np.array(['abc'], dtype=dtype)
f(a, 'A')
assert_array_equal(a, np.array(['Abc'], dtype=a.dtype))
def test_inout_varia(self):
f = getattr(self.module, self.fprefix + '_inout')
a = np.array('abc', dtype='S3')
f(a, 'A')
assert_array_equal(a, np.array('Abc', dtype=a.dtype))
a = np.array(['abc'], dtype='S3')
f(a, 'A')
assert_array_equal(a, np.array(['Abc'], dtype=a.dtype))
try:
f('abc', 'A')
except ValueError as msg:
if not str(msg).endswith(' got 3-str'):
raise
else:
raise SystemError(f'{f.__name__} should have failed on str value')
@pytest.mark.parametrize("dtype", ['c', 'S1'])
def test_array_inout(self, dtype):
f = getattr(self.module, self.fprefix + '_array_inout')
n = np.array(['A', 'B', 'C'], dtype=dtype, order='F')
a = np.array(['a', 'b', 'c'], dtype=dtype, order='F')
f(a, n)
assert_array_equal(a, n)
a = np.array(['a', 'b', 'c', 'd'], dtype=dtype)
f(a[1:], n)
assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype))
a = np.array([['a', 'b', 'c']], dtype=dtype, order='F')
f(a, n)
assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype))
a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F')
try:
f(a, n)
except ValueError as msg:
if not str(msg).endswith(
'th dimension must be fixed to 3 but got 4'):
raise
else:
raise SystemError(
f'{f.__name__} should have failed on wrong input')
@pytest.mark.parametrize("dtype", ['c', 'S1'])
def test_2d_array_inout(self, dtype):
f = getattr(self.module, self.fprefix + '_2d_array_inout')
n = np.array([['A', 'B', 'C'],
['D', 'E', 'F']],
dtype=dtype, order='F')
a = np.array([['a', 'b', 'c'],
['d', 'e', 'f']],
dtype=dtype, order='F')
f(a, n)
assert_array_equal(a, n)
def test_return(self):
f = getattr(self.module, self.fprefix + '_return')
assert_equal(f('a'), b'a')
@pytest.mark.skip('fortran function returning array segfaults')
def test_array_return(self):
f = getattr(self.module, self.fprefix + '_array_return')
a = np.array(list('abc'), dtype='S1')
assert_array_equal(f(a), a)
def test_optional(self):
f = getattr(self.module, self.fprefix + '_optional')
assert_equal(f(), b"a")
assert_equal(f(b'B'), b"B")
| TestCharacter |
python | spack__spack | lib/spack/spack/spec_parser.py | {
"start": 4843,
"end": 6991
} | class ____(TokenBase):
"""Enumeration of the different token kinds of tokens in the spec grammar.
Order of declaration is extremely important, since text containing specs is parsed with a
single regex obtained by ``"|".join(...)`` of all the regex in the order of declaration.
"""
# Dependency, with optional virtual assignment specifier
START_EDGE_PROPERTIES = r"(?:(?:\^|\%\%|\%)\[)"
END_EDGE_PROPERTIES = rf"(?:\](?:\s*{VIRTUAL_ASSIGNMENT})?)"
DEPENDENCY = rf"(?:(?:\^|\%\%|\%)(?:\s*{VIRTUAL_ASSIGNMENT})?)"
# Version
VERSION_HASH_PAIR = rf"(?:@(?:{GIT_VERSION_PATTERN})=(?:{VERSION}))"
GIT_VERSION = rf"@(?:{GIT_VERSION_PATTERN})"
VERSION = rf"(?:@\s*(?:{VERSION_LIST}))"
# Variants
PROPAGATED_BOOL_VARIANT = rf"(?:(?:\+\+|~~|--)\s*{NAME})"
BOOL_VARIANT = rf"(?:[~+-]\s*{NAME})"
PROPAGATED_KEY_VALUE_PAIR = rf"(?:{NAME}:?==(?:{VALUE}|{QUOTED_VALUE}))"
KEY_VALUE_PAIR = rf"(?:{NAME}:?=(?:{VALUE}|{QUOTED_VALUE}))"
# FILENAME
FILENAME = rf"(?:{FILENAME})"
# Package name
FULLY_QUALIFIED_PACKAGE_NAME = rf"(?:{DOTTED_IDENTIFIER})"
UNQUALIFIED_PACKAGE_NAME = rf"(?:{IDENTIFIER}|{STAR})"
# DAG hash
DAG_HASH = rf"(?:/(?:{HASH}))"
# White spaces
WS = r"(?:\s+)"
# Unexpected character(s)
UNEXPECTED = r"(?:.[\s]*)"
#: Tokenizer that includes all the regexes in the SpecTokens enum
SPEC_TOKENIZER = Tokenizer(SpecTokens)
def tokenize(text: str) -> Iterator[Token]:
"""Return a token generator from the text passed as input.
Raises:
SpecTokenizationError: when unexpected characters are found in the text
"""
for token in SPEC_TOKENIZER.tokenize(text):
if token.kind == SpecTokens.UNEXPECTED:
raise SpecTokenizationError(list(SPEC_TOKENIZER.tokenize(text)), text)
yield token
def parseable_tokens(text: str) -> Iterator[Token]:
"""Return non-whitespace tokens from the text passed as input
Raises:
SpecTokenizationError: when unexpected characters are found in the text
"""
return filter(lambda x: x.kind != SpecTokens.WS, tokenize(text))
| SpecTokens |
python | chroma-core__chroma | chromadb/segment/impl/vector/local_persistent_hnsw.py | {
"start": 2230,
"end": 22149
} | class ____(LocalHnswSegment):
METADATA_FILE: str = "index_metadata.pickle"
# How many records to add to index at once, we do this because crossing the python/c++ boundary is expensive (for add())
# When records are not added to the c++ index, they are buffered in memory and served
# via brute force search.
_batch_size: int
_brute_force_index: Optional[BruteForceIndex]
_index_initialized: bool = False
_curr_batch: Batch
# How many records to add to index before syncing to disk
_sync_threshold: int
_persist_data: PersistentData
_persist_directory: str
_allow_reset: bool
_db: SqliteDB
_opentelemtry_client: OpenTelemetryClient
_num_log_records_since_last_batch: int = 0
_num_log_records_since_last_persist: int = 0
def __init__(self, system: System, segment: Segment):
super().__init__(system, segment)
self._db = system.instance(SqliteDB)
self._opentelemtry_client = system.require(OpenTelemetryClient)
self._params = PersistentHnswParams(segment["metadata"] or {})
self._batch_size = self._params.batch_size
self._sync_threshold = self._params.sync_threshold
self._allow_reset = system.settings.allow_reset
self._persist_directory = system.settings.require("persist_directory")
self._curr_batch = Batch()
self._brute_force_index = None
if not os.path.exists(self._get_storage_folder()):
os.makedirs(self._get_storage_folder(), exist_ok=True)
# Load persist data if it exists already, otherwise create it
if self._index_exists():
self._persist_data = PersistentData.load_from_file(
self._get_metadata_file()
)
self._dimensionality = self._persist_data.dimensionality
self._total_elements_added = self._persist_data.total_elements_added
self._id_to_label = self._persist_data.id_to_label
self._label_to_id = self._persist_data.label_to_id
self._id_to_seq_id = self._persist_data.id_to_seq_id
# If the index was written to, we need to re-initialize it
if len(self._id_to_label) > 0:
self._dimensionality = cast(int, self._dimensionality)
self._init_index(self._dimensionality)
else:
self._persist_data = PersistentData(
self._dimensionality,
self._total_elements_added,
self._id_to_label,
self._label_to_id,
self._id_to_seq_id,
)
# Hydrate the max_seq_id
with self._db.tx() as cur:
t = Table("max_seq_id")
q = (
self._db.querybuilder()
.from_(t)
.select(t.seq_id)
.where(t.segment_id == ParameterValue(self._db.uuid_to_db(self._id)))
.limit(1)
)
sql, params = get_sql(q)
cur.execute(sql, params)
result = cur.fetchone()
if result:
self._max_seq_id = result[0]
elif self._index_exists():
# Migrate the max_seq_id from the legacy field in the pickled file to the SQLite database
q = (
self._db.querybuilder()
.into(Table("max_seq_id"))
.columns("segment_id", "seq_id")
.insert(
ParameterValue(self._db.uuid_to_db(self._id)),
ParameterValue(self._persist_data.max_seq_id),
)
)
sql, params = get_sql(q)
cur.execute(sql, params)
self._max_seq_id = self._persist_data.max_seq_id
else:
self._max_seq_id = self._consumer.min_seqid()
@staticmethod
@override
def propagate_collection_metadata(metadata: Metadata) -> Optional[Metadata]:
# Extract relevant metadata
segment_metadata = PersistentHnswParams.extract(metadata)
return segment_metadata
def _index_exists(self) -> bool:
"""Check if the index exists via the metadata file"""
return os.path.exists(self._get_metadata_file())
def _get_metadata_file(self) -> str:
"""Get the metadata file path"""
return os.path.join(self._get_storage_folder(), self.METADATA_FILE)
def _get_storage_folder(self) -> str:
"""Get the storage folder path"""
folder = os.path.join(self._persist_directory, str(self._id))
return folder
@trace_method(
"PersistentLocalHnswSegment._init_index", OpenTelemetryGranularity.ALL
)
@override
def _init_index(self, dimensionality: int) -> None:
index = hnswlib.Index(space=self._params.space, dim=dimensionality)
self._brute_force_index = BruteForceIndex(
size=self._batch_size,
dimensionality=dimensionality,
space=self._params.space,
)
# Check if index exists and load it if it does
if self._index_exists():
index.load_index(
self._get_storage_folder(),
is_persistent_index=True,
max_elements=int(
max(
self.count(
request_version_context=RequestVersionContext(
collection_version=0, log_position=0
)
)
* self._params.resize_factor,
DEFAULT_CAPACITY,
)
),
)
else:
index.init_index(
max_elements=DEFAULT_CAPACITY,
ef_construction=self._params.construction_ef,
M=self._params.M,
is_persistent_index=True,
persistence_location=self._get_storage_folder(),
)
index.set_ef(self._params.search_ef)
index.set_num_threads(self._params.num_threads)
self._index = index
self._dimensionality = dimensionality
self._index_initialized = True
@trace_method("PersistentLocalHnswSegment._persist", OpenTelemetryGranularity.ALL)
def _persist(self) -> None:
"""Persist the index and data to disk"""
index = cast(hnswlib.Index, self._index)
# Persist the index
index.persist_dirty()
# Persist the metadata
self._persist_data.dimensionality = self._dimensionality
self._persist_data.total_elements_added = self._total_elements_added
# TODO: This should really be stored in sqlite, the index itself, or a better
# storage format
self._persist_data.id_to_label = self._id_to_label
self._persist_data.label_to_id = self._label_to_id
self._persist_data.id_to_seq_id = self._id_to_seq_id
with open(self._get_metadata_file(), "wb") as metadata_file:
pickle.dump(self._persist_data, metadata_file, pickle.HIGHEST_PROTOCOL)
with self._db.tx() as cur:
q = (
self._db.querybuilder()
.into(Table("max_seq_id"))
.columns("segment_id", "seq_id")
.insert(
ParameterValue(self._db.uuid_to_db(self._id)),
ParameterValue(self._max_seq_id),
)
)
sql, params = get_sql(q)
sql = sql.replace("INSERT", "INSERT OR REPLACE")
cur.execute(sql, params)
self._num_log_records_since_last_persist = 0
@trace_method(
"PersistentLocalHnswSegment._apply_batch", OpenTelemetryGranularity.ALL
)
@override
def _apply_batch(self, batch: Batch) -> None:
super()._apply_batch(batch)
if self._num_log_records_since_last_persist >= self._sync_threshold:
self._persist()
self._num_log_records_since_last_batch = 0
@trace_method(
"PersistentLocalHnswSegment._write_records", OpenTelemetryGranularity.ALL
)
@override
def _write_records(self, records: Sequence[LogRecord]) -> None:
"""Add a batch of embeddings to the index"""
if not self._running:
raise RuntimeError("Cannot add embeddings to stopped component")
with WriteRWLock(self._lock):
for record in records:
self._num_log_records_since_last_batch += 1
self._num_log_records_since_last_persist += 1
if record["record"]["embedding"] is not None:
self._ensure_index(len(records), len(record["record"]["embedding"]))
if not self._index_initialized:
# If the index is not initialized here, it means that we have
# not yet added any records to the index. So we can just
# ignore the record since it was a delete.
continue
self._brute_force_index = cast(BruteForceIndex, self._brute_force_index)
self._max_seq_id = max(self._max_seq_id, record["log_offset"])
id = record["record"]["id"]
op = record["record"]["operation"]
exists_in_bf_index = self._brute_force_index.has_id(id)
exists_in_persisted_index = self._id_to_label.get(id, None) is not None
exists_in_index = exists_in_bf_index or exists_in_persisted_index
id_is_pending_delete = self._curr_batch.is_deleted(id)
if op == Operation.DELETE:
if exists_in_index:
self._curr_batch.apply(record)
if exists_in_bf_index:
self._brute_force_index.delete([record])
else:
logger.warning(f"Delete of nonexisting embedding ID: {id}")
elif op == Operation.UPDATE:
if record["record"]["embedding"] is not None:
if exists_in_index:
self._curr_batch.apply(record)
self._brute_force_index.upsert([record])
else:
logger.warning(
f"Update of nonexisting embedding ID: {record['record']['id']}"
)
elif op == Operation.ADD:
if record["record"]["embedding"] is not None:
if exists_in_index and not id_is_pending_delete:
logger.warning(f"Add of existing embedding ID: {id}")
else:
self._curr_batch.apply(record, not exists_in_index)
self._brute_force_index.upsert([record])
elif op == Operation.UPSERT:
if record["record"]["embedding"] is not None:
self._curr_batch.apply(record, exists_in_index)
self._brute_force_index.upsert([record])
if self._num_log_records_since_last_batch >= self._batch_size:
self._apply_batch(self._curr_batch)
self._curr_batch = Batch()
self._brute_force_index.clear()
@override
def count(self, request_version_context: RequestVersionContext) -> int:
return (
len(self._id_to_label)
+ self._curr_batch.add_count
- self._curr_batch.delete_count
)
@trace_method(
"PersistentLocalHnswSegment.get_vectors", OpenTelemetryGranularity.ALL
)
@override
def get_vectors(
self,
request_version_context: RequestVersionContext,
ids: Optional[Sequence[str]] = None,
) -> Sequence[VectorEmbeddingRecord]:
"""Get the embeddings from the HNSW index and layered brute force
batch index."""
ids_hnsw: Set[str] = set()
ids_bf: Set[str] = set()
if self._index is not None:
ids_hnsw = set(self._id_to_label.keys())
if self._brute_force_index is not None:
ids_bf = set(self._curr_batch.get_written_ids())
target_ids = ids or list(ids_hnsw.union(ids_bf))
self._brute_force_index = cast(BruteForceIndex, self._brute_force_index)
hnsw_labels = []
results: List[Optional[VectorEmbeddingRecord]] = []
id_to_index: Dict[str, int] = {}
for i, id in enumerate(target_ids):
if id in ids_bf:
results.append(self._brute_force_index.get_vectors([id])[0])
elif id in ids_hnsw and id not in self._curr_batch._deleted_ids:
hnsw_labels.append(self._id_to_label[id])
# Placeholder for hnsw results to be filled in down below so we
# can batch the hnsw get() call
results.append(None)
id_to_index[id] = i
if len(hnsw_labels) > 0 and self._index is not None:
vectors = cast(
Sequence[Vector], np.array(self._index.get_items(hnsw_labels))
) # version 0.8 of hnswlib allows return_type="numpy"
for label, vector in zip(hnsw_labels, vectors):
id = self._label_to_id[label]
results[id_to_index[id]] = VectorEmbeddingRecord(
id=id, embedding=vector
)
return results # type: ignore ## Python can't cast List with Optional to List with VectorEmbeddingRecord
@trace_method(
"PersistentLocalHnswSegment.query_vectors", OpenTelemetryGranularity.ALL
)
@override
def query_vectors(
self, query: VectorQuery
) -> Sequence[Sequence[VectorQueryResult]]:
if self._index is None and self._brute_force_index is None:
return [[] for _ in range(len(query["vectors"]))]
k = query["k"]
if k > self.count(query["request_version_context"]):
count = self.count(query["request_version_context"])
logger.warning(
f"Number of requested results {k} is greater than number of elements in index {count}, updating n_results = {count}"
)
k = count
# Overquery by updated and deleted elements layered on the index because they may
# hide the real nearest neighbors in the hnsw index
hnsw_k = k + self._curr_batch.update_count + self._curr_batch.delete_count
# self._id_to_label contains the ids of the elements in the hnsw index
# so its length is the number of elements in the hnsw index
if hnsw_k > len(self._id_to_label):
hnsw_k = len(self._id_to_label)
hnsw_query = VectorQuery(
vectors=query["vectors"],
k=hnsw_k,
allowed_ids=query["allowed_ids"],
include_embeddings=query["include_embeddings"],
options=query["options"],
request_version_context=query["request_version_context"],
)
# For each query vector, we want to take the top k results from the
# combined results of the brute force and hnsw index
results: List[List[VectorQueryResult]] = []
self._brute_force_index = cast(BruteForceIndex, self._brute_force_index)
with ReadRWLock(self._lock):
bf_results = self._brute_force_index.query(query)
hnsw_results = super().query_vectors(hnsw_query)
for i in range(len(query["vectors"])):
# Merge results into a single list of size k
bf_pointer: int = 0
hnsw_pointer: int = 0
curr_bf_result: Sequence[VectorQueryResult] = bf_results[i]
curr_hnsw_result: Sequence[VectorQueryResult] = hnsw_results[i]
# Filter deleted results that haven't yet been removed from the persisted index
curr_hnsw_result = [
x
for x in curr_hnsw_result
if not self._curr_batch.is_deleted(x["id"])
]
curr_results: List[VectorQueryResult] = []
# In the case where filters cause the number of results to be less than k,
# we set k to be the number of results
total_results = len(curr_bf_result) + len(curr_hnsw_result)
if total_results == 0:
results.append([])
else:
while len(curr_results) < min(k, total_results):
if bf_pointer < len(curr_bf_result) and hnsw_pointer < len(
curr_hnsw_result
):
bf_dist = curr_bf_result[bf_pointer]["distance"]
hnsw_dist = curr_hnsw_result[hnsw_pointer]["distance"]
if bf_dist <= hnsw_dist:
curr_results.append(curr_bf_result[bf_pointer])
bf_pointer += 1
else:
id = curr_hnsw_result[hnsw_pointer]["id"]
# Only add the hnsw result if it is not in the brute force index
if not self._brute_force_index.has_id(id):
curr_results.append(curr_hnsw_result[hnsw_pointer])
hnsw_pointer += 1
else:
break
remaining = min(k, total_results) - len(curr_results)
if remaining > 0 and hnsw_pointer < len(curr_hnsw_result):
for i in range(
hnsw_pointer,
min(len(curr_hnsw_result), hnsw_pointer + remaining),
):
id = curr_hnsw_result[i]["id"]
if not self._brute_force_index.has_id(id):
curr_results.append(curr_hnsw_result[i])
elif remaining > 0 and bf_pointer < len(curr_bf_result):
curr_results.extend(
curr_bf_result[bf_pointer : bf_pointer + remaining]
)
results.append(curr_results)
return results
@trace_method(
"PersistentLocalHnswSegment.reset_state", OpenTelemetryGranularity.ALL
)
@override
def reset_state(self) -> None:
if self._allow_reset:
data_path = self._get_storage_folder()
if os.path.exists(data_path):
self.close_persistent_index()
shutil.rmtree(data_path, ignore_errors=True)
@trace_method("PersistentLocalHnswSegment.delete", OpenTelemetryGranularity.ALL)
@override
def delete(self) -> None:
data_path = self._get_storage_folder()
if os.path.exists(data_path):
self.close_persistent_index()
shutil.rmtree(data_path, ignore_errors=False)
@staticmethod
def get_file_handle_count() -> int:
"""Return how many file handles are used by the index"""
hnswlib_count = hnswlib.Index.file_handle_count
hnswlib_count = cast(int, hnswlib_count)
# One extra for the metadata file
return hnswlib_count + 1 # type: ignore
def open_persistent_index(self) -> None:
"""Open the persistent index"""
if self._index is not None:
self._index.open_file_handles()
@override
def stop(self) -> None:
super().stop()
self.close_persistent_index()
def close_persistent_index(self) -> None:
"""Close the persistent index"""
if self._index is not None:
self._index.close_file_handles()
| PersistentLocalHnswSegment |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1295840,
"end": 1296940
} | class ____(sgqlc.types.Type, Node):
"""A file in a package version."""
__schema__ = github_schema
__field_names__ = ("md5", "name", "package_version", "sha1", "sha256", "size", "updated_at", "url")
md5 = sgqlc.types.Field(String, graphql_name="md5")
"""MD5 hash of the file."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""Name of the file."""
package_version = sgqlc.types.Field("PackageVersion", graphql_name="packageVersion")
"""The package version this file belongs to."""
sha1 = sgqlc.types.Field(String, graphql_name="sha1")
"""SHA1 hash of the file."""
sha256 = sgqlc.types.Field(String, graphql_name="sha256")
"""SHA256 hash of the file."""
size = sgqlc.types.Field(Int, graphql_name="size")
"""Size of the file in bytes."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(URI, graphql_name="url")
"""URL to download the asset."""
| PackageFile |
python | kamyu104__LeetCode-Solutions | Python/count-special-integers.py | {
"start": 51,
"end": 756
} | class ____(object):
def countSpecialNumbers(self, n):
"""
:type n: int
:rtype: int
"""
def P(m, n):
result = 1
for _ in xrange(n):
result *= m
m -= 1
return result
digits = map(int, str(n+1))
result = sum(P(9, 1)*P(9, i-1) for i in xrange(1, len(digits)))
lookup = set()
for i, x in enumerate(digits):
for y in xrange(int(i == 0), x):
if y in lookup:
continue
result += P(9-i, len(digits)-i-1)
if x in lookup:
break
lookup.add(x)
return result
| Solution |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 154733,
"end": 155320
} | class ____(IRNode):
def get_reads(self) -> OrderedSet[Dep]:
return OrderedSet()
@cache_on_self_and_args("NoneAsConstantBuffer")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def codegen_reference(self, writer: Optional[IndentedBuffer] = None) -> str:
return V.graph.wrapper_code.none_str
def get_output_spec(self) -> OutputSpec:
return NoneLayout(device=None)
def has_tensor_output(self) -> bool:
return False
@ir_dataclass
| NoneAsConstantBuffer |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 19546,
"end": 19861
} | class ____(_ParamMixin):
def __init__(self, param_name):
self._wrapped_param_name = param_name
def __getattr__(self, name):
return getattr(self._wrapped_param_name, name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._wrapped_param_name)
| ParamNameWrapper |
python | PrefectHQ__prefect | src/prefect/_versioning.py | {
"start": 1474,
"end": 12380
} | class ____(VersionInfo):
type: Literal["vcs:git"] = "vcs:git"
version: str
commit_sha: str
message: str
branch: str
repository: str
url: str
async def get_github_version_info(
commit_sha: Optional[str] = None,
message: Optional[str] = None,
branch: Optional[str] = None,
repository: Optional[str] = None,
url: Optional[str] = None,
) -> GithubVersionInfo:
"""Create a GithubVersionInfo object from provided values or environment variables.
Args:
commit_sha: The commit SHA, falls back to GITHUB_SHA env var
message: The commit message, falls back to git log -1 --pretty=%B
branch: The git branch, falls back to GITHUB_REF_NAME env var
repository: The repository name, falls back to GITHUB_REPOSITORY env var
url: The repository URL, constructed from GITHUB_SERVER_URL/GITHUB_REPOSITORY/tree/GITHUB_SHA if not provided
Returns:
A GithubVersionInfo
Raises:
ValueError: If any required fields cannot be determined
"""
try:
commit_sha = commit_sha or os.getenv("GITHUB_SHA")
branch = branch or os.getenv("GITHUB_REF_NAME")
repository = repository or os.getenv("GITHUB_REPOSITORY")
url = url or f"{os.getenv('GITHUB_SERVER_URL')}/{repository}/tree/{commit_sha}"
if not message:
message = await get_commit_message_first_line()
if not commit_sha:
raise ValueError(
"commit_sha is required - must be provided or set in GITHUB_SHA"
)
if not branch:
raise ValueError(
"branch is required - must be provided or set in GITHUB_REF_NAME"
)
if not repository:
raise ValueError(
"repository is required - must be provided or set in GITHUB_REPOSITORY"
)
except Exception as e:
raise ValueError(
f"Error getting git version info: {e}. You may not be in a Github repository."
)
return GithubVersionInfo(
type="vcs:github",
version=commit_sha[:8],
commit_sha=commit_sha,
message=message,
branch=branch,
repository=repository,
url=url,
)
async def get_gitlab_version_info(
commit_sha: Optional[str] = None,
message: Optional[str] = None,
branch: Optional[str] = None,
repository: Optional[str] = None,
url: Optional[str] = None,
) -> GitlabVersionInfo:
"""Create a GitlabVersionInfo object from provided values or environment variables.
Args:
commit_sha: The commit SHA, falls back to CI_COMMIT_SHA env var
message: The commit message, falls back to git log -1 --pretty=%B
branch: The git branch, falls back to CI_COMMIT_REF_NAME env var
repository: The repository name, falls back to CI_PROJECT_NAME env var
url: The repository URL, constructed from CI_PROJECT_URL/-/tree/CI_COMMIT_SHA if not provided
Returns:
A GitlabVersionInfo
Raises:
ValueError: If any required fields cannot be determined
"""
try:
commit_sha = commit_sha or os.getenv("CI_COMMIT_SHA")
branch = branch or os.getenv("CI_COMMIT_REF_NAME")
repository = repository or os.getenv("CI_PROJECT_NAME")
url = url or f"{os.getenv('CI_PROJECT_URL')}/-/tree/{commit_sha}"
if not message:
message = await get_commit_message_first_line()
if not commit_sha:
raise ValueError(
"commit_sha is required - must be provided or set in CI_COMMIT_SHA"
)
if not branch:
raise ValueError(
"branch is required - must be provided or set in CI_COMMIT_REF_NAME"
)
if not repository:
raise ValueError(
"repository is required - must be provided or set in CI_PROJECT_NAME"
)
if not url:
raise ValueError(
"url is required - must be provided or set in CI_PROJECT_URL"
)
except Exception as e:
raise ValueError(
f"Error getting git version info: {e}. You may not be in a Gitlab repository."
)
return GitlabVersionInfo(
type="vcs:gitlab",
version=commit_sha[:8],
commit_sha=commit_sha,
message=message,
branch=branch,
repository=repository,
url=url,
)
async def get_bitbucket_version_info(
commit_sha: Optional[str] = None,
message: Optional[str] = None,
branch: Optional[str] = None,
repository: Optional[str] = None,
url: Optional[str] = None,
) -> BitbucketVersionInfo:
"""Create a BitbucketVersionInfo object from provided values or environment variables.
Args:
commit_sha: The commit SHA, falls back to BITBUCKET_COMMIT env var
message: The commit message, falls back to git log -1 --pretty=%B
branch: The git branch, falls back to BITBUCKET_BRANCH env var
repository: The repository name, falls back to BITBUCKET_REPO_SLUG env var
url: The repository URL, constructed from BITBUCKET_GIT_HTTP_ORIGIN/BITBUCKET_REPO_SLUG/src/BITBUCKET_COMMIT if not provided
Returns:
A BitbucketVersionInfo
Raises:
ValueError: If any required fields cannot be determined
"""
try:
commit_sha = commit_sha or os.getenv("BITBUCKET_COMMIT")
branch = branch or os.getenv("BITBUCKET_BRANCH")
repository = repository or os.getenv("BITBUCKET_REPO_SLUG")
url = url or f"{os.getenv('BITBUCKET_GIT_HTTP_ORIGIN')}/src/{commit_sha}"
if not message:
message = await get_commit_message_first_line()
if not commit_sha:
raise ValueError(
"commit_sha is required - must be provided or set in BITBUCKET_COMMIT"
)
if not branch:
raise ValueError(
"branch is required - must be provided or set in BITBUCKET_BRANCH"
)
if not repository:
raise ValueError(
"repository is required - must be provided or set in BITBUCKET_REPO_SLUG"
)
if not url:
raise ValueError(
"url is required - must be provided or set in BITBUCKET_GIT_HTTP_ORIGIN"
)
except Exception as e:
raise ValueError(
f"Error getting git version info: {e}. You may not be in a Bitbucket repository."
)
return BitbucketVersionInfo(
type="vcs:bitbucket",
version=commit_sha[:8],
commit_sha=commit_sha,
message=message,
branch=branch,
repository=repository,
url=url,
)
async def get_azuredevops_version_info(
commit_sha: Optional[str] = None,
message: Optional[str] = None,
branch: Optional[str] = None,
repository: Optional[str] = None,
url: Optional[str] = None,
) -> AzureDevopsVersionInfo:
"""Create an AzureDevopsVersionInfo object from provided values or environment variables.
Args:
commit_sha: The commit SHA, falls back to BUILD_SOURCEVERSION env var
message: The commit message, falls back to git log -1 --pretty=%B
branch: The git branch, falls back to BUILD_SOURCEBRANCHNAME env var
repository: The repository name, falls back to BUILD_REPOSITORY_NAME env var
url: The repository URL, constructed from BUILD_REPOSITORY_URI?version=GCBUILD_SOURCEVERSION if not provided
Returns:
An AzureDevopsVersionInfo
Raises:
ValueError: If any required fields cannot be determined
"""
try:
commit_sha = commit_sha or os.getenv("BUILD_SOURCEVERSION")
branch = branch or os.getenv("BUILD_SOURCEBRANCHNAME")
repository = repository or os.getenv("BUILD_REPOSITORY_NAME")
url = url or f"{os.getenv('BUILD_REPOSITORY_URI')}?version=GC{commit_sha}"
if not message:
message = await get_commit_message_first_line()
if not commit_sha:
raise ValueError(
"commit_sha is required - must be provided or set in BUILD_SOURCEVERSION"
)
if not branch:
raise ValueError(
"branch is required - must be provided or set in BUILD_SOURCEBRANCHNAME"
)
if not repository:
raise ValueError(
"repository is required - must be provided or set in BUILD_REPOSITORY_NAME"
)
if not url:
raise ValueError(
"url is required - must be provided or set in BUILD_REPOSITORY_URI"
)
except Exception as e:
raise ValueError(
f"Error getting git version info: {e}. You may not be in an Azure DevOps repository."
)
return AzureDevopsVersionInfo(
type="vcs:azuredevops",
version=commit_sha[:8],
commit_sha=commit_sha,
message=message,
branch=branch,
repository=repository,
url=url,
)
async def get_git_version_info(
commit_sha: Optional[str] = None,
message: Optional[str] = None,
branch: Optional[str] = None,
url: Optional[str] = None,
repository: Optional[str] = None,
) -> GitVersionInfo:
try:
if not commit_sha:
# Run git command and get stdout
result = await run_process(["git", "rev-parse", "HEAD"])
# Decode bytes to string and strip whitespace
commit_sha = result.stdout.decode().strip()
if not branch:
result = await run_process(["git", "rev-parse", "--abbrev-ref", "HEAD"])
branch = result.stdout.decode().strip()
if not repository:
result = await run_process(["git", "config", "--get", "remote.origin.url"])
remote_url = result.stdout.decode().strip()
# Extract just the repository name (last part of the path)
repo_url = urlparse(remote_url)
repository = repo_url.path.strip("/")
if repository.endswith(".git"):
repository = repository[:-4]
if not message:
message = await get_commit_message_first_line()
if not url and repository:
# Use the full remote URL as the URL
result = await run_process(["git", "config", "--get", "remote.origin.url"])
url = result.stdout.decode().strip()
except Exception as e:
raise ValueError(
f"Error getting git version info: {e}. You may not be in a git repository."
)
if not url:
raise ValueError("Could not determine git repository URL")
return GitVersionInfo(
type="vcs:git",
version=commit_sha[:8],
branch=branch,
url=url,
repository=repository,
commit_sha=commit_sha,
message=message,
)
| GitVersionInfo |
python | ansible__ansible | test/integration/targets/ansible-test-container/runme.py | {
"start": 23446,
"end": 23646
} | class ____:
ssh: User = None
remote: User = None
@property
def actual(self) -> User:
return self.remote or self.ssh or ROOT_USER
@dataclasses.dataclass(frozen=True)
| UserScenario |
python | cython__cython | Cython/Distutils/old_build_ext.py | {
"start": 2303,
"end": 13723
} | class ____(_build_ext.build_ext):
description = "build C/C++ and Cython extensions (compile/link to build directory)"
sep_by = _build_ext.build_ext.sep_by
user_options = _build_ext.build_ext.user_options[:]
boolean_options = _build_ext.build_ext.boolean_options[:]
help_options = _build_ext.build_ext.help_options[:]
# Add the pyrex specific data.
user_options.extend([
('cython-cplus', None,
"generate C++ source files"),
('cython-create-listing', None,
"write errors to a listing file"),
('cython-line-directives', None,
"emit source line directives"),
('cython-include-dirs=', None,
"path to the Cython include files" + sep_by),
('cython-c-in-temp', None,
"put generated C files in temp directory"),
('cython-gen-pxi', None,
"generate .pxi file for public declarations"),
('cython-directives=', None,
"compiler directive overrides"),
('cython-gdb', None,
"generate debug information for cygdb"),
('cython-compile-time-env', None,
"cython compile time environment"),
# For backwards compatibility.
('pyrex-cplus', None,
"generate C++ source files"),
('pyrex-create-listing', None,
"write errors to a listing file"),
('pyrex-line-directives', None,
"emit source line directives"),
('pyrex-include-dirs=', None,
"path to the Cython include files" + sep_by),
('pyrex-c-in-temp', None,
"put generated C files in temp directory"),
('pyrex-gen-pxi', None,
"generate .pxi file for public declarations"),
('pyrex-directives=', None,
"compiler directive overrides"),
('pyrex-gdb', None,
"generate debug information for cygdb"),
])
boolean_options.extend([
'cython-cplus', 'cython-create-listing', 'cython-line-directives',
'cython-c-in-temp', 'cython-gdb',
# For backwards compatibility.
'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
'pyrex-c-in-temp', 'pyrex-gdb',
])
def initialize_options(self):
_build_ext.build_ext.initialize_options(self)
self.cython_cplus = 0
self.cython_create_listing = 0
self.cython_line_directives = 0
self.cython_include_dirs = None
self.cython_directives = None
self.cython_c_in_temp = 0
self.cython_gen_pxi = 0
self.cython_gdb = False
self.no_c_in_traceback = 0
self.cython_compile_time_env = None
def __getattr__(self, name):
if name[:6] == 'pyrex_':
return getattr(self, 'cython_' + name[6:])
else:
return _build_ext.build_ext.__getattr__(self, name)
def __setattr__(self, name, value):
if name[:6] == 'pyrex_':
return setattr(self, 'cython_' + name[6:], value)
else:
# _build_ext.build_ext.__setattr__(self, name, value)
self.__dict__[name] = value
def finalize_options(self):
_build_ext.build_ext.finalize_options(self)
if self.cython_include_dirs is None:
self.cython_include_dirs = []
elif isinstance(self.cython_include_dirs, str):
self.cython_include_dirs = \
self.cython_include_dirs.split(os.pathsep)
if self.cython_directives is None:
self.cython_directives = {}
# finalize_options ()
def run(self):
# We have one shot at this before build_ext initializes the compiler.
# If --pyrex-gdb is in effect as a command line option or as option
# of any Extension module, disable optimization for the C or C++
# compiler.
if self.cython_gdb or [1 for ext in self.extensions
if getattr(ext, 'cython_gdb', False)]:
optimization.disable_optimization()
_build_ext.build_ext.run(self)
def check_extensions_list(self, extensions):
# Note: might get called multiple times.
_build_ext.build_ext.check_extensions_list(self, extensions)
for ext in self.extensions:
ext.sources = self.cython_sources(ext.sources, ext)
def cython_sources(self, sources, extension):
"""
Walk the list of source files in 'sources', looking for Cython
source files (.pyx and .py). Run Cython on all that are
found, and return a modified 'sources' list with Cython source
files replaced by the generated C (or C++) files.
"""
new_sources = []
cython_sources = []
cython_targets = {}
# Setup create_list and cplus from the extension options if
# Cython.Distutils.extension.Extension is used, otherwise just
# use what was parsed from the command-line or the configuration file.
# cplus will also be set to true is extension.language is equal to
# 'C++' or 'c++'.
#try:
# create_listing = self.cython_create_listing or \
# extension.cython_create_listing
# cplus = self.cython_cplus or \
# extension.cython_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
#except AttributeError:
# create_listing = self.cython_create_listing
# cplus = self.cython_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
create_listing = self.cython_create_listing or \
getattr(extension, 'cython_create_listing', 0)
line_directives = self.cython_line_directives or \
getattr(extension, 'cython_line_directives', 0)
no_c_in_traceback = self.no_c_in_traceback or \
getattr(extension, 'no_c_in_traceback', 0)
cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \
(extension.language and extension.language.lower() == 'c++')
cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0)
cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False)
cython_compile_time_env = self.cython_compile_time_env or \
getattr(extension, 'cython_compile_time_env', None)
# Set up the include_path for the Cython compiler:
# 1. Start with the command line option.
# 2. Add in any (unique) paths from the extension
# cython_include_dirs (if Cython.Distutils.extension is used).
# 3. Add in any (unique) paths from the extension include_dirs
includes = list(self.cython_include_dirs)
try:
for i in extension.cython_include_dirs:
if i not in includes:
includes.append(i)
except AttributeError:
pass
# In case extension.include_dirs is a generator, evaluate it and keep
# result
extension.include_dirs = list(extension.include_dirs)
for i in extension.include_dirs:
if i not in includes:
includes.append(i)
# Set up Cython compiler directives:
# 1. Start with the command line option.
# 2. Add in any (unique) entries from the extension
# cython_directives (if Cython.Distutils.extension is used).
directives = dict(self.cython_directives)
if hasattr(extension, "cython_directives"):
directives.update(extension.cython_directives)
# Set the target file extension for C/C++ mode.
if cplus:
target_ext = '.cpp'
else:
target_ext = '.c'
# Decide whether to drop the generated C files into the temp dir
# or the source tree.
if not self.inplace and (self.cython_c_in_temp
or getattr(extension, 'cython_c_in_temp', 0)):
target_dir = os.path.join(self.build_temp, "pyrex")
for package_name in extension.name.split('.')[:-1]:
target_dir = os.path.join(target_dir, package_name)
else:
target_dir = None
newest_dependency = None
for source in sources:
(base, ext) = os.path.splitext(os.path.basename(source))
if ext == ".py":
# FIXME: we might want to special case this some more
ext = '.pyx'
if ext == ".pyx": # Cython source file
output_dir = target_dir or os.path.dirname(source)
new_sources.append(os.path.join(output_dir, base + target_ext))
cython_sources.append(source)
cython_targets[source] = new_sources[-1]
elif ext == '.pxi' or ext == '.pxd':
if newest_dependency is None \
or newer(source, newest_dependency):
newest_dependency = source
else:
new_sources.append(source)
if not cython_sources:
return new_sources
try:
from Cython.Compiler.Main \
import CompilationOptions, \
default_options as cython_default_options, \
compile as cython_compile
from Cython.Compiler.Errors import PyrexError
except ImportError:
e = sys.exc_info()[1]
print("failed to import Cython: %s" % e)
raise DistutilsPlatformError("Cython does not appear to be installed")
module_name = extension.name
for source in cython_sources:
target = cython_targets[source]
depends = [source] + list(extension.depends or ())
if source[-4:].lower() == ".pyx" and os.path.isfile(source[:-3] + "pxd"):
depends += [source[:-3] + "pxd"]
rebuild = self.force or newer_group(depends, target, 'newer')
if not rebuild and newest_dependency is not None:
rebuild = newer(newest_dependency, target)
if rebuild:
log.info("cythoning %s to %s", source, target)
self.mkpath(os.path.dirname(target))
if self.inplace:
output_dir = os.curdir
else:
output_dir = self.build_lib
options = CompilationOptions(cython_default_options,
use_listing_file = create_listing,
include_path = includes,
compiler_directives = directives,
output_file = target,
cplus = cplus,
emit_linenums = line_directives,
c_line_in_traceback = not no_c_in_traceback,
generate_pxi = cython_gen_pxi,
output_dir = output_dir,
gdb_debug = cython_gdb,
compile_time_env = cython_compile_time_env)
result = cython_compile(source, options=options,
full_module_name=module_name)
else:
log.info("skipping '%s' Cython extension (up-to-date)", target)
return new_sources
# cython_sources ()
# class build_ext
| old_build_ext |
python | django__django | tests/signed_cookies_tests/tests.py | {
"start": 216,
"end": 3395
} | class ____(SimpleTestCase):
def test_can_set_and_read_signed_cookies(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
self.assertIn("c", response.cookies)
self.assertTrue(response.cookies["c"].value.startswith("hello:"))
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
value = request.get_signed_cookie("c")
self.assertEqual(value, "hello")
def test_can_use_salt(self):
response = HttpResponse()
response.set_signed_cookie("a", "hello", salt="one")
request = HttpRequest()
request.COOKIES["a"] = response.cookies["a"].value
value = request.get_signed_cookie("a", salt="one")
self.assertEqual(value, "hello")
with self.assertRaises(signing.BadSignature):
request.get_signed_cookie("a", salt="two")
def test_detects_tampering(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value[:-2] + "$$"
with self.assertRaises(signing.BadSignature):
request.get_signed_cookie("c")
def test_default_argument_suppresses_exceptions(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value[:-2] + "$$"
self.assertIsNone(request.get_signed_cookie("c", default=None))
def test_max_age_argument(self):
value = "hello"
with freeze_time(123456789):
response = HttpResponse()
response.set_signed_cookie("c", value)
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
self.assertEqual(request.get_signed_cookie("c"), value)
with freeze_time(123456800):
self.assertEqual(request.get_signed_cookie("c", max_age=12), value)
self.assertEqual(request.get_signed_cookie("c", max_age=11), value)
self.assertEqual(
request.get_signed_cookie("c", max_age=timedelta(seconds=11)), value
)
with self.assertRaises(signing.SignatureExpired):
request.get_signed_cookie("c", max_age=10)
with self.assertRaises(signing.SignatureExpired):
request.get_signed_cookie("c", max_age=timedelta(seconds=10))
def test_set_signed_cookie_max_age_argument(self):
response = HttpResponse()
response.set_signed_cookie("c", "value", max_age=100)
self.assertEqual(response.cookies["c"]["max-age"], 100)
response.set_signed_cookie("d", "value", max_age=timedelta(hours=2))
self.assertEqual(response.cookies["d"]["max-age"], 7200)
@override_settings(SECRET_KEY=b"\xe7")
def test_signed_cookies_with_binary_key(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
self.assertEqual(request.get_signed_cookie("c"), "hello")
| SignedCookieTest |
python | pyodide__pyodide | src/tests/test_typeconversions.py | {
"start": 470,
"end": 55771
} | class ____(pickle.Unpickler):
def find_class(self, module, name):
# Only allow safe classes from builtins.
if module == "hypothesis":
raise pickle.UnpicklingError()
return super().find_class(module, name)
def no_hypothesis(x):
try:
NoHypothesisUnpickler(io.BytesIO(pickle.dumps(x))).load()
return True
except Exception:
return False
@given(s=text())
@settings(deadline=10000)
@example("\ufeff")
def test_string_conversion(selenium_module_scope, s):
@run_in_pyodide
def main(selenium, sbytes):
from pyodide.code import run_js
run_js("self.encoder = new TextEncoder()")
run_js("self.decoder = new TextDecoder('utf8', {ignoreBOM: true})")
spy = bytes(sbytes).decode()
sjs = run_js(
"""
(sbytes) => {
self.sjs = self.decoder.decode(new Uint8Array(sbytes));
return sjs;
}
"""
)(sbytes)
assert sjs == spy
assert run_js("(spy) => spy === self.sjs")(spy)
with selenium_context_manager(selenium_module_scope) as selenium:
sbytes = list(s.encode())
main(selenium, sbytes)
@given(s=text())
@std_hypothesis_settings
@example("\ufeff")
@run_in_pyodide
def test_string_conversion2(selenium, s):
from pyodide.code import run_js
run_js("self.encoder = new TextEncoder()")
run_js("self.decoder = new TextDecoder('utf8', {ignoreBOM: true})")
s_encoded = s.encode()
sjs = run_js(
"""
(s_encoded) => {
let buf = s_encoded.getBuffer();
self.sjs = self.decoder.decode(buf.data);
buf.release();
return sjs
}
"""
)(s_encoded)
assert sjs == s
assert run_js("""(spy) => spy === self.sjs""")(s)
def blns():
import base64
import json
with open("./src/tests/blns.base64.json") as f:
BLNS = json.load(f)
for s in BLNS:
yield base64.b64decode(s).decode(errors="ignore")
@pytest.mark.driver_timeout(60)
def test_string_conversion_blns(selenium):
@run_in_pyodide
def _string_conversion_blns_internal(selenium, s):
from pyodide.code import run_js
run_js("self.encoder = new TextEncoder()")
run_js("self.decoder = new TextDecoder('utf8', {ignoreBOM: true})")
s_encoded = s.encode()
sjs = run_js(
"""
(s_encoded) => {
let buf = s_encoded.getBuffer();
self.sjs = self.decoder.decode(buf.data);
buf.release();
return sjs
}
"""
)(s_encoded)
assert sjs == s
assert run_js("""(spy) => spy === self.sjs""")(s)
strings = blns()
for s in strings:
_string_conversion_blns_internal(selenium, s)
@run_in_pyodide
def test_large_string_conversion(selenium):
from pyodide.code import run_js
longstr = run_js('"ab".repeat(200_000)')
res = longstr.count("ab")
assert res == 200_000
run_js(
"""
(s) => {
assert(() => s.length === 40_000);
for(let n = 0; n < 20_000; n++){
assert(() => s.slice(2*n, 2*n+2) === "ab");
}
}
"""
)("ab" * 20_000)
@given(
n=st.one_of(
st.integers(),
st.floats(allow_nan=False),
)
)
@std_hypothesis_settings
@example(2**53)
@example(2**53 - 1)
@example(2**53 + 1)
@example(-(2**53))
@example(-(2**53) - 1)
@example(-(2**53) + 1)
@run_in_pyodide
def test_number_conversions(selenium_module_scope, n):
import json
from pyodide.code import run_js
x_js = run_js("(s) => self.x_js = eval(s)")(json.dumps(n))
run_js("(x_py) => Number(x_py) === x_js")(n)
if isinstance(x_js, float):
assert x_js == float(n)
else:
assert x_js == n
@given(n=st.floats())
@std_hypothesis_settings
@run_in_pyodide
def test_number_conversions_2(selenium_module_scope, n):
from pyodide.code import run_js
assert run_js("(n) => typeof n")(n) == "number"
from math import isinf, isnan
if isnan(n):
return
import json
n_js = run_js("(s) => eval(s)")(json.dumps(n))
if not isinf(n) and float(int(n)) == n and -(2**53) < n < 2**53:
assert isinstance(n_js, int)
else:
assert isinstance(n_js, float)
@given(n=st.integers())
@std_hypothesis_settings
@example(2**53)
@example(2**53 - 1)
@example(2**53 + 1)
@example(-(2**53))
@example(-(2**53) - 1)
@example(-(2**53) + 1)
@run_in_pyodide
def test_number_conversions_3(selenium_module_scope, n):
from pyodide.code import run_js
jsty = run_js("(n) => typeof n")(n)
if -(2**53) + 1 < n < 2**53 - 1:
assert jsty == "number"
else:
assert jsty == "bigint"
import json
n_js = run_js("(s) => eval(s)")(json.dumps(n))
if -(2**53) < n < 2**53:
assert isinstance(n_js, int)
else:
assert isinstance(n_js, float)
@run_in_pyodide
def test_nan_conversions(selenium):
from pyodide.code import run_js
jsnan = run_js("NaN")
from math import isnan
assert isnan(jsnan)
assert run_js(
"""
let mathmod = pyodide.pyimport("math");
const res = Number.isNaN(mathmod.nan);
mathmod.destroy();
res
"""
)
@given(n=st.integers())
@std_hypothesis_settings
def test_bigint_conversions(selenium_module_scope, n):
with selenium_context_manager(selenium_module_scope) as selenium:
h = hex(n)
selenium.run_js(f"self.h = {h!r};")
selenium.run_js(
"""
let negative = false;
let h2 = h;
if(h2.startsWith('-')){
h2 = h2.slice(1);
negative = true;
}
self.n = BigInt(h2);
if(negative){
self.n = -n;
}
pyodide.runPython(`
from js import n, h
n2 = int(h, 16)
assert n == n2
`);
let n2 = pyodide.globals.get("n2");
let n3 = Number(n2);
if(Number.isSafeInteger(n3)){
assert(() => typeof n2 === "number");
assert(() => n2 === Number(n));
} else {
assert(() => typeof n2 === "bigint");
assert(() => n2 === n);
}
"""
)
@given(
n=st.one_of(
st.integers(min_value=2**53 + 1),
st.integers(max_value=-(2**53) - 1),
)
)
@std_hypothesis_settings
def test_big_int_conversions2(selenium_module_scope, n):
@run_in_pyodide
def main(selenium, s):
import json
from pyodide.code import run_js
x_py = json.loads(s)
x_js, check = run_js(
"""
(s, x_py) => {
let x_js = eval(s + 'n');
return [x_js, x_py === x_js];
}
"""
)(s, x_py)
assert check
assert x_js == x_py
with selenium_context_manager(selenium_module_scope) as selenium:
import json
s = json.dumps(n)
main(selenium, s)
@given(
n=st.integers(),
exp=st.integers(min_value=1, max_value=10),
)
@std_hypothesis_settings
def test_big_int_conversions3(selenium_module_scope, n, exp):
@run_in_pyodide
def main(selenium, s):
import json
from pyodide.code import run_js
x_py = json.loads(s)
x_js = run_js(
f"""
self.x_js = eval('{s}n'); // JSON.parse apparently doesn't work
"""
)
[x1, x2] = run_js(
"""
(x_py) => [x_py.toString(), x_js.toString()]
"""
)(x_py)
assert x1 == x2
check = run_js(
"""
(x) => {
const [a, b] = x.toJs();
return a === b;
}
"""
)([str(x_js), str(x_py)])
assert check
with selenium_context_manager(selenium_module_scope) as selenium:
val = 2 ** (32 * exp) - n
import json
s = json.dumps(val)
main(selenium, s)
@given(obj=any_equal_to_self_strategy.filter(no_hypothesis))
@std_hypothesis_settings
@run_in_pyodide
def test_hyp_py2js2py(selenium, obj):
import __main__
from pyodide.code import run_js
__main__.obj = obj
try:
run_js('self.obj2 = pyodide.globals.get("obj"); 0;')
from js import obj2 # type:ignore[attr-defined]
assert obj2 == obj
run_js(
"""
if(self.obj2 && self.obj2.destroy){
self.obj2.destroy();
}
delete self.obj2
"""
)
finally:
del __main__.obj
@given(obj=any_equal_to_self_strategy.filter(no_hypothesis))
@std_hypothesis_settings
@run_in_pyodide
def test_hyp_py2js2py_2(selenium, obj):
import __main__
from pyodide.code import run_js
__main__.o = obj
try:
assert obj == run_js("pyodide.globals.get('o')")
finally:
del __main__.o
@pytest.mark.parametrize("a", [9992361673228537, -9992361673228537])
@run_in_pyodide
def test_big_integer_py2js2py(selenium, a):
import __main__
from pyodide.code import run_js
__main__.a = a
try:
b = run_js("pyodide.globals.get('a')")
assert a == b
finally:
del __main__.a
# Generate an object of any type
@pytest.mark.skip_refcount_check
@pytest.mark.skip_pyproxy_check
@given(obj=any_strategy.filter(no_hypothesis))
@std_hypothesis_settings
@run_in_pyodide
def test_hyp_tojs_no_crash(selenium, obj):
import __main__
from pyodide.code import run_js
__main__.x = obj
try:
run_js(
"""
let x = pyodide.globals.get("x");
if(x && x.toJs){
x.toJs();
}
"""
)
finally:
del __main__.x
@pytest.mark.skip_refcount_check
@pytest.mark.skip_pyproxy_check
@given(obj=any_strategy.filter(no_hypothesis))
@example(obj=range(0, 2147483648)) # length is too big to fit in ssize_t
@settings(
std_hypothesis_settings,
max_examples=25,
)
@run_in_pyodide
def test_hypothesis(selenium_standalone, obj):
from pyodide.ffi import to_js
to_js(obj)
@pytest.mark.parametrize(
"py,js",
[
(None, "undefined"),
(True, "true"),
(False, "false"),
(42, "42"),
(3.14, "3.14"),
# Need to test all three internal string representations in Python:
# UCS1, UCS2 and UCS4
("ascii", "'ascii'"),
("ιωδιούχο", "'ιωδιούχο'"),
("碘化物", "'碘化物'"),
("🐍", "'🐍'"),
],
)
@run_in_pyodide
def test_python2js1(selenium, py, js):
from pyodide.code import run_js
assert run_js(
f"""
(py) => py === {js}
"""
)(py)
@run_in_pyodide
def test_python2js2(selenium):
from pyodide.code import run_js
assert list(
run_js(
"""
(x) => {
x = x.toJs();
return [x.constructor.name, x.length, x[0]];
}
"""
)(b"bytes")
) == ["Uint8Array", 5, 98]
@run_in_pyodide
def test_python2js3(selenium):
from pyodide.code import run_js
l = [7, 9, 13]
result = run_js(
"""
(proxy) => {
x = proxy.toJs();
return [proxy.type, x.constructor.name, x.length, x[0], x[1], x[2]]
}
"""
)(l)
assert list(result) == ["list", "Array", 3, *l]
@run_in_pyodide
def test_python2js4(selenium):
from pyodide.code import run_js
assert list(
run_js(
"""
(proxy) => {
let typename = proxy.type;
let x = proxy.toJs();
return [proxy.type, x.constructor.name, x[42]];
}
"""
)({42: 64})
) == ["dict", "Object", 64]
@run_in_pyodide
def test_python2js5(selenium):
from pyodide.code import run_js
assert run_js("(x) => x.tell()")(open("/foo.txt", "wb")) == 0
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
contents = ["a\n", "b\n", "hello there!\n"]
f.writelines(contents)
assert run_js("(f) => f.tell()")(f) == 17
assert (
run_js(
"""
(f) => {
f.seek(0);
return [f.readline(), f.readline(), f.readline()];
}
"""
)(f).to_py()
== contents
)
def test_python2js_track_proxies(selenium):
selenium.run_js(
"""
let x = pyodide.runPython(`
class T:
pass
[[T()],[T()], [[[T()],[T()]],[T(), [], [[T()]], T()], T(), T()], T()]
`);
let proxies = [];
let result = x.toJs({ pyproxies : proxies });
assert(() => proxies.length === 10);
for(let x of proxies){
x.destroy();
}
function check(l){
for(let x of l){
if(x instanceof pyodide.ffi.PyProxy){
assert(() => !pyodide._api.pyproxyIsAlive(x));
} else {
check(x);
}
}
}
check(result);
assertThrows(() => x.toJs({create_pyproxies : false}), "PythonError", "pyodide.ffi.ConversionError");
x.destroy();
"""
)
@run_in_pyodide
def test_wrong_way_track_proxies(selenium):
from pyodide.code import run_js
checkDestroyed = run_js(
"""
function checkDestroyed(l){
for(let e of l){
if(e instanceof pyodide.ffi.PyProxy){
assert(() => !pyodide._api.pyproxyIsAlive(e));
} else {
checkDestroyed(e);
}
}
};
checkDestroyed
"""
)
from unittest import TestCase
from js import Array, Object
from pyodide.ffi import ConversionError, destroy_proxies, to_js
raises = TestCase().assertRaises
class T:
pass
x = [[T()], [T()], [[[T()], [T()]], [T(), [], [[T()]], T()], T(), T()], T()]
proxylist = Array.new()
r = to_js(x, pyproxies=proxylist)
assert len(proxylist) == 10
destroy_proxies(proxylist)
checkDestroyed(r)
with raises(TypeError):
to_js(x, pyproxies=[]) # type:ignore[call-overload]
with raises(TypeError):
to_js(x, pyproxies=Object.new())
with raises(ConversionError):
to_js(x, create_pyproxies=False)
def test_wrong_way_conversions1(selenium):
selenium.run_js(
"""
assert(() => pyodide.toPy(5) === 5);
assert(() => pyodide.toPy(5n) === 5n);
assert(() => pyodide.toPy("abc") === "abc");
class Test {};
let t = new Test();
assert(() => pyodide.toPy(t) === t);
self.a1 = [1,2,3];
self.b1 = pyodide.toPy(a1);
self.a2 = { a : 1, b : 2, c : 3};
self.b2 = pyodide.toPy(a2);
pyodide.runPython(`
from js import a1, b1, a2, b2
assert a1.to_py() == b1
assert a2.to_py() == b2
`);
self.b1.destroy();
self.b2.destroy();
"""
)
@run_in_pyodide
def test_wrong_way_conversions2(selenium):
from pyodide.code import run_js
from pyodide.ffi import to_js
[astr, bstr] = run_js(
"""
(a) => {
b = [1,2,3];
return [JSON.stringify(a), JSON.stringify(b)]
}
"""
)(to_js([1, 2, 3]))
assert astr == bstr
@run_in_pyodide
def test_wrong_way_conversions3(selenium):
from pyodide.code import run_js
from pyodide.ffi import to_js
class Test:
pass
t1 = Test()
t2 = to_js(t1)
t3 = run_js("(t2) => t2.copy()")(t2)
assert t1 is t3
t2.destroy()
@run_in_pyodide
def test_wrong_way_conversions4(selenium):
from pyodide.ffi import to_js
s = "avafhjpa"
t = 55
assert to_js(s) is s
assert to_js(t) is t
@run_in_pyodide
def test_dict_converter1(selenium):
import json
from pyodide.code import run_js
from pyodide.ffi import to_js
arrayFrom = run_js("Array.from")
d = {x: x + 2 for x in range(5)}
res = to_js(d, dict_converter=arrayFrom)
constructor, serialized = run_js(
"""
(res) => {
return [res.constructor.name, JSON.stringify(res)];
}
"""
)(res)
assert constructor == "Array"
assert json.loads(serialized) == [list(x) for x in d.items()]
@run_in_pyodide
def test_dict_converter2(selenium):
import json
from pyodide.code import run_js
d = {x: x + 2 for x in range(5)}
constructor, serialized = run_js(
"""
(d) => {
const res = d.toJs({dict_converter : Array.from});
return [res.constructor.name, JSON.stringify(res)];
}
"""
)(d)
assert constructor == "Array"
assert json.loads(serialized) == [list(x) for x in d.items()]
@run_in_pyodide
def test_dict_converter3(selenium):
import json
from js import Object
from pyodide.code import run_js
from pyodide.ffi import to_js
d = {x: x + 2 for x in range(5)}
res = to_js(d, dict_converter=Object.fromEntries)
constructor, serialized = run_js(
"""
(res) => [res.constructor.name, JSON.stringify(res)]
"""
)(res)
assert constructor == "Object"
assert json.loads(serialized) == {str(k): v for k, v in d.items()}
@run_in_pyodide
def test_dict_converter4(selenium):
import json
from pyodide.code import run_js
d = {x: x + 2 for x in range(5)}
constructor, serialized = run_js(
"""
(px) => {
let res = px.toJs({dict_converter : Object.fromEntries});
return [res.constructor.name, JSON.stringify(res)];
}
"""
)(d)
assert constructor == "Object"
assert json.loads(serialized) == {str(k): v for k, v in d.items()}
@pytest.mark.parametrize(
    "formula",
    ["2**30", "2**31", "2**30 - 1 + 2**30", "2**32 / 2**4", "-2**30", "-2**31"],
)
def test_python2js_long_ints(selenium, formula):
    """Integers around the 32-bit boundary survive the Python -> JS conversion.

    Each formula is evaluated both inside Pyodide and in the host Python; the
    two results must agree (note ``2**32 / 2**4`` yields a float, exercising
    the float path as well).
    """
    assert selenium.run(formula) == eval(formula)
@run_in_pyodide
def test_python2js_long_ints2(selenium):
from pyodide.code import run_js
assert run_js(
"""
(x) => x === 2n**64n;
"""
)(2**64)
assert run_js(
"""
(x) => x === -(2n**64n);
"""
)(-(2**64))
def test_pythonexc2js(selenium):
    """A Python exception raised inside runPython surfaces as a JS exception."""
    expected = "ZeroDivisionError"
    with pytest.raises(selenium.JavascriptException, match=expected):
        selenium.run_js('return pyodide.runPython("5 / 0")')
@run_in_pyodide
def test_js2python_null(selenium):
from pyodide.code import run_js
from pyodide.ffi import jsnull
assert run_js("null") is jsnull
assert run_js("[null]")[0] is jsnull
assert run_js("() => null")() is jsnull
assert run_js("({a: null})").a is jsnull
assert run_js("new Map([['a', null]])")["a"] is jsnull
assert run_js("[null, null, null]").to_py() == [jsnull, jsnull, jsnull]
assert run_js("new Map([['a', null]])").to_py() == {"a": jsnull}
@run_in_pyodide
def test_json_dumps_null(selenium):
import json
from pyodide.ffi import jsnull
assert json.dumps(jsnull) == "null"
assert (
json.dumps([jsnull, jsnull, {jsnull: 1, 1: jsnull}])
== '[null, null, {"null": 1, "1": null}]'
)
@run_in_pyodide
def test_js2python_basic(selenium):
from pyodide.code import run_js
from pyodide.ffi import jsnull
t = run_js(
"""
({
jsstring_ucs1 : "pyodidé",
jsstring_ucs2 : "碘化物",
jsstring_ucs4 : "🐍",
jsnumber0 : 42,
jsnumber1 : 42.5,
jsundefined : undefined,
jsnull : null,
jstrue : true,
jsfalse : false,
jsarray0 : [],
jsarray1 : [1, 2, 3],
jspython : pyodide.globals.get("open"),
jsbytes : new Uint8Array([1, 2, 3]),
jsfloats : new Float32Array([1, 2, 3]),
jsobject : new TextDecoder(),
});
"""
)
assert t.jsstring_ucs1 == "pyodidé"
assert t.jsstring_ucs2 == "碘化物"
assert t.jsstring_ucs4 == "🐍"
assert t.jsnumber0 == 42 and isinstance(t.jsnumber0, int)
assert t.jsnumber1 == 42.5 and isinstance(t.jsnumber1, float)
assert t.jsundefined is None
assert t.jsnull is jsnull
assert t.jstrue is True
assert t.jsfalse is False
assert t.jspython is open
jsbytes = t.jsbytes.to_py()
assert (jsbytes.tolist() == [1, 2, 3]) and (jsbytes.tobytes() == b"\x01\x02\x03")
jsfloats = t.jsfloats.to_py()
import struct
expected = struct.pack("fff", 1, 2, 3)
assert (jsfloats.tolist() == [1, 2, 3]) and (jsfloats.tobytes() == expected)
assert str(t.jsobject) == "[object TextDecoder]"
assert bool(t.jsobject) is True
assert bool(t.jsarray0) is False
assert bool(t.jsarray1) is True
run_js("(t) => t.jspython.destroy()")(t)
@pytest.mark.parametrize(
"jsval, is_truthy",
[
("()=>{}", True),
("new Map()", False),
("new Map([[0, 1]])", True),
("new Set()", False),
("new Set([0])", True),
],
)
@run_in_pyodide
def test_js2python_bool(selenium, jsval, is_truthy):
from pyodide.code import run_js
assert bool(run_js(jsval)) is is_truthy
@pytest.mark.parametrize(
"jstype, pytype",
(
("Int8Array", "b"),
("Uint8Array", "B"),
("Uint8ClampedArray", "B"),
("Int16Array", "h"),
("Uint16Array", "H"),
("Int32Array", "i"),
("Uint32Array", "I"),
("Float32Array", "f"),
("Float64Array", "d"),
),
)
@run_in_pyodide
def test_typed_arrays(selenium, jstype, pytype):
from pyodide.code import run_js
array = run_js(f"new {jstype}([1, 2, 3, 4]);").to_py()
print(array.format, array.tolist(), array.tobytes())
assert array.format == pytype
assert array.tolist() == [1, 2, 3, 4]
import struct
assert array.tobytes() == struct.pack(pytype * 4, 1, 2, 3, 4)
@run_in_pyodide
def test_array_buffer(selenium):
from pyodide.code import run_js
array = run_js("new ArrayBuffer(100);").to_py()
assert len(array.tobytes()) == 100
def assert_js_to_py_to_js(selenium, name):
    """Assert the JS global `name` keeps its identity across a JS -> Python -> JS round trip."""
    selenium.run_js(f"self.obj = {name};")
    selenium.run("from js import obj")
    identical = selenium.run_js(
        """
        let pyobj = pyodide.globals.get("obj");
        return pyobj === obj;
        """
    )
    assert identical
def assert_py_to_js_to_py(selenium, name):
    """Assert the Python global `name` keeps its identity across a Python -> JS -> Python round trip."""
    selenium.run_js(
        f"""
        self.obj = pyodide.runPython('{name}');
        pyodide.runPython(`
            from js import obj
            assert obj is {name}
        `);
        obj.destroy();
        """
    )
@run_in_pyodide
def test_recursive_list_to_js(selenium):
    """to_js must terminate on a list that contains itself."""
    from pyodide.ffi import to_js

    cycle: Any = []
    cycle.append(cycle)
    to_js(cycle)
@run_in_pyodide
def test_recursive_dict_to_js(selenium):
    """to_js must terminate on a dict that contains itself as a value."""
    from pyodide.ffi import to_js

    cycle: Any = {}
    cycle[0] = cycle
    to_js(cycle)
@run_in_pyodide
def test_dict_subclass_to_js(selenium):
"""See issue #4636"""
from collections import ChainMap
from pyodide.code import run_js
j = run_js(
"""
(d) => JSON.stringify(d.toJs({ dict_converter: Object.fromEntries }))
"""
)
class D1(ChainMap, dict): # type: ignore[misc, type-arg]
pass
class D2(dict, ChainMap): # type: ignore[misc, type-arg]
pass
d = {"a": "b"}
assert eval(j(D1({"a": "b"}))) == d
assert eval(j(D2({"a": "b"}))) == d
def test_list_js2py2js(selenium):
    """A JS array keeps its identity across a JS -> Python -> JS round trip."""
    selenium.run_js("self.x = [1,2,3];")
    assert_js_to_py_to_js(selenium, "x")
def test_dict_js2py2js(selenium):
    """A JS object (with string and numeric keys) keeps its identity across a JS -> Python -> JS round trip."""
    selenium.run_js("self.x = { a : 1, b : 2, 0 : 3 };")
    assert_js_to_py_to_js(selenium, "x")
def test_error_js2py2js(selenium):
    """JS Error (and DOMException) objects keep their identity across a JS -> Python -> JS round trip."""
    selenium.run_js("self.err = new Error('hello there?');")
    assert_js_to_py_to_js(selenium, "err")
    # DOMException is only exercised in browsers, not under node —
    # presumably unavailable there; confirm before changing.
    if selenium.browser == "node":
        return
    selenium.run_js("self.err = new DOMException('hello there?');")
    assert_js_to_py_to_js(selenium, "err")
def test_error_py2js2py(selenium):
    """A Python Exception keeps its identity across a Python -> JS -> Python round trip."""
    selenium.run("err = Exception('hello there?');")
    assert_py_to_js_to_py(selenium, "err")
def test_list_py2js2py(selenium):
    """A Python list keeps its identity across a Python -> JS -> Python round trip."""
    selenium.run("x = ['a', 'b']")
    assert_py_to_js_to_py(selenium, "x")
def test_dict_py2js2py(selenium):
    """A Python dict keeps its identity across a Python -> JS -> Python round trip."""
    selenium.run("x = {'a' : 5, 'b' : 1}")
    assert_py_to_js_to_py(selenium, "x")
@run_in_pyodide
def test_jsproxy_attribute_error(selenium):
import pytest
from pyodide.code import run_js
point = run_js(
"""
class Point {
constructor(x, y) {
this.x = x;
this.y = y;
}
}
new Point(42, 43);
"""
)
assert point.y == 43
with pytest.raises(AttributeError, match="z"):
point.z # noqa: B018
del point.y
with pytest.raises(AttributeError, match="y"):
point.y # noqa: B018
assert run_js("(point) => point.y;")(point) is None
def test_javascript_error(selenium):
msg = "JsException: Error: This is a js error"
with pytest.raises(selenium.JavascriptException, match=msg):
selenium.run(
"""
from js import Error
err = Error.new("This is a js error")
err2 = Error.new("This is another js error")
raise err
"""
)
@run_in_pyodide
def test_javascript_error_back_to_js(selenium):
from pyodide.code import run_js
err = run_js('self.err = new Error("This is a js error"); err')
assert type(err).__name__ == "JsException"
assert run_js(
"""
(py_err) => py_err === err;
"""
)(err)
def test_memoryview_conversion(selenium):
    """Converting array.array buffers ("Q" and "u" typecodes) to JS must not
    leave the Python error indicator set (checked automatically in conftest.py)."""
    selenium.run(
        """
        import array
        a = array.array("Q", [1,2,3])
        b = array.array("u", "123")
        """
    )
    selenium.run_js(
        """
        pyodide.runPython("a").destroy()
        // Implicit assertion: this doesn't leave python error indicator set
        // (automatically checked in conftest.py)
        """
    )
    selenium.run_js(
        """
        pyodide.runPython("b").destroy()
        // Implicit assertion: this doesn't leave python error indicator set
        // (automatically checked in conftest.py)
        """
    )
def test_python2js_with_depth(selenium):
selenium.run_js(
"""
const x = pyodide.runPython(`
class Test: pass
[Test(), [Test(), [Test(), [Test()]]]]
`);
const Module = pyodide._module;
const proxies = [];
const result = Module._python2js_with_depth(Module.PyProxy_getPtr(x), -1, proxies);
assert(() => proxies.length === 4);
const result_proxies = [result[0], result[1][0], result[1][1][0], result[1][1][1][0]];
const sortFunc = (x, y) => Module.PyProxy_getPtr(x) < Module.PyProxy_getPtr(y);
proxies.sort(sortFunc);
result_proxies.sort(sortFunc);
for(let i = 0; i < 4; i++){
assert(() => proxies[i] == result_proxies[i]);
}
x.destroy();
for(const px of proxies){
px.destroy();
}
"""
)
@pytest.mark.parametrize("ty", [list, tuple])
@run_in_pyodide
def test_tojs1(selenium, ty):
    """Both lists and tuples convert via toJs to JS Arrays with equal contents."""
    import json

    from pyodide.code import run_js

    values = [1, 2, 3]
    container = ty(values)
    assert run_js("x => Array.isArray(x.toJs())")(container)
    as_json = run_js("x => JSON.stringify(x.toJs())")(container)
    assert values == json.loads(as_json)
@run_in_pyodide
def test_tojs2(selenium):
    """toJs converts nested tuples, lists and dicts recursively.

    The JSON round trip checks that tuples and lists both become Arrays and
    that the dict's keys appear as strings in the serialized output.
    """
    import json

    from pyodide.code import run_js

    o = [(1, 2), (3, 4), [5, 6], {"a": 1, 2: 3, 4: 9}]
    assert run_js("(o) => Array.isArray(o.toJs())")(o)
    serialized = run_js("(o) => JSON.stringify(o.toJs())")(o)
    assert json.loads(serialized) == [[1, 2], [3, 4], [5, 6], {"a": 1, "2": 3, "4": 9}]
    serialized = run_js(
        "(o) => JSON.stringify(Array.from(Object.entries(o.toJs()[3])))"
    )(o)
    assert sorted(json.loads(serialized)) == [["2", 3], ["4", 9], ["a", 1]]
def test_tojs4(selenium):
selenium.run_js(
"""
let a = pyodide.runPython("[1,[2,[3,[4,[5,[6,[7]]]]]]]")
for(let i=0; i < 7; i++){
let x = a.toJs({depth : i});
for(let j=0; j < i; j++){
assert(() => Array.isArray(x), `i: ${i}, j: ${j}`);
x = x[1];
}
assert(() => x instanceof pyodide.ffi.PyProxy, `i: ${i}, j: ${i}`);
x.destroy();
}
a.destroy()
"""
)
def test_tojs5(selenium):
selenium.run_js(
"""
let a = pyodide.runPython("[1, (2, (3, [4, (5, (6, [7]))]))]")
for(let i=0; i < 7; i++){
let x = a.toJs({depth : i});
for(let j=0; j < i; j++){
assert(() => Array.isArray(x), `i: ${i}, j: ${j}`);
x = x[1];
}
assert(() => x instanceof pyodide.ffi.PyProxy, `i: ${i}, j: ${i}`);
x.destroy();
}
a.destroy()
"""
)
def test_tojs6(selenium):
selenium.run_js(
"""
let respy = pyodide.runPython(`
a = [1, 2, 3, 4, 5]
b = [a, a, a, a, a]
[b, b, b, b, b]
`);
let total_refs = pyodide._module._hiwire_num_refs();
let res = respy.toJs();
let new_total_refs = pyodide._module._hiwire_num_refs();
respy.destroy();
assert(() => total_refs === new_total_refs);
assert(() => res[0] === res[1]);
assert(() => res[0][0] === res[1][1]);
assert(() => res[4][0] === res[1][4]);
"""
)
def test_tojs7(selenium):
selenium.run_js(
"""
let respy = pyodide.runPython(`
a = [["b"]]
b = [1,2,3, a[0]]
a[0].append(b)
a.append(b)
a
`);
let total_refs = pyodide._module._hiwire_num_refs();
let res = respy.toJs();
let new_total_refs = pyodide._module._hiwire_num_refs();
respy.destroy();
assert(() => total_refs === new_total_refs);
assert(() => res[0][0] === "b");
assert(() => res[1][2] === 3);
assert(() => res[1][3] === res[0]);
assert(() => res[0][1] === res[1]);
"""
)
@pytest.mark.skip_pyproxy_check
@run_in_pyodide
def test_tojs8(selenium):
import pytest
from pyodide.ffi import ConversionError, to_js
msg = r"Cannot use \(2, 2\) as a key for a Javascript"
with pytest.raises(ConversionError, match=msg):
to_js({(2, 2): 0})
with pytest.raises(ConversionError, match=msg):
to_js({(2, 2)})
@run_in_pyodide
def test_tojs9(selenium):
import pytest
from pyodide.code import run_js
from pyodide.ffi import ConversionError, to_js
result1 = to_js({1, "1"})
assert set(run_js("(x) => Array.from(x.values())")(result1)) == {1, "1"}
msg = "Key collision when converting Python dictionary to JavaScript. Key: '1'"
with pytest.raises(ConversionError, match=msg):
to_js({1: 7, "1": 9})
def test_tojs_literalmap(selenium_standalone_noload):
selenium = selenium_standalone_noload
selenium.run_js(
"""
let pyodide = await loadPyodide({toJsLiteralMap: true});
const res = pyodide.runPython(`
from pyodide.ffi import to_js
res = to_js({"a": 6, "b": 10, 6: 9, "get": 77, True: 90})
res
`);
assert(() => res.constructor.name === "LiteralMap");
assert(() => "a" in res);
assert(() => "b" in res);
assert(() => !(6 in res));
assert(() => "get" in res);
assert(() => !(true in res));
assert(() => res.has("a"));
assert(() => res.has("b"));
assert(() => res.has(6));
assert(() => res.has("get"));
assert(() => res.has(true));
assert(() => res.a === 6);
assert(() => res.b === 10);
assert(() => res[6] === undefined);
assert(() => typeof res.get === "function");
assert(() => res[true] === undefined);
assert(() => res.get("a") === 6);
assert(() => res.get("b") === 10);
assert(() => res.get(6) === 9);
assert(() => res.get("get") === 77);
assert(() => res.get(true) === 90);
res.delete("a");
assert(() => !("a" in res));
assert(() => !res.has("a"));
res.a = 7;
assert(() => res.a === 7);
assert(() => res.get("a") === 7);
res.set("a", 99);
assert(() => res.get("a") === 99);
assert(() => res.a === 99);
delete res.a
assert(() => !("a" in res));
assert(() => !res.has("a"));
"""
)
@run_in_pyodide
def test_to_py1(selenium):
from pyodide.code import run_js
a = run_js(
"""
let a = new Map([[1, [1,2,new Set([1,2,3])]], [2, new Map([[1,2],[2,7]])]]);
a.get(2).set("a", a);
a;
"""
)
result = [repr(a.to_py(depth=i)) for i in range(4)]
assert result == [
"[object Map]",
"{1: 1,2,[object Set], 2: [object Map]}",
"{1: [1, 2, [object Set]], 2: {1: 2, 2: 7, 'a': [object Map]}}",
"{1: [1, 2, {1, 2, 3}], 2: {1: 2, 2: 7, 'a': {...}}}",
]
@run_in_pyodide
def test_to_py2(selenium):
from pyodide.code import run_js
a = run_js(
"""
let a = { "x" : 2, "y" : 7, "z" : [1,2] };
a.z.push(a);
a
"""
)
result = [repr(a.to_py(depth=i)) for i in range(4)]
assert result == [
"[object Object]",
"{'x': 2, 'y': 7, 'z': 1,2,[object Object]}",
"{'x': 2, 'y': 7, 'z': [1, 2, [object Object]]}",
"{'x': 2, 'y': 7, 'z': [1, 2, {...}]}",
]
@run_in_pyodide
def test_to_py3(selenium):
from pyodide.code import run_js
a = run_js(
"""
class Temp {
constructor(){
this.x = 2;
this.y = 7;
}
}
new Temp();
"""
)
assert repr(type(a.to_py())) == "<class 'pyodide.ffi.JsProxy'>"
@pytest.mark.parametrize(
"obj, msg",
[
("Map([[[1,1], 2]])", "Cannot use key of type Array as a key to a Python dict"),
("Set([[1,1]])", "Cannot use key of type Array as a key to a Python set"),
("Map([[0, 2], [false, 3]])", "contains both 0 and false"),
("Set([0, false])", "contains both 0 and false"),
("Map([[1, 2], [true, 3]])", "contains both 1 and true"),
("Set([1, true])", "contains both 1 and true"),
],
)
@run_in_pyodide
def test_to_py4(selenium, obj, msg):
import pytest
from pyodide.code import run_js
from pyodide.ffi import ConversionError, JsException
a = run_js(f"new {obj}")
with pytest.raises((ConversionError, JsException), match=msg):
a.to_py()
with pytest.raises((ConversionError, JsException), match=msg):
a = run_js(f"pyodide.toPy(new {obj})")
@run_in_pyodide
def test_to_py_default_converter(selenium):
from pyodide.code import run_js
[r1, r2] = run_js(
"""
class Pair {
constructor(first, second){
this.first = first;
this.second = second;
}
}
let l = [1,2,3];
const r1 = new Pair(l, [l]);
const r2 = new Pair(l, [l]);
r2.first = r2;
const opts = {defaultConverter(value, converter, cache){
if(value.constructor.name !== "Pair"){
return value;
}
let list = pyodide.globals.get("list");
l = list();
list.destroy();
cache(value, l);
const first = converter(value.first);
const second = converter(value.second);
l.append(first);
l.append(second);
first.destroy();
second.destroy();
return l;
}};
pyodide.toPy([r1, r2], opts);
"""
)
assert isinstance(r1, list)
assert r1[0] is r1[1][0]
assert r1[0] == [1, 2, 3]
assert r2[0] is r2
@run_in_pyodide
def test_to_py_default_converter2(selenium):
from typing import Any
from pyodide.code import run_js
[p1, p2] = run_js(
"""
class Pair {
constructor(first, second){
this.first = first;
this.second = second;
}
}
const l = [1,2,3];
const r1 = new Pair(l, [l]);
const r2 = new Pair(l, [l]);
r2.first = r2;
[r1, r2]
"""
)
def default_converter(value, converter, cache):
if value.constructor.name != "Pair":
return value
l: list[Any] = []
cache(value, l)
l.append(converter(value.first))
l.append(converter(value.second))
return l
r1 = p1.to_py(default_converter=default_converter)
assert isinstance(r1, list)
assert r1[0] is r1[1][0]
assert r1[0] == [1, 2, 3]
r2 = p2.to_py(default_converter=default_converter)
assert r2[0] is r2
def test_to_js_default_converter(selenium):
selenium.run_js(
"""
p = pyodide.runPython(`
class Pair:
def __init__(self, first, second):
self.first = first
self.second = second
p = Pair(1,2)
p
`);
let res = p.toJs({ default_converter(x, convert, cacheConversion){
let result = [];
cacheConversion(x, result);
result.push(convert(x.first));
result.push(convert(x.second));
return result;
}});
assert(() => res[0] === 1);
assert(() => res[1] === 2);
p.first = p;
let res2 = p.toJs({ default_converter(x, convert, cacheConversion){
let result = [];
cacheConversion(x, result);
result.push(convert(x.first));
result.push(convert(x.second));
return result;
}});
assert(() => res2[0] === res2);
assert(() => res2[1] === 2);
p.destroy();
"""
)
@run_in_pyodide
def test_to_js_default_converter2(selenium):
import json
import pytest
from js import JSON, Array
from pyodide.code import run_js
from pyodide.ffi import JsException, to_js
class Pair:
__slots__ = ("first", "second")
def __init__(self, first, second):
self.first = first
self.second = second
p1 = Pair(1, 2)
p2 = Pair(1, 2)
p2.first = p2
def default_converter(value, convert, cacheConversion):
result = Array.new()
cacheConversion(value, result)
result.push(convert(value.first))
result.push(convert(value.second))
return result
p1js = to_js(p1, default_converter=default_converter)
p2js = to_js(p2, default_converter=default_converter)
assert json.loads(JSON.stringify(p1js)) == [1, 2]
with pytest.raises(JsException, match="TypeError"):
JSON.stringify(p2js)
assert run_js("(x) => x[0] === x")(p2js)
assert run_js("(x) => x[1] === 2")(p2js)
@run_in_pyodide
def test_to_js_eager_converter(selenium):
import pytest
from js import Array
from pyodide.ffi import ConversionError, destroy_proxies, to_js
recursive_list: Any = []
recursive_list.append(recursive_list)
recursive_dict: Any = {}
recursive_dict[0] = recursive_dict
a_thing = [{1: 2}, (2, 4, 6)]
def normal(value, convert, cacheConversion):
return convert(value)
def reject_tuples(value, convert, cacheConversion):
if isinstance(value, tuple):
raise ConversionError("We don't convert tuples!")
return convert(value)
def proxy_tuples(value, convert, cacheConversion):
if isinstance(value, tuple):
return value
return convert(value)
to_js(recursive_list, eager_converter=normal)
to_js(recursive_dict, eager_converter=normal)
to_js(a_thing, eager_converter=normal)
to_js(recursive_list, eager_converter=reject_tuples)
to_js(recursive_dict, eager_converter=reject_tuples)
with pytest.raises(ConversionError, match="We don't convert tuples"):
to_js(a_thing, eager_converter=reject_tuples)
to_js(recursive_list, eager_converter=proxy_tuples)
to_js(recursive_dict, eager_converter=proxy_tuples)
proxylist = Array.new()
res = to_js(a_thing, eager_converter=proxy_tuples, pyproxies=proxylist)
assert res[-1] == (2, 4, 6)
assert len(proxylist) == 1
destroy_proxies(proxylist)
def test_buffer_format_string(selenium):
    """Exercise the JS-side processBufferFormatString helper.

    Three phases: (1) malformed struct-style format strings must raise the
    expected error messages, (2) each bare format character must map to the
    right TypedArray constructor with little-endian output, and (3) alignment
    prefixes must control the big-endian flag.
    """
    # Invalid format strings paired with the error each must produce.
    # (The original list carried the 'x' case twice; one copy suffices.)
    errors = [
        ["aaa", "Expected format string to have length <= 2, got 'aaa'"],
        ["II", "Unrecognized alignment character I."],
        ["x", "Unrecognized format character 'x'."],
        ["e", "Javascript has no Float16 support."],
    ]
    for fmt, msg in errors:
        with pytest.raises(selenium.JavascriptException, match=msg):
            selenium.run_js(
                f"""
                pyodide._module.processBufferFormatString({fmt!r});
                """
            )

    # (struct format character, expected TypedArray name prefix)
    format_tests = [
        ("c", "Uint8"),
        ("b", "Int8"),
        ("B", "Uint8"),
        ("?", "Uint8"),
        ("h", "Int16"),
        ("H", "Uint16"),
        ("i", "Int32"),
        ("I", "Uint32"),
        ("l", "Int32"),
        ("L", "Uint32"),
        ("n", "Int32"),
        ("N", "Uint32"),
        ("q", "BigInt64"),
        ("Q", "BigUint64"),
        ("f", "Float32"),
        ("d", "Float64"),
        ("s", "Uint8"),
        ("p", "Uint8"),
        ("P", "Uint32"),
    ]

    def process_fmt_string(fmt):
        # Returns [TypedArray name prefix, is_big_endian] for a format string.
        return selenium.run_js(
            f"""
            let [array, is_big_endian] = pyodide._module.processBufferFormatString({fmt!r});
            if(!array || typeof array.name !== "string" || !array.name.endsWith("Array")){{
                throw new Error("Unexpected output on input {fmt}: " + array);
            }}
            let arrayName = array.name.slice(0, -"Array".length);
            return [arrayName, is_big_endian];
            """
        )

    for fmt, expected_array_name in format_tests:
        [array_name, is_big_endian] = process_fmt_string(fmt)
        # Without an alignment prefix the result must be little-endian.
        assert not is_big_endian
        assert array_name == expected_array_name

    # (alignment-prefixed format, expected array name, expected big-endian flag)
    endian_tests = [
        ("@h", "Int16", False),
        ("=H", "Uint16", False),
        ("<i", "Int32", False),
        (">I", "Uint32", True),
        ("!l", "Int32", True),
    ]

    for fmt, expected_array_name, expected_is_big_endian in endian_tests:
        [array_name, is_big_endian] = process_fmt_string(fmt)
        assert is_big_endian == expected_is_big_endian
        assert array_name == expected_array_name
def test_dict_converter_cache1(selenium):
    """A dict referenced twice converts to one shared JS object when using dict_converter."""
    selenium.run_js(
        """
        let d1 = pyodide.runPython('d={0: {1: 2}}; d[1]=d[0]; d');
        let d = d1.toJs({dict_converter: Object.fromEntries});
        d1.destroy();
        assert(() => d[0] === d[1]);
        """
    )
@pytest.mark.xfail(reason="TODO: Fix me")
def test_dict_converter_cache2(selenium):
    """A self-referential dict should convert to a JS object referencing itself.

    Currently fails (xfail): the conversion cache does not yet handle a dict
    that contains itself as a value when dict_converter is used.
    """
    selenium.run_js(
        """
        let d1 = pyodide.runPython('d={0: {1: 2}}; d[1]=d[0]; d[2] = d; d');
        let d = d1.toJs({dict_converter: Object.fromEntries});
        assert(() => d[2] === d);
        """
    )
@run_in_pyodide
def test_dict_and_default_converter(selenium):
from js import Object
from pyodide.ffi import to_js
def default_converter(_obj, c, _):
return c({"a": 2})
class A:
pass
res = to_js(
A, dict_converter=Object.fromEntries, default_converter=default_converter
)
assert res.a == 2
@run_in_pyodide
def test_bind_attrs(selenium):
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
from pyodide.ffi import JsProxy
class A(BindClass):
x: int
y: Annotated[list[int], Deep]
a_px: JsProxy = run_js(
"""
({
x: 7,
y: [1,2,3],
})
"""
)
a = a_px.bind_sig(A)
assert a.x == 7
assert a.y == [1, 2, 3]
@run_in_pyodide
def test_bind_call_convert(selenium):
from typing import Annotated
from _pyodide.jsbind import Deep, Json
from pyodide.code import run_js
def fsig(
a: dict[str, int],
b: Annotated[dict[str, int], Json],
c: Annotated[dict[str, int], Deep],
/,
) -> Annotated[list[int], Deep]:
raise NotImplementedError
f = run_js(
"""
(function f(x, y, z) {
return [x.get("a"), y.b, z.c]
})
"""
).bind_sig(fsig)
x = {"a": 2}
y = {"b": 4}
z = {"c": 6}
assert f(x, y, z) == [2, 4, 6]
@run_in_pyodide
def test_bind_call_bind_return_value(selenium):
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
class A(BindClass):
x: Annotated[list[int], Deep]
def fsig() -> A:
raise NotImplementedError
f = run_js(
"""
(function f() {
return {x: [77, 1]};
})
"""
).bind_sig(fsig)
assert f().x == [77, 1]
@run_in_pyodide
async def test_bind_future_convert_result(selenium):
from asyncio import Future
from typing import Annotated
from _pyodide.jsbind import Deep
from pyodide.code import run_js
def f1() -> Future[Annotated[list[int], Deep]]:
raise NotImplementedError
async def f2() -> Annotated[list[int], Deep]:
raise NotImplementedError
jsfunc = run_js(
"""
(async function() {
return [1,2,3];
})
"""
)
f1 = jsfunc.bind_sig(f1)
f2 = jsfunc.bind_sig(f2)
assert await f1() == [1, 2, 3]
assert await f2() == [1, 2, 3]
@run_in_pyodide
async def test_bind_future_bind_result(selenium):
from asyncio import Future
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
class A(BindClass):
x: Annotated[list[int], Deep]
def f1() -> Future[A]:
raise NotImplementedError
async def f2() -> A:
raise NotImplementedError
jsfunc = run_js(
"""
async function f() {
return {x: [77, 1]};
};
f
"""
)
f1 = jsfunc.bind_sig(f1)
f2 = jsfunc.bind_sig(f2)
assert (await f1()).x == [77, 1]
assert (await f2()).x == [77, 1]
@run_in_pyodide
def test_bind3(selenium):
from pyodide.code import run_js
o = run_js(
"""
({
f(x, y, z) {
return [x.get("a"), y.b, z.c]
},
x: [1,2,3],
y: {
g(x) {
return x.a;
},
c: [1,2,3]
}
})
"""
)
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep, Json
class B(BindClass):
@staticmethod
def g(x: Annotated[dict[str, int], Json], /) -> int:
raise NotImplementedError
c: Annotated[list[int], Deep]
class A(BindClass):
@staticmethod
def f(
a: dict[str, int],
b: Annotated[dict[str, int], Json],
c: Annotated[dict[str, int], Deep],
/,
) -> Annotated[list[int], Deep]:
raise NotImplementedError
x: Annotated[list[int], Deep]
y: B
o2: A = o.bind_sig(A)
f1 = o2.f
f2 = o.f.bind_sig(A.f)
x = {"a": 2}
y = {"b": 4}
z = {"c": 6}
assert o2.f(x, y, z) == [2, 4, 6]
assert f1(x, y, z) == [2, 4, 6]
assert f2(x, y, z) == [2, 4, 6]
assert o2.y.g({"a": 7}) == 7
@run_in_pyodide
async def test_bind_async1(selenium):
from asyncio import Future
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
class A(BindClass):
x: Future[Annotated[list[int], Deep]]
a: A = run_js(
"""
({
x: (async function () {
return [1, 2, 3]
})()
})
"""
).bind_sig(A)
assert await a.x == [1, 2, 3]
@run_in_pyodide
async def test_bind_async2(selenium):
from asyncio import Future
from typing import Annotated
from _pyodide.jsbind import Deep
from pyodide.code import run_js
from pyodide.ffi import JsProxy
jsfunc: JsProxy = run_js(
"""
(async function () {
return [1, 2, 3]
});
"""
)
async def f1() -> Annotated[list[int], Deep]:
raise NotImplementedError
def f2() -> Future[Annotated[list[int], Deep]]:
raise NotImplementedError
f1 = jsfunc.bind_sig(f1)
f2 = jsfunc.bind_sig(f2)
assert await f1() == [1, 2, 3]
assert await f2() == [1, 2, 3]
@run_in_pyodide
async def test_bind_async3(selenium):
from asyncio import Future
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
from pyodide.ffi import JsProxy
class A(BindClass):
x: Annotated[list[int], Deep]
async def f1() -> A:
raise NotImplementedError
def f2() -> Future[A]:
raise NotImplementedError
jsfunc: JsProxy = run_js(
"""
(async function() {
return {
x : [1,2,3]
};
})
"""
)
f1 = jsfunc.bind_sig(f1)
f2 = jsfunc.bind_sig(f2)
assert (await f1()).x == [1, 2, 3]
assert (await f2()).x == [1, 2, 3]
@run_in_pyodide
def test_bind_pre_convert(selenium):
from typing import Annotated, _caches # type:ignore[attr-defined]
from _pyodide.jsbind import Deep, Py2JsConverterMeta
from js import Headers # type:ignore[attr-defined]
from pyodide.code import run_js
from pyodide.ffi import JsProxy
ajs: JsProxy = run_js("(x) => [x.toString(), JSON.stringify(Array.from(x))]")
class ToHeaders(metaclass=Py2JsConverterMeta):
@staticmethod
def pre_convert(value):
return Headers.new(value.items())
def a(
x: Annotated[dict[str, str] | None, ToHeaders], /
) -> Annotated[list[str], Deep]:
return []
abound = ajs.bind_sig(a)
assert abound({"x": "y"}) == ["[object Headers]", '[["x","y"]]']
_caches[Annotated._getitem.__wrapped__].cache_clear() # type:ignore[attr-defined]
@run_in_pyodide
def test_bind_construct(selenium):
from typing import Annotated, Any, NotRequired, TypedDict
from _pyodide.jsbind import Default, Json
from pyodide.code import run_js
from pyodide.ffi import JsProxy
class Inner(TypedDict):
b: int
c: NotRequired[str]
class Outer(TypedDict):
a: list[Inner]
x: int
ajs: JsProxy = run_js("(x) => x")
def a_shape(x: Annotated[Any, Default], /) -> Annotated[Outer, Json]:
raise NotImplementedError
# pyright infers abound has same type as a_shape,
a = ajs.bind_sig(a_shape)
o = run_js("({x: 7, a : [{b: 1, c: 'xyz'},{b: 2},{b: 3}]})")
res = a(o)
assert res["x"] == 7
res["x"] = 9
assert o.x == 9
assert res["a"][0]["b"] == 1
assert res["a"][0]["c"]
assert "c" in res["a"][0]
assert res["a"][0]["c"] == "xyz"
assert res["a"][1]["b"] == 2
assert "c" not in res["a"][1]
res["a"][1]["c"] = "s"
assert o.a[1].c == "s"
@run_in_pyodide
def test_bind_py_json(selenium):
from pyodide.code import run_js
from pyodide.ffi import JsProxy
A: JsProxy = run_js("(class {x = 7})")
class A_sig:
x: int
Abound = A.bind_class(A_sig)
res = Abound()
assert res.x == 7
@run_in_pyodide
def test_bind_class(selenium):
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep
from pyodide.code import run_js
from pyodide.ffi import JsProxy
A_px: JsProxy = run_js("(class {x = [1,2,3]; f() { return [1]; }})")
a_px: JsProxy = run_js("(A) => new A()")(A_px)
class A_sig(BindClass):
x: Annotated[list[int], Deep]
def __init__(self, /): ...
def f(self, /) -> Annotated[list[int], Deep]:
return []
A = A_px.bind_class(A_sig)
res = A()
assert isinstance(res.x, list)
assert isinstance(res.f(), list)
a = a_px.bind_sig(A_sig)
assert isinstance(a.x, list)
assert isinstance(a.f(), list)
@run_in_pyodide
def test_bind__call__(selenium):
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep, Json
from pyodide.code import run_js
from pyodide.ffi import JsProxy
class FuncType(BindClass):
def __call__(
self,
a: dict[str, int],
b: Annotated[dict[str, int], Json],
c: Annotated[dict[str, int], Deep],
/,
) -> Annotated[list[int], Deep]:
return []
f_px: JsProxy = run_js(
"""
(function f(x, y, z) {
return [x.get("a"), y.b, z.c]
})
"""
)
f = f_px.bind_sig(FuncType)
assert f({"a": 7}, {"b": 9}, {"c": 11}) == [7, 9, 11]
@run_in_pyodide
def test_bind_getattr(selenium):
from typing import Annotated
from _pyodide.jsbind import BindClass, Deep, Json
from pyodide.code import run_js
from pyodide.ffi import JsProxy
class FuncType(BindClass):
def __call__(
self,
a: dict[str, int],
b: Annotated[dict[str, int], Json],
c: Annotated[dict[str, int], Deep],
/,
) -> Annotated[list[int], Deep]:
return []
class T:
def __getattr__(self, name: str) -> FuncType:
raise NotImplementedError
t_px: JsProxy = run_js(
"""
({
f(x, y, z) {
return [x.get("a"), y.b, z.c]
},
g() {
return [1, 2, 3];
}
})
"""
)
t = t_px.bind_sig(T)
assert t.f({"a": 7}, {"b": 9}, {"c": 11}) == [7, 9, 11]
assert t.g({"a": 7}, {"b": 9}, {"c": 11}) == [1, 2, 3]
@run_in_pyodide
def test_to_js_no_leak(selenium):
from js import Object
from pyodide.ffi import to_js
d = {"key": Object()}
to_js(d)
@run_in_pyodide
def test_js_callable_not_function(selenium):
from pyodide.code import run_js
o = run_js(
"""
function nonFuncCallable (...params) {
console.log(this);
return [this, ...params]
}
Object.setPrototypeOf(nonFuncCallable, {})
const o = {nonFuncCallable};
o
"""
)
assert list(o.nonFuncCallable(1, 2, 3)) == [o, 1, 2, 3]
| NoHypothesisUnpickler |
python | ray-project__ray | python/ray/tests/test_client_builder.py | {
"start": 2581,
"end": 15780
} | class ____:
def ping(self):
return "pong"
a = Foo.options(lifetime="detached", name="abc").remote()
ray.get(a.ping.remote())
print("Current namespace:", ray.get_runtime_context().namespace)
"""
anon_driver = template.format(namespace="None")
run_string_as_driver(anon_driver)
# This second run will fail if the actors don't run in separate anonymous
# namespaces.
run_string_as_driver(anon_driver)
run_in_namespace = template.format(namespace="'namespace'")
script_output = run_string_as_driver(run_in_namespace)
# The second run fails because the actors are run in the same namespace.
with pytest.raises(subprocess.CalledProcessError):
run_string_as_driver(run_in_namespace)
assert "Current namespace: namespace" in script_output
subprocess.check_output("ray stop --force", shell=True)
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
def test_start_local_cluster():
"""This tests that ray.client() starts a new local cluster when appropriate.
* Using `ray.client("local").connect() ` should always create a new
cluster.
* Using `ray.client().connect()` should create a new cluster if it doesn't
connect to an existing one that was started via `ray start --head`..
"""
driver_template = """
import ray
info = ray.client({address}).connect()
print("NODE_ID:", ray.get_runtime_context().get_node_id())
# Block.
while True:
time.sleep(1)
"""
def _get_node_id(p: subprocess.Popen) -> str:
l = p.stdout.readline().decode("ascii").strip()
assert "NODE_ID" in l
return l[len("NODE_ID: ") :]
p1, p2, p3 = None, None, None
unbuffered = {"PYTHONUNBUFFERED": "1"}
try:
# ray.client() should start a cluster if none is running.
p1 = run_string_as_driver_nonblocking(
driver_template.format(address=""), env=unbuffered
)
p1_node_id = _get_node_id(p1)
# ray.client("local") should always start a cluster.
p2 = run_string_as_driver_nonblocking(driver_template.format(address="'local'"))
p2_node_id = _get_node_id(p2)
# ray.client() shouldn't connect to a cluster started by ray.client() or
# ray.client("local").
p3 = run_string_as_driver_nonblocking(driver_template.format(address=""))
p3_node_id = _get_node_id(p3)
# Check that all three drivers started their own local clusters.
assert len({p1_node_id, p2_node_id, p3_node_id}) == 3
finally:
# Kill processes concurrently.
if p1 is not None:
p1.kill()
if p2 is not None:
p2.kill()
if p3 is not None:
p3.kill()
# Wait for processes to exit.
if p1 is not None:
p1.wait()
if p2 is not None:
p2.wait()
if p3 is not None:
p3.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
def test_connect_to_local_cluster(call_ray_start):
"""This tests that ray.client connects to a local cluster when appropriate.
* Using `ray.client("local").connect() ` should always create a new
cluster even if one is running.
* Using `ray.client().connect()` should connect to a local cluster that was
started with `ray start --head`.
"""
driver_template = """
import ray
info = ray.client({address}).connect()
print("NODE_ID:", ray.get_runtime_context().get_node_id())
"""
def _get_node_id(p: subprocess.Popen) -> str:
l = p.stdout.readline().decode("ascii").strip()
assert "NODE_ID" in l
return l[len("NODE_ID: ") :]
existing_node_id = ray.get_runtime_context().get_node_id()
p1, p2 = None, None
unbuffered = {"PYTHONUNBUFFERED": "1"}
try:
# ray.client() should connect to the running cluster.
p1 = run_string_as_driver_nonblocking(
driver_template.format(address=""), env=unbuffered
)
assert _get_node_id(p1) == existing_node_id
# ray.client("local") should always start a cluster.
p2 = run_string_as_driver_nonblocking(driver_template.format(address="'local'"))
assert _get_node_id(p2) != existing_node_id
finally:
# Kill processes concurrently.
if p1 is not None:
p1.kill()
if p2 is not None:
p2.kill()
# Wait for processes to exit.
if p1 is not None:
p1.wait()
if p2 is not None:
p2.wait()
def test_non_existent_modules():
exception = None
try:
ray.client("badmodule://address")
except RuntimeError as e:
exception = e
assert exception is not None, "Bad Module did not raise RuntimeException"
assert "does not exist" in str(exception)
def test_module_lacks_client_builder():
mock_importlib = Mock()
def mock_import_module(module_string):
if module_string == "ray":
return ray
else:
# Mock() does not have a `ClientBuilder` in its scope
return Mock()
mock_importlib.import_module = mock_import_module
with patch("ray.client_builder.importlib", mock_importlib):
assert isinstance(ray.client(""), ray.ClientBuilder)
assert isinstance(ray.client("ray://"), ray.ClientBuilder)
exception = None
try:
ray.client("othermodule://")
except AssertionError as e:
exception = e
assert (
exception is not None
), "Module without ClientBuilder did not raise AssertionError"
assert "does not have ClientBuilder" in str(exception)
@pytest.mark.skipif(sys.platform == "win32", reason="RC Proxy is Flaky on Windows.")
def test_disconnect(call_ray_stop_only, set_enable_auto_connect):
subprocess.check_output(
"ray start --head --ray-client-server-port=25555", shell=True
)
with ray.client("localhost:25555").namespace("n1").connect():
# Connect via Ray Client
namespace = ray.get_runtime_context().namespace
assert namespace == "n1"
assert ray.util.client.ray.is_connected()
with pytest.raises(ray.exceptions.RaySystemError):
ray.put(300)
with ray.client(None).namespace("n1").connect():
# Connect Directly via Driver
namespace = ray.get_runtime_context().namespace
assert namespace == "n1"
assert not ray.util.client.ray.is_connected()
with pytest.raises(ray.exceptions.RaySystemError):
ray.put(300)
ctx = ray.client("localhost:25555").namespace("n1").connect()
# Connect via Ray Client
namespace = ray.get_runtime_context().namespace
assert namespace == "n1"
assert ray.util.client.ray.is_connected()
ctx.disconnect()
# Check idempotency
ctx.disconnect()
with pytest.raises(ray.exceptions.RaySystemError):
ray.put(300)
@pytest.mark.skipif(sys.platform == "win32", reason="RC Proxy is Flaky on Windows.")
def test_address_resolution(call_ray_stop_only):
subprocess.check_output(
"ray start --head --ray-client-server-port=50055", shell=True
)
with ray.client("localhost:50055").connect():
assert ray.util.client.ray.is_connected()
try:
os.environ["RAY_ADDRESS"] = "local"
with ray.client("localhost:50055").connect():
# client(...) takes precedence of RAY_ADDRESS=local
assert ray.util.client.ray.is_connected()
# This tries to call `ray.init(address="local") which creates a new Ray
# instance.
with ray.client(None).connect():
wait_for_condition(
lambda: len(ray._private.services.find_gcs_addresses()) == 2,
retry_interval_ms=1000,
)
finally:
if os.environ.get("RAY_ADDRESS"):
del os.environ["RAY_ADDRESS"]
ray.shutdown()
def mock_connect(*args, **kwargs):
"""
Force exit instead of actually attempting to connect
"""
raise ConnectionError
def has_client_deprecation_warn(warning: Warning, expected_replacement: str) -> bool:
"""
Returns true if expected_replacement is in the message of the passed
warning, and that the warning mentions deprecation.
"""
start = "Starting a connection through `ray.client` will be deprecated"
message = str(warning.message)
if start not in message:
return False
if expected_replacement not in message:
return False
return True
@pytest.mark.skipif(
sys.platform == "win32", reason="pip not supported in Windows runtime envs."
)
@pytest.mark.filterwarnings(
"default:Starting a connection through `ray.client` will be deprecated"
)
def test_client_deprecation_warn():
"""
Tests that calling ray.client directly raises a deprecation warning with
a copy pasteable replacement for the client().connect() call converted
to ray.init style.
"""
# Test warning when local client mode is used
with warnings.catch_warnings(record=True) as w:
ray.client().connect()
assert any(has_client_deprecation_warn(warning, "ray.init()") for warning in w)
ray.shutdown()
with warnings.catch_warnings(record=True) as w:
ray.client().namespace("nmspc").env({"pip": ["requests"]}).connect()
expected = (
'ray.init(namespace="nmspc", runtime_env=<your_runtime_env>)' # noqa E501
)
assert any(
has_client_deprecation_warn(warning, expected) for warning in w # noqa E501
)
ray.shutdown()
server = ray_client_server.serve("localhost", 50055)
# Test warning when namespace and runtime env aren't specified
with warnings.catch_warnings(record=True) as w:
with ray.client("localhost:50055").connect():
pass
assert any(
has_client_deprecation_warn(warning, 'ray.init("ray://localhost:50055")')
for warning in w
)
# Test warning when just namespace specified
with warnings.catch_warnings(record=True) as w:
with ray.client("localhost:50055").namespace("nmspc").connect():
pass
assert any(
has_client_deprecation_warn(
warning, 'ray.init("ray://localhost:50055", namespace="nmspc")'
)
for warning in w
)
# Test that passing namespace through env doesn't add namespace to the
# replacement
with warnings.catch_warnings(record=True) as w, patch.dict(
os.environ, {"RAY_NAMESPACE": "aksdj"}
):
with ray.client("localhost:50055").connect():
pass
assert any(
has_client_deprecation_warn(warning, 'ray.init("ray://localhost:50055")')
for warning in w
)
# Skip actually connecting on these, since updating the runtime env is
# time consuming
with patch("ray.util.client_connect.connect", mock_connect):
# Test warning when just runtime_env specified
with warnings.catch_warnings(record=True) as w:
try:
ray.client("localhost:50055").env({"pip": ["requests"]}).connect()
except ConnectionError:
pass
expected = 'ray.init("ray://localhost:50055", runtime_env=<your_runtime_env>)' # noqa E501
assert any(has_client_deprecation_warn(warning, expected) for warning in w)
# Test warning works if both runtime env and namespace specified
with warnings.catch_warnings(record=True) as w:
try:
ray.client("localhost:50055").namespace("nmspc").env(
{"pip": ["requests"]}
).connect()
except ConnectionError:
pass
expected = 'ray.init("ray://localhost:50055", namespace="nmspc", runtime_env=<your_runtime_env>)' # noqa E501
assert any(has_client_deprecation_warn(warning, expected) for warning in w)
# We don't expect namespace to appear in the warning message, since
# it was configured through an env var
with warnings.catch_warnings(record=True) as w, patch.dict(
os.environ, {"RAY_NAMESPACE": "abcdef"}
):
try:
ray.client("localhost:50055").env({"pip": ["requests"]}).connect()
except ConnectionError:
pass
expected = 'ray.init("ray://localhost:50055", runtime_env=<your_runtime_env>)' # noqa E501
assert any(has_client_deprecation_warn(warning, expected) for warning in w)
# cleanup
server.stop(0)
subprocess.check_output("ray stop --force", shell=True)
@pytest.mark.parametrize(
"call_ray_start",
[
"ray start --head --num-cpus=2 --min-worker-port=0 --max-worker-port=0 "
"--port 0 --ray-client-server-port=50056"
],
indirect=True,
)
def test_task_use_prestarted_worker(call_ray_start):
ray.init("ray://localhost:50056")
assert len(list_workers(filters=[("worker_type", "!=", "DRIVER")])) == 2
@ray.remote(num_cpus=2)
def f():
return 42
assert ray.get(f.remote()) == 42
assert len(list_workers(filters=[("worker_type", "!=", "DRIVER")])) == 2
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| Foo |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc_metastore.py | {
"start": 19112,
"end": 23039
} | class ____(GoogleCloudBaseOperator):
"""
Delete a single backup.
:param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
:param region: Required. The ID of the Google Cloud region that the backup belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup_id = backup_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Deleting Dataproc Metastore backup: %s", self.backup_id)
operation = hook.delete_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_id=self.backup_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Backup %s deleted successfully", self.project_id)
| DataprocMetastoreDeleteBackupOperator |
python | pydantic__pydantic | pydantic/v1/main.py | {
"start": 13188,
"end": 44824
} | class ____(Representation, metaclass=ModelMetaclass):
if TYPE_CHECKING:
# populated by the metaclass, defined here to help IDEs only
__fields__: ClassVar[Dict[str, ModelField]] = {}
__include_fields__: ClassVar[Optional[Mapping[str, Any]]] = None
__exclude_fields__: ClassVar[Optional[Mapping[str, Any]]] = None
__validators__: ClassVar[Dict[str, AnyCallable]] = {}
__pre_root_validators__: ClassVar[List[AnyCallable]]
__post_root_validators__: ClassVar[List[Tuple[bool, AnyCallable]]]
__config__: ClassVar[Type[BaseConfig]] = BaseConfig
__json_encoder__: ClassVar[Callable[[Any], Any]] = lambda x: x
__schema_cache__: ClassVar['DictAny'] = {}
__custom_root_type__: ClassVar[bool] = False
__signature__: ClassVar['Signature']
__private_attributes__: ClassVar[Dict[str, ModelPrivateAttr]]
__class_vars__: ClassVar[SetStr]
__fields_set__: ClassVar[SetStr] = set()
Config = BaseConfig
__slots__ = ('__dict__', '__fields_set__')
__doc__ = '' # Null out the Representation docstring
def __init__(__pydantic_self__, **data: Any) -> None:
"""
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.
"""
# Uses something other than `self` the first arg to allow "self" as a settable attribute
values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
if validation_error:
raise validation_error
try:
object_setattr(__pydantic_self__, '__dict__', values)
except TypeError as e:
raise TypeError(
'Model values must be a dict; you may not have returned a dictionary from a root validator'
) from e
object_setattr(__pydantic_self__, '__fields_set__', fields_set)
__pydantic_self__._init_private_attributes()
@no_type_check
def __setattr__(self, name, value): # noqa: C901 (ignore complexity)
if name in self.__private_attributes__ or name in DUNDER_ATTRIBUTES:
return object_setattr(self, name, value)
if self.__config__.extra is not Extra.allow and name not in self.__fields__:
raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"')
elif not self.__config__.allow_mutation or self.__config__.frozen:
raise TypeError(f'"{self.__class__.__name__}" is immutable and does not support item assignment')
elif name in self.__fields__ and self.__fields__[name].final:
raise TypeError(
f'"{self.__class__.__name__}" object "{name}" field is final and does not support reassignment'
)
elif self.__config__.validate_assignment:
new_values = {**self.__dict__, name: value}
for validator in self.__pre_root_validators__:
try:
new_values = validator(self.__class__, new_values)
except (ValueError, TypeError, AssertionError) as exc:
raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], self.__class__)
known_field = self.__fields__.get(name, None)
if known_field:
# We want to
# - make sure validators are called without the current value for this field inside `values`
# - keep other values (e.g. submodels) untouched (using `BaseModel.dict()` will change them into dicts)
# - keep the order of the fields
if not known_field.field_info.allow_mutation:
raise TypeError(f'"{known_field.name}" has allow_mutation set to False and cannot be assigned')
dict_without_original_value = {k: v for k, v in self.__dict__.items() if k != name}
value, error_ = known_field.validate(value, dict_without_original_value, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
else:
new_values[name] = value
errors = []
for skip_on_failure, validator in self.__post_root_validators__:
if skip_on_failure and errors:
continue
try:
new_values = validator(self.__class__, new_values)
except (ValueError, TypeError, AssertionError) as exc:
errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
if errors:
raise ValidationError(errors, self.__class__)
# update the whole __dict__ as other values than just `value`
# may be changed (e.g. with `root_validator`)
object_setattr(self, '__dict__', new_values)
else:
self.__dict__[name] = value
self.__fields_set__.add(name)
def __getstate__(self) -> 'DictAny':
private_attrs = ((k, getattr(self, k, Undefined)) for k in self.__private_attributes__)
return {
'__dict__': self.__dict__,
'__fields_set__': self.__fields_set__,
'__private_attribute_values__': {k: v for k, v in private_attrs if v is not Undefined},
}
def __setstate__(self, state: 'DictAny') -> None:
object_setattr(self, '__dict__', state['__dict__'])
object_setattr(self, '__fields_set__', state['__fields_set__'])
for name, value in state.get('__private_attribute_values__', {}).items():
object_setattr(self, name, value)
def _init_private_attributes(self) -> None:
for name, private_attr in self.__private_attributes__.items():
default = private_attr.get_default()
if default is not Undefined:
object_setattr(self, name, default)
def dict(
self,
*,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
by_alias: bool = False,
skip_defaults: Optional[bool] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> 'DictStrAny':
"""
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
"""
if skip_defaults is not None:
warnings.warn(
f'{self.__class__.__name__}.dict(): "skip_defaults" is deprecated and replaced by "exclude_unset"',
DeprecationWarning,
)
exclude_unset = skip_defaults
return dict(
self._iter(
to_dict=True,
by_alias=by_alias,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
)
def json(
self,
*,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
by_alias: bool = False,
skip_defaults: Optional[bool] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
encoder: Optional[Callable[[Any], Any]] = None,
models_as_dict: bool = True,
**dumps_kwargs: Any,
) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
"""
if skip_defaults is not None:
warnings.warn(
f'{self.__class__.__name__}.json(): "skip_defaults" is deprecated and replaced by "exclude_unset"',
DeprecationWarning,
)
exclude_unset = skip_defaults
encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)
# We don't directly call `self.dict()`, which does exactly this with `to_dict=True`
# because we want to be able to keep raw `BaseModel` instances and not as `dict`.
# This allows users to write custom JSON encoders for given `BaseModel` classes.
data = dict(
self._iter(
to_dict=models_as_dict,
by_alias=by_alias,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
)
if self.__custom_root_type__:
data = data[ROOT_KEY]
return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)
@classmethod
def _enforce_dict_if_root(cls, obj: Any) -> Any:
if cls.__custom_root_type__ and (
not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY})
and not (isinstance(obj, BaseModel) and obj.__fields__.keys() == {ROOT_KEY})
or cls.__fields__[ROOT_KEY].shape in MAPPING_LIKE_SHAPES
):
return {ROOT_KEY: obj}
else:
return obj
@classmethod
def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':
obj = cls._enforce_dict_if_root(obj)
if not isinstance(obj, dict):
try:
obj = dict(obj)
except (TypeError, ValueError) as e:
exc = TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}')
raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e
return cls(**obj)
@classmethod
def parse_raw(
cls: Type['Model'],
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
) -> 'Model':
try:
obj = load_str_bytes(
b,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=cls.__config__.json_loads,
)
except (ValueError, TypeError, UnicodeDecodeError) as e:
raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)
return cls.parse_obj(obj)
@classmethod
def parse_file(
cls: Type['Model'],
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
) -> 'Model':
obj = load_file(
path,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=cls.__config__.json_loads,
)
return cls.parse_obj(obj)
@classmethod
def from_orm(cls: Type['Model'], obj: Any) -> 'Model':
if not cls.__config__.orm_mode:
raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')
obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj)
m = cls.__new__(cls)
values, fields_set, validation_error = validate_model(cls, obj)
if validation_error:
raise validation_error
object_setattr(m, '__dict__', values)
object_setattr(m, '__fields_set__', fields_set)
m._init_private_attributes()
return m
@classmethod
def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':
"""
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if `Config.extra = 'allow'` was set since it adds all passed values
"""
m = cls.__new__(cls)
fields_values: Dict[str, Any] = {}
for name, field in cls.__fields__.items():
if field.alt_alias and field.alias in values:
fields_values[name] = values[field.alias]
elif name in values:
fields_values[name] = values[name]
elif not field.required:
fields_values[name] = field.get_default()
fields_values.update(values)
object_setattr(m, '__dict__', fields_values)
if _fields_set is None:
_fields_set = set(values.keys())
object_setattr(m, '__fields_set__', _fields_set)
m._init_private_attributes()
return m
def _copy_and_set_values(self: 'Model', values: 'DictStrAny', fields_set: 'SetStr', *, deep: bool) -> 'Model':
if deep:
# chances of having empty dict here are quite low for using smart_deepcopy
values = deepcopy(values)
cls = self.__class__
m = cls.__new__(cls)
object_setattr(m, '__dict__', values)
object_setattr(m, '__fields_set__', fields_set)
for name in self.__private_attributes__:
value = getattr(self, name, Undefined)
if value is not Undefined:
if deep:
value = deepcopy(value)
object_setattr(m, name, value)
return m
def copy(
self: 'Model',
*,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
update: Optional['DictStrAny'] = None,
deep: bool = False,
) -> 'Model':
"""
Duplicate a model, optionally choose which fields to include, exclude and change.
:param include: fields to include in new model
:param exclude: fields to exclude from new model, as with values this takes precedence over include
:param update: values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
:param deep: set to `True` to make a deep copy of the model
:return: new model instance
"""
values = dict(
self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False),
**(update or {}),
)
# new `__fields_set__` can have unset optional fields with a set value in `update` kwarg
if update:
fields_set = self.__fields_set__ | update.keys()
else:
fields_set = set(self.__fields_set__)
return self._copy_and_set_values(values, fields_set, deep=deep)
@classmethod
def schema(cls, by_alias: bool = True, ref_template: str = default_ref_template) -> 'DictStrAny':
cached = cls.__schema_cache__.get((by_alias, ref_template))
if cached is not None:
return cached
s = model_schema(cls, by_alias=by_alias, ref_template=ref_template)
cls.__schema_cache__[(by_alias, ref_template)] = s
return s
@classmethod
def schema_json(
cls, *, by_alias: bool = True, ref_template: str = default_ref_template, **dumps_kwargs: Any
) -> str:
from pydantic.v1.json import pydantic_encoder
return cls.__config__.json_dumps(
cls.schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls: Type['Model'], value: Any) -> 'Model':
if isinstance(value, cls):
copy_on_model_validation = cls.__config__.copy_on_model_validation
# whether to deep or shallow copy the model on validation, None means do not copy
deep_copy: Optional[bool] = None
if copy_on_model_validation not in {'deep', 'shallow', 'none'}:
# Warn about deprecated behavior
warnings.warn(
"`copy_on_model_validation` should be a string: 'deep', 'shallow' or 'none'", DeprecationWarning
)
if copy_on_model_validation:
deep_copy = False
if copy_on_model_validation == 'shallow':
# shallow copy
deep_copy = False
elif copy_on_model_validation == 'deep':
# deep copy
deep_copy = True
if deep_copy is None:
return value
else:
return value._copy_and_set_values(value.__dict__, value.__fields_set__, deep=deep_copy)
value = cls._enforce_dict_if_root(value)
if isinstance(value, dict):
return cls(**value)
elif cls.__config__.orm_mode:
return cls.from_orm(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
@classmethod
def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:
if isinstance(obj, GetterDict):
return obj
return cls.__config__.getter_dict(obj)
@classmethod
@no_type_check
def _get_value(
cls,
v: Any,
to_dict: bool,
by_alias: bool,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],
exclude_unset: bool,
exclude_defaults: bool,
exclude_none: bool,
) -> Any:
if isinstance(v, BaseModel):
if to_dict:
v_dict = v.dict(
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=include,
exclude=exclude,
exclude_none=exclude_none,
)
if ROOT_KEY in v_dict:
return v_dict[ROOT_KEY]
return v_dict
else:
return v.copy(include=include, exclude=exclude)
value_exclude = ValueItems(v, exclude) if exclude else None
value_include = ValueItems(v, include) if include else None
if isinstance(v, dict):
return {
k_: cls._get_value(
v_,
to_dict=to_dict,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=value_include and value_include.for_element(k_),
exclude=value_exclude and value_exclude.for_element(k_),
exclude_none=exclude_none,
)
for k_, v_ in v.items()
if (not value_exclude or not value_exclude.is_excluded(k_))
and (not value_include or value_include.is_included(k_))
}
elif sequence_like(v):
seq_args = (
cls._get_value(
v_,
to_dict=to_dict,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=value_include and value_include.for_element(i),
exclude=value_exclude and value_exclude.for_element(i),
exclude_none=exclude_none,
)
for i, v_ in enumerate(v)
if (not value_exclude or not value_exclude.is_excluded(i))
and (not value_include or value_include.is_included(i))
)
return v.__class__(*seq_args) if is_namedtuple(v.__class__) else v.__class__(seq_args)
elif isinstance(v, Enum) and getattr(cls.Config, 'use_enum_values', False):
return v.value
else:
return v
@classmethod
def __try_update_forward_refs__(cls, **localns: Any) -> None:
"""
Same as update_forward_refs but will not raise exception
when forward references are not defined.
"""
update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns, (NameError,))
@classmethod
def update_forward_refs(cls, **localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this Model, globalns and localns.
"""
update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns)
def __iter__(self) -> 'TupleGenerator':
"""
so `dict(model)` works
"""
yield from self.__dict__.items()
def _iter(
self,
to_dict: bool = False,
by_alias: bool = False,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> 'TupleGenerator':
# Merge field set excludes with explicit exclude parameter with explicit overriding field set options.
# The extra "is not None" guards are not logically necessary but optimizes performance for the simple case.
if exclude is not None or self.__exclude_fields__ is not None:
exclude = ValueItems.merge(self.__exclude_fields__, exclude)
if include is not None or self.__include_fields__ is not None:
include = ValueItems.merge(self.__include_fields__, include, intersect=True)
allowed_keys = self._calculate_keys(
include=include, exclude=exclude, exclude_unset=exclude_unset # type: ignore
)
if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none):
# huge boost for plain _iter()
yield from self.__dict__.items()
return
value_exclude = ValueItems(self, exclude) if exclude is not None else None
value_include = ValueItems(self, include) if include is not None else None
for field_key, v in self.__dict__.items():
if (allowed_keys is not None and field_key not in allowed_keys) or (exclude_none and v is None):
continue
if exclude_defaults:
model_field = self.__fields__.get(field_key)
if not getattr(model_field, 'required', True) and getattr(model_field, 'default', _missing) == v:
continue
if by_alias and field_key in self.__fields__:
dict_key = self.__fields__[field_key].alias
else:
dict_key = field_key
if to_dict or value_include or value_exclude:
v = self._get_value(
v,
to_dict=to_dict,
by_alias=by_alias,
include=value_include and value_include.for_element(field_key),
exclude=value_exclude and value_exclude.for_element(field_key),
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
yield dict_key, v
def _calculate_keys(
self,
include: Optional['MappingIntStrAny'],
exclude: Optional['MappingIntStrAny'],
exclude_unset: bool,
update: Optional['DictStrAny'] = None,
) -> Optional[AbstractSet[str]]:
if include is None and exclude is None and exclude_unset is False:
return None
keys: AbstractSet[str]
if exclude_unset:
keys = self.__fields_set__.copy()
else:
keys = self.__dict__.keys()
if include is not None:
keys &= include.keys()
if update:
keys -= update.keys()
if exclude:
keys -= {k for k, v in exclude.items() if ValueItems.is_true(v)}
return keys
def __eq__(self, other: Any) -> bool:
if isinstance(other, BaseModel):
return self.dict() == other.dict()
else:
return self.dict() == other
def __repr_args__(self) -> 'ReprArgs':
return [
(k, v)
for k, v in self.__dict__.items()
if k not in DUNDER_ATTRIBUTES and (k not in self.__fields__ or self.__fields__[k].field_info.repr)
]
_is_base_model_class_defined = True
@overload
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: None = None,
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
**field_definitions: Any,
) -> Type['BaseModel']:
...
@overload
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Union[Type['Model'], Tuple[Type['Model'], ...]],
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
**field_definitions: Any,
) -> Type['Model']:
...
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Union[None, Type['Model'], Tuple[Type['Model'], ...]] = None,
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['Model']:
"""
Dynamically create a model.
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
in the format `<name>=(<type>, <default default>)` or `<name>=<default value>, e.g.
`foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format
`<name>=<Field>` or `<name>=(<type>, <FieldInfo>)`, e.g.
`foo=Field(datetime, default_factory=datetime.utcnow, alias='bar')` or
`foo=(str, FieldInfo(title='Foo'))`
"""
if __slots__ is not None:
# __slots__ will be ignored from here on
warnings.warn('__slots__ should not be passed to create_model', RuntimeWarning)
if __base__ is not None:
if __config__ is not None:
raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')
if not isinstance(__base__, tuple):
__base__ = (__base__,)
else:
__base__ = (cast(Type['Model'], BaseModel),)
__cls_kwargs__ = __cls_kwargs__ or {}
fields = {}
annotations = {}
for f_name, f_def in field_definitions.items():
if not is_valid_field(f_name):
warnings.warn(f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning)
if isinstance(f_def, tuple):
try:
f_annotation, f_value = f_def
except ValueError as e:
raise ConfigError(
'field definitions should either be a tuple of (<type>, <default>) or just a '
'default value, unfortunately this means tuples as '
'default values are not allowed'
) from e
else:
f_annotation, f_value = None, f_def
if f_annotation:
annotations[f_name] = f_annotation
fields[f_name] = f_value
namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}
if __validators__:
namespace.update(__validators__)
namespace.update(fields)
if __config__:
namespace['Config'] = inherit_config(__config__, BaseConfig)
resolved_bases = resolve_bases(__base__)
meta, ns, kwds = prepare_class(__model_name, resolved_bases, kwds=__cls_kwargs__)
if resolved_bases is not __base__:
ns['__orig_bases__'] = __base__
namespace.update(ns)
return meta(__model_name, resolved_bases, namespace, **kwds)
_missing = object()
def validate_model( # noqa: C901 (ignore complexity)
model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None
) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:
"""
validate data against a model.
"""
values = {}
errors = []
# input_data names, possibly alias
names_used = set()
# field names, never aliases
fields_set = set()
config = model.__config__
check_extra = config.extra is not Extra.ignore
cls_ = cls or model
for validator in model.__pre_root_validators__:
try:
input_data = validator(cls_, input_data)
except (ValueError, TypeError, AssertionError) as exc:
return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)
for name, field in model.__fields__.items():
value = input_data.get(field.alias, _missing)
using_name = False
if value is _missing and config.allow_population_by_field_name and field.alt_alias:
value = input_data.get(field.name, _missing)
using_name = True
if value is _missing:
if field.required:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
continue
value = field.get_default()
if not config.validate_all and not field.validate_always:
values[name] = value
continue
else:
fields_set.add(name)
if check_extra:
names_used.add(field.name if using_name else field.alias)
v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[name] = v_
if check_extra:
if isinstance(input_data, GetterDict):
extra = input_data.extra_keys() - names_used
else:
extra = input_data.keys() - names_used
if extra:
fields_set |= extra
if config.extra is Extra.allow:
for f in extra:
values[f] = input_data[f]
else:
for f in sorted(extra):
errors.append(ErrorWrapper(ExtraError(), loc=f))
for skip_on_failure, validator in model.__post_root_validators__:
if skip_on_failure and errors:
continue
try:
values = validator(cls_, values)
except (ValueError, TypeError, AssertionError) as exc:
errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
if errors:
return values, fields_set, ValidationError(errors, cls_)
else:
return values, fields_set, None
| BaseModel |
python | keon__algorithms | tests/test_strings.py | {
"start": 14575,
"end": 14803
} | class ____(unittest.TestCase):
def test_check_pangram(self):
self.assertTrue(check_pangram("The quick brown fox jumps over the lazy dog"))
self.assertFalse(check_pangram("The quick brown fox"))
| TestCheckPangram |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 125687,
"end": 128541
} | class ____(Request):
"""
Set the model ready flag to True. If the model is an output model of a task then try to publish the task.
:param model: Model id
:type model: str
:param force_publish_task: Publish the associated task (if exists) even if it
is not in the 'stopped' state. Optional, the default value is False.
:type force_publish_task: bool
:param publish_task: Indicates that the associated task (if exists) should be
published. Optional, the default value is True.
:type publish_task: bool
"""
_service = "models"
_action = "set_ready"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force_publish_task": {
"description": "Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False.",
"type": "boolean",
},
"model": {"description": "Model id", "type": "string"},
"publish_task": {
"description": "Indicates that the associated task (if exists) should be published. Optional, the default value is True.",
"type": "boolean",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self, model: str, force_publish_task: Optional[bool] = None, publish_task: Optional[bool] = None, **kwargs: Any
) -> None:
super(SetReadyRequest, self).__init__(**kwargs)
self.model = model
self.force_publish_task = force_publish_task
self.publish_task = publish_task
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("force_publish_task")
def force_publish_task(self) -> Optional[bool]:
return self._property_force_publish_task
@force_publish_task.setter
def force_publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_force_publish_task = None
return
self.assert_isinstance(value, "force_publish_task", (bool,))
self._property_force_publish_task = value
@schema_property("publish_task")
def publish_task(self) -> Optional[bool]:
return self._property_publish_task
@publish_task.setter
def publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_publish_task = None
return
self.assert_isinstance(value, "publish_task", (bool,))
self._property_publish_task = value
| SetReadyRequest |
python | walkccc__LeetCode | solutions/414. Third Maximum Number/414.py | {
"start": 0,
"end": 467
} | class ____:
def thirdMax(self, nums: list[int]) -> int:
max1 = -math.inf # the maximum
max2 = -math.inf # the second maximum
max3 = -math.inf # the third maximum
for num in nums:
if num > max1:
max3 = max2
max2 = max1
max1 = num
elif max1 > num and num > max2:
max3 = max2
max2 = num
elif max2 > num and num > max3:
max3 = num
return max1 if max3 == -math.inf else max3
| Solution |
python | redis__redis-py | tests/test_asyncio/test_retry.py | {
"start": 4305,
"end": 5349
} | class ____:
"Test the Redis client behavior with retries"
async def test_get_set_retry_object(self, request):
retry = Retry(NoBackoff(), 2)
url = request.config.getoption("--redis-url")
r = await Redis.from_url(url, retry_on_timeout=True, retry=retry)
assert r.get_retry()._retries == retry._retries
assert isinstance(r.get_retry()._backoff, NoBackoff)
new_retry_policy = Retry(ExponentialBackoff(), 3)
exiting_conn = await r.connection_pool.get_connection()
r.set_retry(new_retry_policy)
assert r.get_retry()._retries == new_retry_policy._retries
assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
assert exiting_conn.retry._retries == new_retry_policy._retries
await r.connection_pool.release(exiting_conn)
new_conn = await r.connection_pool.get_connection()
assert new_conn.retry._retries == new_retry_policy._retries
await r.connection_pool.release(new_conn)
await r.aclose()
| TestRedisClientRetry |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/layers.py | {
"start": 276,
"end": 1404
} | class ____(object):
def set_input_shape(self, shape):
""" Sets the shape that the layer expects of the input in the forward
pass method """
self.input_shape = shape
def layer_name(self):
""" The name of the layer. Used in model summary. """
return self.__class__.__name__
def parameters(self):
""" The number of trainable parameters used by the layer """
return 0
def forward_pass(self, X, training):
""" Propogates the signal forward in the network """
raise NotImplementedError()
def backward_pass(self, accum_grad):
""" Propogates the accumulated gradient backwards in the network.
If the has trainable weights then these weights are also tuned in this method.
As input (accum_grad) it receives the gradient with respect to the output of the layer and
returns the gradient with respect to the output of the previous layer. """
raise NotImplementedError()
def output_shape(self):
""" The shape of the output produced by forward_pass """
raise NotImplementedError()
| Layer |
python | ipython__ipython | tests/test_profile.py | {
"start": 2069,
"end": 5288
} | class ____(TestCase):
def setUp(self):
# create profile dir
self.pd = ProfileDir.create_profile_dir_by_name(IP_TEST_DIR, "test")
self.options = ["--ipython-dir", IP_TEST_DIR, "--profile", "test"]
self.fname = TMP_TEST_DIR / "test.py"
def tearDown(self):
# We must remove this profile right away so its presence doesn't
# confuse other tests.
shutil.rmtree(self.pd.location)
def init(self, startup_file, startup, test):
# write startup python file
with open(Path(self.pd.startup_dir) / startup_file, "w", encoding="utf-8") as f:
f.write(startup)
# write simple test file, to check that the startup file was run
with open(self.fname, "w", encoding="utf-8") as f:
f.write(test)
def validate(self, output):
tt.ipexec_validate(self.fname, output, "", options=self.options)
def test_startup_py(self):
self.init("00-start.py", "zzz=123\n", "print(zzz)\n")
self.validate("123")
def test_startup_ipy(self):
self.init("00-start.ipy", "%xmode plain\n", "")
self.validate("Exception reporting mode: Plain")
@pytest.mark.skipif(
sys.implementation.name == "pypy"
and ((7, 3, 13) < sys.implementation.version < (7, 3, 16)),
reason="Unicode issues with scandir on PyPy, see https://github.com/pypy/pypy/issues/4860",
)
def test_list_profiles_in():
# No need to remove these directories and files, as they will get nuked in
# the module-level teardown.
td = Path(tempfile.mkdtemp(dir=TMP_TEST_DIR))
for name in ("profile_foo", "profile_hello", "not_a_profile"):
Path(td / name).mkdir(parents=True)
if dec.unicode_paths:
Path(td / "profile_ünicode").mkdir(parents=True)
with open(td / "profile_file", "w", encoding="utf-8") as f:
f.write("I am not a profile directory")
profiles = list_profiles_in(td)
# unicode normalization can turn u'ünicode' into u'u\0308nicode',
# so only check for *nicode, and that creating a ProfileDir from the
# name remains valid
found_unicode = False
for p in list(profiles):
if p.endswith("nicode"):
pd = ProfileDir.find_profile_dir_by_name(td, p)
profiles.remove(p)
found_unicode = True
break
if dec.unicode_paths:
assert found_unicode is True
assert set(profiles) == {"foo", "hello"}
def test_list_bundled_profiles():
# This variable will need to be updated when a new profile gets bundled
bundled = sorted(list_bundled_profiles())
assert bundled == []
def test_profile_create_ipython_dir():
"""ipython profile create respects --ipython-dir"""
with TemporaryDirectory() as td:
getoutput(
[
sys.executable,
"-m",
"IPython",
"profile",
"create",
"foo",
"--ipython-dir=%s" % td,
]
)
profile_dir = Path(td) / "profile_foo"
assert Path(profile_dir).exists()
ipython_config = profile_dir / "ipython_config.py"
assert Path(ipython_config).exists()
| ProfileStartupTest |
python | apache__airflow | providers/snowflake/src/airflow/providers/snowflake/hooks/snowflake.py | {
"start": 2255,
"end": 30416
} | class ____(DbApiHook):
"""
A client to interact with Snowflake.
This hook requires the snowflake_conn_id connection. The snowflake account, login,
and, password field must be setup in the connection. Other inputs can be defined
in the connection or hook instantiation.
:param snowflake_conn_id: Reference to
:ref:`Snowflake connection id<howto/connection:snowflake>`
:param account: snowflake account name
:param authenticator: authenticator for Snowflake.
'snowflake' (default) to use the internal Snowflake authenticator
'externalbrowser' to authenticate using your web browser and
Okta, ADFS or any other SAML 2.0-compliant identify provider
(IdP) that has been defined for your account
``https://<your_okta_account_name>.okta.com`` to authenticate
through native Okta.
:param warehouse: name of snowflake warehouse
:param database: name of snowflake database
:param region: name of snowflake region
:param role: name of snowflake role
:param schema: name of snowflake schema
:param session_parameters: You can set session-level parameters at
the time you connect to Snowflake
:param insecure_mode: Turns off OCSP certificate checks.
For details, see: `How To: Turn Off OCSP Checking in Snowflake Client Drivers - Snowflake Community
<https://community.snowflake.com/s/article/How-to-turn-off-OCSP-checking-in-Snowflake-client-drivers>`__
.. note::
``get_sqlalchemy_engine()`` depends on ``snowflake-sqlalchemy``
"""
conn_name_attr = "snowflake_conn_id"
default_conn_name = "snowflake_default"
conn_type = "snowflake"
hook_name = "Snowflake"
supports_autocommit = True
_test_connection_sql = "select 1"
default_azure_oauth_scope = "api://snowflake_oauth_server/.default"
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import (
BS3PasswordFieldWidget,
BS3TextFieldWidget,
)
from flask_babel import lazy_gettext
from wtforms import BooleanField, PasswordField, StringField
return {
"account": StringField(lazy_gettext("Account"), widget=BS3TextFieldWidget()),
"warehouse": StringField(lazy_gettext("Warehouse"), widget=BS3TextFieldWidget()),
"database": StringField(lazy_gettext("Database"), widget=BS3TextFieldWidget()),
"region": StringField(lazy_gettext("Region"), widget=BS3TextFieldWidget()),
"role": StringField(lazy_gettext("Role"), widget=BS3TextFieldWidget()),
"private_key_file": StringField(lazy_gettext("Private key (Path)"), widget=BS3TextFieldWidget()),
"private_key_content": PasswordField(
lazy_gettext("Private key (Text)"), widget=BS3PasswordFieldWidget()
),
"insecure_mode": BooleanField(
label=lazy_gettext("Insecure mode"), description="Turns off OCSP certificate checks"
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
import json
return {
"hidden_fields": ["port", "host"],
"relabeling": {},
"placeholders": {
"extra": json.dumps(
{
"authenticator": "snowflake oauth",
"private_key_file": "private key",
"session_parameters": "session parameters",
"client_request_mfa_token": "client request mfa token",
"client_store_temporary_credential": "client store temporary credential (externalbrowser mode)",
"grant_type": "refresh_token client_credentials",
"token_endpoint": "token endpoint",
"refresh_token": "refresh token",
},
indent=1,
),
"schema": "snowflake schema",
"login": "snowflake username",
"password": "snowflake password",
"account": "snowflake account name",
"warehouse": "snowflake warehouse name",
"database": "snowflake db name",
"region": "snowflake hosted region",
"role": "snowflake role",
"private_key_file": "Path of snowflake private key (PEM Format)",
"private_key_content": "Content to snowflake private key (PEM format)",
"insecure_mode": "insecure mode",
},
}
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.account = kwargs.pop("account", None)
self.warehouse = kwargs.pop("warehouse", None)
self.database = kwargs.pop("database", None)
self.region = kwargs.pop("region", None)
self.role = kwargs.pop("role", None)
self.schema = kwargs.pop("schema", None)
self.authenticator = kwargs.pop("authenticator", None)
self.session_parameters = kwargs.pop("session_parameters", None)
self.client_request_mfa_token = kwargs.pop("client_request_mfa_token", None)
self.client_store_temporary_credential = kwargs.pop("client_store_temporary_credential", None)
self.query_ids: list[str] = []
def _get_field(self, extra_dict, field_name):
backcompat_prefix = "extra__snowflake__"
backcompat_key = f"{backcompat_prefix}{field_name}"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
f"when using this method."
)
if field_name in extra_dict:
import warnings
if backcompat_key in extra_dict:
warnings.warn(
f"Conflicting params `{field_name}` and `{backcompat_key}` found in extras. "
f"Using value for `{field_name}`. Please ensure this is the correct "
f"value and remove the backcompat key `{backcompat_key}`.",
UserWarning,
stacklevel=2,
)
return extra_dict[field_name] or None
return extra_dict.get(backcompat_key) or None
@property
def account_identifier(self) -> str:
"""Get snowflake account identifier."""
conn_config = self._get_conn_params
account_identifier = f"https://{conn_config['account']}"
if conn_config["region"]:
account_identifier += f".{conn_config['region']}"
return account_identifier
def get_oauth_token(
self,
conn_config: dict | None = None,
token_endpoint: str | None = None,
grant_type: str = "refresh_token",
) -> str:
"""Generate temporary OAuth access token using refresh token in connection details."""
if conn_config is None:
conn_config = self._get_conn_params
url = token_endpoint or f"https://{conn_config['account']}.snowflakecomputing.com/oauth/token-request"
data = {
"grant_type": grant_type,
"redirect_uri": conn_config.get("redirect_uri", "https://localhost.com"),
}
if grant_type == "refresh_token":
data |= {
"refresh_token": conn_config["refresh_token"],
}
elif grant_type == "client_credentials":
pass # no setup necessary for client credentials grant.
else:
raise ValueError(f"Unknown grant_type: {grant_type}")
response = requests.post(
url,
data=data,
headers={
"Content-Type": "application/x-www-form-urlencoded",
},
auth=HTTPBasicAuth(conn_config["client_id"], conn_config["client_secret"]), # type: ignore[arg-type]
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e: # pragma: no cover
msg = f"Response: {e.response.content.decode()} Status Code: {e.response.status_code}"
raise AirflowException(msg)
token = response.json()["access_token"]
return token
def get_azure_oauth_token(self, azure_conn_id: str) -> str:
"""
Generate OAuth access token using Azure connection id.
This uses AzureBaseHook on the connection id to retrieve the token. Scope for the OAuth token can be
set in the config option ``azure_oauth_scope`` under the section ``[snowflake]``.
:param azure_conn_id: The connection id for the Azure connection that will be used to fetch the token.
:raises AttributeError: If AzureBaseHook does not have a get_token method which happens when
package apache-airflow-providers-microsoft-azure<12.8.0.
:returns: The OAuth access token string.
"""
if TYPE_CHECKING:
from airflow.providers.microsoft.azure.hooks.azure_base import AzureBaseHook
try:
azure_conn = Connection.get(azure_conn_id)
except AttributeError:
azure_conn = Connection.get_connection_from_secrets(azure_conn_id) # type: ignore[attr-defined]
try:
azure_base_hook: AzureBaseHook = azure_conn.get_hook()
except TypeError as e:
if "required positional argument: 'sdk_client'" in str(e):
raise AirflowOptionalProviderFeatureException(
"Getting azure token is not supported by current version of 'AzureBaseHook'. "
"Please upgrade apache-airflow-providers-microsoft-azure>=12.8.0"
) from e
raise
scope = conf.get("snowflake", "azure_oauth_scope", fallback=self.default_azure_oauth_scope)
token = azure_base_hook.get_token(scope).token
return token
@cached_property
def _get_conn_params(self) -> dict[str, str | None]:
"""
Fetch connection params as a dict.
This is used in ``get_uri()`` and ``get_connection()``.
"""
conn = self.get_connection(self.get_conn_id())
extra_dict = conn.extra_dejson
account = self._get_field(extra_dict, "account") or ""
warehouse = self._get_field(extra_dict, "warehouse") or ""
database = self._get_field(extra_dict, "database") or ""
region = self._get_field(extra_dict, "region") or ""
role = self._get_field(extra_dict, "role") or ""
insecure_mode = _try_to_boolean(self._get_field(extra_dict, "insecure_mode"))
json_result_force_utf8_decoding = _try_to_boolean(
self._get_field(extra_dict, "json_result_force_utf8_decoding")
)
schema = conn.schema or ""
client_request_mfa_token = _try_to_boolean(self._get_field(extra_dict, "client_request_mfa_token"))
client_store_temporary_credential = _try_to_boolean(
self._get_field(extra_dict, "client_store_temporary_credential")
)
# authenticator and session_parameters never supported long name so we don't use _get_field
authenticator = extra_dict.get("authenticator", "snowflake")
session_parameters = extra_dict.get("session_parameters")
conn_config = {
"user": conn.login,
"password": conn.password or "",
"schema": self.schema or schema,
"database": self.database or database,
"account": self.account or account,
"warehouse": self.warehouse or warehouse,
"region": self.region or region,
"role": self.role or role,
"authenticator": self.authenticator or authenticator,
"session_parameters": self.session_parameters or session_parameters,
# application is used to track origin of the requests
"application": os.environ.get("AIRFLOW_SNOWFLAKE_PARTNER", "AIRFLOW"),
}
if insecure_mode:
conn_config["insecure_mode"] = insecure_mode
if json_result_force_utf8_decoding:
conn_config["json_result_force_utf8_decoding"] = json_result_force_utf8_decoding
if client_request_mfa_token:
conn_config["client_request_mfa_token"] = client_request_mfa_token
if client_store_temporary_credential:
conn_config["client_store_temporary_credential"] = client_store_temporary_credential
# If private_key_file is specified in the extra json, load the contents of the file as a private key.
# If private_key_content is specified in the extra json, use it as a private key.
# As a next step, specify this private key in the connection configuration.
# The connection password then becomes the passphrase for the private key.
# If your private key is not encrypted (not recommended), then leave the password empty.
private_key_file = self._get_field(extra_dict, "private_key_file")
private_key_content = self._get_field(extra_dict, "private_key_content")
private_key_pem = None
if private_key_content and private_key_file:
raise AirflowException(
"The private_key_file and private_key_content extra fields are mutually exclusive. "
"Please remove one."
)
if private_key_file:
private_key_file_path = Path(private_key_file)
if not private_key_file_path.is_file() or private_key_file_path.stat().st_size == 0:
raise ValueError("The private_key_file path points to an empty or invalid file.")
if private_key_file_path.stat().st_size > 4096:
raise ValueError("The private_key_file size is too big. Please keep it less than 4 KB.")
private_key_pem = Path(private_key_file_path).read_bytes()
elif private_key_content:
private_key_pem = base64.b64decode(private_key_content)
if private_key_pem:
passphrase = None
if conn.password:
passphrase = conn.password.strip().encode()
p_key = serialization.load_pem_private_key(
private_key_pem, password=passphrase, backend=default_backend()
)
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
conn_config["private_key"] = pkb
conn_config.pop("password", None)
refresh_token = self._get_field(extra_dict, "refresh_token") or ""
if refresh_token:
conn_config["refresh_token"] = refresh_token
conn_config["authenticator"] = "oauth"
if conn_config.get("authenticator") == "oauth":
if extra_dict.get("azure_conn_id"):
conn_config["token"] = self.get_azure_oauth_token(extra_dict["azure_conn_id"])
else:
token_endpoint = self._get_field(extra_dict, "token_endpoint") or ""
conn_config["client_id"] = conn.login
conn_config["client_secret"] = conn.password
conn_config["token"] = self.get_oauth_token(
conn_config=conn_config,
token_endpoint=token_endpoint,
grant_type=extra_dict.get("grant_type", "refresh_token"),
)
conn_config.pop("login", None)
conn_config.pop("user", None)
conn_config.pop("password", None)
# configure custom target hostname and port, if specified
snowflake_host = extra_dict.get("host")
snowflake_port = extra_dict.get("port")
if snowflake_host:
conn_config["host"] = snowflake_host
if snowflake_port:
conn_config["port"] = snowflake_port
# if a value for ocsp_fail_open is set, pass it along.
# Note the check is for `is not None` so that we can pass along `False` as a value.
ocsp_fail_open = extra_dict.get("ocsp_fail_open")
if ocsp_fail_open is not None:
conn_config["ocsp_fail_open"] = _try_to_boolean(ocsp_fail_open)
return conn_config
def get_uri(self) -> str:
"""Override DbApiHook get_uri method for get_sqlalchemy_engine()."""
conn_params = self._get_conn_params
return self._conn_params_to_sqlalchemy_uri(conn_params)
def _conn_params_to_sqlalchemy_uri(self, conn_params: dict) -> str:
return URL(
**{
k: v
for k, v in conn_params.items()
if v
and k
not in [
"session_parameters",
"insecure_mode",
"private_key",
"client_request_mfa_token",
"client_store_temporary_credential",
"json_result_force_utf8_decoding",
"ocsp_fail_open",
]
}
)
def get_conn(self) -> SnowflakeConnection:
"""Return a snowflake.connection object."""
conn_config = self._get_conn_params
conn = connector.connect(**conn_config)
return conn
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
engine_kwargs = engine_kwargs or {}
conn_params = self._get_conn_params
if "insecure_mode" in conn_params:
engine_kwargs.setdefault("connect_args", {})
engine_kwargs["connect_args"]["insecure_mode"] = True
if "json_result_force_utf8_decoding" in conn_params:
engine_kwargs.setdefault("connect_args", {})
engine_kwargs["connect_args"]["json_result_force_utf8_decoding"] = True
if "ocsp_fail_open" in conn_params:
engine_kwargs.setdefault("connect_args", {})
engine_kwargs["connect_args"]["ocsp_fail_open"] = conn_params["ocsp_fail_open"]
for key in ["session_parameters", "private_key"]:
if conn_params.get(key):
engine_kwargs.setdefault("connect_args", {})
engine_kwargs["connect_args"][key] = conn_params[key]
return create_engine(self._conn_params_to_sqlalchemy_uri(conn_params), **engine_kwargs)
def get_snowpark_session(self):
"""
Get a Snowpark session object.
:return: the created session.
"""
from snowflake.snowpark import Session
from airflow import __version__ as airflow_version
from airflow.providers.snowflake import __version__ as provider_version
conn_config = self._get_conn_params
session = Session.builder.configs(conn_config).create()
# add query tag for observability
session.update_query_tag(
{
"airflow_version": airflow_version,
"airflow_provider_version": provider_version,
}
)
return session
def set_autocommit(self, conn, autocommit: Any) -> None:
conn.autocommit(autocommit)
conn.autocommit_mode = autocommit
def get_autocommit(self, conn):
return getattr(conn, "autocommit_mode", False)
@overload
def run(
self,
sql: str | Iterable[str],
autocommit: bool = ...,
parameters: Iterable | Mapping[str, Any] | None = ...,
handler: None = ...,
split_statements: bool = ...,
return_last: bool = ...,
return_dictionaries: bool = ...,
) -> None: ...
@overload
def run(
self,
sql: str | Iterable[str],
autocommit: bool = ...,
parameters: Iterable | Mapping[str, Any] | None = ...,
handler: Callable[[Any], T] = ...,
split_statements: bool = ...,
return_last: bool = ...,
return_dictionaries: bool = ...,
) -> tuple | list[tuple] | list[list[tuple] | tuple] | None: ...
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping[str, Any] | None = None,
handler: Callable[[Any], T] | None = None,
split_statements: bool = True,
return_last: bool = True,
return_dictionaries: bool = False,
) -> tuple | list[tuple] | list[list[tuple] | tuple] | None:
"""
Run a command or list of commands.
Pass a list of SQL statements to the SQL parameter to get them to
execute sequentially. The result of the queries is returned if the
``handler`` callable is set.
:param sql: The SQL string to be executed with possibly multiple
statements, or a list of sql statements to execute
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:param parameters: The parameters to render the SQL query with.
:param handler: The result handler which is called with the result of
each statement.
:param split_statements: Whether to split a single SQL string into
statements and run separately
:param return_last: Whether to return result for only last statement or
for all after split.
:param return_dictionaries: Whether to return dictionaries rather than
regular DBAPI sequences as rows in the result. The dictionaries are
of form ``{ 'column1_name': value1, 'column2_name': value2 ... }``.
:return: Result of the last SQL statement if *handler* is set.
*None* otherwise.
"""
self.query_ids = []
if isinstance(sql, str):
if split_statements:
split_statements_tuple = util_text.split_statements(StringIO(sql))
sql_list: Iterable[str] = [
sql_string for sql_string, _ in split_statements_tuple if sql_string
]
else:
sql_list = [self.strip_sql_string(sql)]
else:
sql_list = sql
if sql_list:
self.log.debug("Executing following statements against Snowflake DB: %s", sql_list)
else:
raise ValueError("List of SQL statements is empty")
with closing(self.get_conn()) as conn:
self.set_autocommit(conn, autocommit)
with self._get_cursor(conn, return_dictionaries) as cur:
results = []
for sql_statement in sql_list:
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = self._make_common_data_structure(handler(cur))
if return_single_query_results(sql, return_last, split_statements):
_last_result = result
_last_description = cur.description
else:
results.append(result)
self.descriptions.append(cur.description)
query_id = cur.sfqid
self.log.info("Rows affected: %s", cur.rowcount)
self.log.info("Snowflake query id: %s", query_id)
self.query_ids.append(query_id)
# If autocommit was set to False or db does not support autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if return_single_query_results(sql, return_last, split_statements):
self.descriptions = [_last_description]
return _last_result
return results
@contextmanager
def _get_cursor(self, conn: Any, return_dictionaries: bool):
cursor = None
try:
if return_dictionaries:
cursor = conn.cursor(DictCursor)
else:
cursor = conn.cursor()
yield cursor
finally:
if cursor is not None:
cursor.close()
def get_openlineage_database_info(self, connection) -> DatabaseInfo:
from airflow.providers.openlineage.sqlparser import DatabaseInfo
database = self.database or self._get_field(connection.extra_dejson, "database")
return DatabaseInfo(
scheme=self.get_openlineage_database_dialect(connection),
authority=self._get_openlineage_authority(connection),
information_schema_columns=[
"table_schema",
"table_name",
"column_name",
"ordinal_position",
"data_type",
"table_catalog",
],
database=database,
is_information_schema_cross_db=True,
is_uppercase_names=True,
)
def get_openlineage_database_dialect(self, _) -> str:
return "snowflake"
def get_openlineage_default_schema(self) -> str | None:
return self._get_conn_params["schema"]
def _get_openlineage_authority(self, _) -> str | None:
uri = fix_snowflake_sqlalchemy_uri(self.get_uri())
return urlparse(uri).hostname
def get_openlineage_database_specific_lineage(self, task_instance) -> OperatorLineage | None:
"""
Emit separate OpenLineage events for each Snowflake query, based on executed query IDs.
If a single query ID is present, also add an `ExternalQueryRunFacet` to the returned lineage metadata.
Note that `get_openlineage_database_specific_lineage` is usually called after task's execution,
so if multiple query IDs are present, both START and COMPLETE event for each query will be emitted
after task's execution. If we are able to query Snowflake for query execution metadata,
query event times will correspond to actual query's start and finish times.
Args:
task_instance: The Airflow TaskInstance object for which lineage is being collected.
Returns:
An `OperatorLineage` object if a single query ID is found; otherwise `None`.
"""
from airflow.providers.common.compat.openlineage.facet import ExternalQueryRunFacet
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.sqlparser import SQLParser
from airflow.providers.snowflake.utils.openlineage import (
emit_openlineage_events_for_snowflake_queries,
)
if not self.query_ids:
self.log.info("OpenLineage could not find snowflake query ids.")
return None
self.log.debug("openlineage: getting connection to get database info")
connection = self.get_connection(self.get_conn_id())
namespace = SQLParser.create_namespace(self.get_openlineage_database_info(connection))
self.log.info("Separate OpenLineage events will be emitted for each query_id.")
emit_openlineage_events_for_snowflake_queries(
task_instance=task_instance,
hook=self,
query_ids=self.query_ids,
query_for_extra_metadata=True,
query_source_namespace=namespace,
)
if len(self.query_ids) == 1:
self.log.debug("Attaching ExternalQueryRunFacet with single query_id to OpenLineage event.")
return OperatorLineage(
run_facets={
"externalQuery": ExternalQueryRunFacet(
externalQueryId=self.query_ids[0], source=namespace
)
}
)
return None
| SnowflakeHook |
python | fluentpython__example-code-2e | 24-class-metaprog/checked/initsub/checkedlib.py | {
"start": 2078,
"end": 2919
} | class ____:
def __init__(self, name: str, constructor: Callable) -> None: # <2>
if not callable(constructor) or constructor is type(None): # <3>
raise TypeError(f'{name!r} type hint must be callable')
self.name = name
self.constructor = constructor
def __set__(self, instance: Any, value: Any) -> None:
if value is ...: # <4>
value = self.constructor()
else:
try:
value = self.constructor(value) # <5>
except (TypeError, ValueError) as e: # <6>
type_name = self.constructor.__name__
msg = f'{value!r} is not compatible with {self.name}:{type_name}'
raise TypeError(msg) from e
instance.__dict__[self.name] = value # <7>
# end::CHECKED_FIELD[]
# tag::CHECKED_TOP[]
| Field |
python | walkccc__LeetCode | solutions/3484. Design Spreadsheet/3484.py | {
"start": 0,
"end": 513
} | class ____:
def __init__(self, rows: int) -> None:
self.spreadsheet = {}
def setCell(self, cell: str, value: int) -> None:
self.spreadsheet[cell] = value
def resetCell(self, cell: str) -> None:
self.spreadsheet[cell] = 0
def getValue(self, formula: str) -> int:
i = formula.find('+')
return self._getToken(formula[1:i]) + self._getToken(formula[i+1:])
def _getToken(self, token: str) -> int:
return int(token) if token[0].isdigit() else self.spreadsheet.get(token, 0)
| Spreadsheet |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1212290,
"end": 1214248
} | class ____(Sort):
"""
EncodingSortField schema wrapper.
A sort definition for sorting a discrete scale in an encoding field definition.
Parameters
----------
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
The data `field <https://vega.github.io/vega-lite/docs/field.html>`__ to sort by.
**Default value:** If unspecified, defaults to the field specified in the outer data
reference.
op : :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
An `aggregate operation
<https://vega.github.io/vega-lite/docs/aggregate.html#ops>`__ to perform on the
field prior to sorting (e.g., ``"count"``, ``"mean"`` and ``"median"``). An
aggregation is required when there are multiple values of the sort field for each
encoded data field. The input data objects will be aggregated, grouped by the
encoded data field.
For a full list of operations, please see the documentation for `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html#ops>`__.
**Default value:** ``"sum"`` for stacked plots. Otherwise, ``"min"``.
order : :class:`SortOrder`, Literal['ascending', 'descending'], None
The sort order. One of ``"ascending"`` (default), ``"descending"``, or ``null`` (do
not sort).
"""
_schema = {"$ref": "#/definitions/EncodingSortField"}
def __init__(
self,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
**kwds,
):
super().__init__(field=field, op=op, order=order, **kwds)
| EncodingSortField |
python | readthedocs__readthedocs.org | readthedocs/api/v2/serializers.py | {
"start": 11011,
"end": 12006
} | class ____(serializers.ModelSerializer):
"""Remote service repository serializer."""
organization = RemoteOrganizationSerializer()
# This field does create an additional query per object returned
matches = serializers.SerializerMethodField()
admin = serializers.SerializerMethodField("is_admin")
class Meta:
model = RemoteRepository
exclude = ("users",)
def get_matches(self, obj):
request = self.context["request"]
if request.user is not None and request.user.is_authenticated:
return obj.matches(request.user)
def is_admin(self, obj):
request = self.context["request"]
# Use annotated value from RemoteRepositoryViewSet queryset
if hasattr(obj, "admin"):
return obj.admin
if request.user and request.user.is_authenticated:
return obj.remote_repository_relations.filter(user=request.user, admin=True).exists()
return False
| RemoteRepositorySerializer |
python | mkdocs__mkdocs | mkdocs/config/defaults.py | {
"start": 296,
"end": 833
} | class ____(c.OptionallyRequired[int]):
levels: Mapping[str, int] = {
"warn": logging.WARNING,
"info": logging.INFO,
"ignore": logging.DEBUG,
}
def run_validation(self, value: object) -> int:
if not isinstance(value, str):
raise base.ValidationError(f"Expected a string, but a {type(value)} was given.")
try:
return self.levels[value]
except KeyError:
raise base.ValidationError(f"Expected one of {list(self.levels)}, got {value!r}")
| _LogLevel |
python | huggingface__transformers | src/transformers/models/smollm3/modeling_smollm3.py | {
"start": 2234,
"end": 8585
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: SmolLM3Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[SmolLM3Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| SmolLM3RotaryEmbedding |
python | pdm-project__pdm | src/pdm/cli/options.py | {
"start": 2361,
"end": 15494
} | class ____(Option):
"""A reusable argument group object which can call `add_argument()`
to add more arguments. And itself will be registered to the parser later.
"""
def __init__(self, name: str, is_mutually_exclusive: bool = False, required: bool = False) -> None:
self.name = name
self.options: list[Option] = []
self.required = required
self.is_mutually_exclusive = is_mutually_exclusive
def add_argument(self, *args: Any, **kwargs: Any) -> None:
if args and isinstance(args[0], Option):
self.options.append(args[0])
else:
self.options.append(Option(*args, **kwargs))
def add_to_parser(self, parser: argparse._ActionsContainer) -> None:
group: argparse._ArgumentGroup
if self.is_mutually_exclusive:
group = parser.add_mutually_exclusive_group(required=self.required)
else:
group = parser.add_argument_group(self.name)
for option in self.options:
option.add_to_group(group)
def add_to_group(self, group: argparse._ArgumentGroup) -> None:
self.add_to_parser(group)
def split_lists(separator: str) -> type[argparse.Action]:
"""
Works the same as `append` except each argument
is considered a `separator`-separated list.
"""
class SplitList(argparse.Action):
def __call__(
self,
parser: argparse.ArgumentParser,
args: argparse.Namespace,
values: Any,
option_string: str | None = None,
) -> None:
if not isinstance(values, str):
return
split = getattr(args, self.dest) or []
split.extend(value.strip() for value in values.split(separator) if value.strip())
setattr(args, self.dest, split)
return SplitList
def from_splitted_env(name: str, separator: str) -> list[str] | None:
"""
Parse a `separator`-separated list from a `name` environment variable if present.
"""
value = os.getenv(name)
if not value:
return None
return [v.strip() for v in value.split(separator) if v.strip()] or None
verbose_option = ArgumentGroup("Verbosity options", is_mutually_exclusive=True)
verbose_option.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Use `-v` for detailed output and `-vv` for more detailed",
)
verbose_option.add_argument("-q", "--quiet", action="store_const", const=-1, dest="verbose", help="Suppress output")
no_cache_option = Option(
"--no-cache",
action="store_true",
default=os.getenv("PDM_NO_CACHE"),
help="Disable the cache for the current command. [env var: PDM_NO_CACHE]",
)
dry_run_option = Option(
"--dry-run",
action="store_true",
default=False,
help="Show the difference only and don't perform any action",
)
lockfile_option = Option(
"-L",
"--lockfile",
default=os.getenv("PDM_LOCKFILE"),
help="Specify another lockfile path. Default: pdm.lock. [env var: PDM_LOCKFILE]",
)
@Option(
"--frozen-lockfile",
"--no-lock",
nargs=0,
help="Don't try to create or update the lockfile. [env var: PDM_FROZEN_LOCKFILE]",
)
def frozen_lockfile_option(
project: Project,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
if option_string == "--no-lock":
project.core.ui.warn("--no-lock is deprecated, use --frozen-lockfile instead.")
project.enable_write_lockfile = False # type: ignore[has-type]
@Option(
"--pep582",
const="AUTO",
metavar="SHELL",
nargs="?",
help="Print the command line to be eval'd by the shell for PEP 582",
)
def pep582_option(
project: Project,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
from pdm.cli.actions import print_pep582_command
print_pep582_command(project, cast(str, values))
sys.exit(0)
install_group = ArgumentGroup("Install options")
install_group.add_argument(
"--no-editable",
action="store_true",
default=bool(os.getenv("PDM_NO_EDITABLE")),
dest="no_editable",
help="Install non-editable versions for all packages. [env var: PDM_NO_EDITABLE]",
)
install_group.add_argument(
"--no-self",
action="store_true",
default=bool(os.getenv("PDM_NO_SELF")),
dest="no_self",
help="Don't install the project itself. [env var: PDM_NO_SELF]",
)
install_group.add_argument("--fail-fast", "-x", action="store_true", help="Abort on first installation error")
@Option(
"--no-isolation",
dest="build_isolation",
nargs=0,
help="Disable isolation when building a source distribution that follows PEP 517, "
"as in: build dependencies specified by PEP 518 must be already installed if this option is used.",
)
def no_isolation_option(
project: Project,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
project.core.state.build_isolation = False
install_group.options.append(no_isolation_option)
groups_group = ArgumentGroup("Dependencies Selection")
groups_group.add_argument(
"-G",
"--group",
"--with",
dest="groups",
metavar="GROUP",
action=split_lists(","),
help="Select group of optional-dependencies separated by comma "
"or dependency-groups (with `-d`). Can be supplied multiple times, "
'use ":all" to include all groups under the same species.',
default=[],
)
groups_group.add_argument(
"--without",
dest="excluded_groups",
metavar="",
action=split_lists(","),
help="Exclude groups of optional-dependencies or dependency-groups",
default=[],
)
groups_group.add_argument(
"--no-default",
dest="default",
action="store_false",
default=True,
help="Don't include dependencies from the default group",
)
dev_group = ArgumentGroup("dev", is_mutually_exclusive=True)
dev_group.add_argument(
"-d",
"--dev",
default=None,
dest="dev",
action="store_true",
help="Select dev dependencies",
)
dev_group.add_argument(
"--prod",
"--production",
dest="dev",
action="store_false",
help="Unselect dev dependencies",
)
groups_group.options.append(dev_group)
save_strategy_group = ArgumentGroup("Save Strategy")
_save_sub_group = ArgumentGroup("save_strategy", is_mutually_exclusive=True)
_save_sub_group.add_argument(
"--save-compatible",
action="store_const",
dest="save_strategy",
const="compatible",
help="Save compatible version specifiers",
)
_save_sub_group.add_argument(
"--save-safe-compatible",
action="store_const",
dest="save_strategy",
const="safe_compatible",
help="Save safe compatible version specifiers",
)
_save_sub_group.add_argument(
"--save-wildcard",
action="store_const",
dest="save_strategy",
const="wildcard",
help="Save wildcard version specifiers",
)
_save_sub_group.add_argument(
"--save-exact",
action="store_const",
dest="save_strategy",
const="exact",
help="Save exact version specifiers",
)
_save_sub_group.add_argument(
"--save-minimum",
action="store_const",
dest="save_strategy",
const="minimum",
help="Save minimum version specifiers",
)
save_strategy_group.add_argument(_save_sub_group)
skip_option = Option(
"-k",
"--skip",
dest="skip",
action=split_lists(","),
help="Skip some tasks and/or hooks by their comma-separated names."
" Can be supplied multiple times."
' Use ":all" to skip all hooks.'
' Use ":pre" and ":post" to skip all pre or post hooks.',
default=from_splitted_env("PDM_SKIP_HOOKS", ","),
)
update_strategy_group = ArgumentGroup("Update Strategy")
_update_sub_group = ArgumentGroup("update_strategy", is_mutually_exclusive=True)
_update_sub_group.add_argument(
"--update-reuse",
action="store_const",
dest="update_strategy",
const="reuse",
help="Reuse pinned versions already present in lock file if possible",
)
_update_sub_group.add_argument(
"--update-eager",
action="store_const",
dest="update_strategy",
const="eager",
help="Try to update the packages and their dependencies recursively",
)
_update_sub_group.add_argument(
"--update-all",
action="store_const",
dest="update_strategy",
const="all",
help="Update all dependencies and sub-dependencies",
)
_update_sub_group.add_argument(
"--update-reuse-installed",
action="store_const",
dest="update_strategy",
const="reuse-installed",
help="Reuse installed packages if possible",
)
update_strategy_group.add_argument(_update_sub_group)
project_option = Option(
"-p",
"--project",
dest="project_path",
help="Specify another path as the project root, which changes the base of pyproject.toml "
"and __pypackages__ [env var: PDM_PROJECT]",
default=os.getenv("PDM_PROJECT"),
)
global_option = Option(
"-g",
"--global",
dest="global_project",
action="store_true",
help="Use the global project, supply the project root with `-p` option",
)
clean_group = ArgumentGroup("clean", is_mutually_exclusive=True)
clean_group.add_argument("--clean", action="store_true", help="Clean packages not in the lockfile")
clean_group.add_argument(
"--only-keep", "--clean-unselected", action="store_true", help="Only keep the selected packages"
)
packages_group = ArgumentGroup("Package Arguments")
packages_group.add_argument(
"-e",
"--editable",
dest="editables",
action="append",
help="Specify editable packages",
default=[],
)
packages_group.add_argument("packages", nargs="*", help="Specify packages")
@Option(
"-I",
"--ignore-python",
nargs=0,
help="Ignore the Python path saved in .pdm-python. [env var: PDM_IGNORE_SAVED_PYTHON]",
)
def ignore_python_option(
project: Project,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
os.environ.update({"PDM_IGNORE_SAVED_PYTHON": "1"})
@Option(
"-n",
"--non-interactive",
nargs=0,
dest="_non_interactive",
help="Don't show interactive prompts but use defaults. [env var: PDM_NON_INTERACTIVE]",
)
def non_interactive_option(
project: Project,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
os.environ.update({"PDM_NON_INTERACTIVE": "1"})
prerelease_option = ArgumentGroup("prerelease", is_mutually_exclusive=True)
prerelease_option.add_argument(
"--pre",
"--prerelease",
action="store_true",
dest="prerelease",
default=None,
help="Allow prereleases to be pinned",
)
prerelease_option.add_argument(
"--stable", action="store_false", dest="prerelease", help="Only allow stable versions to be pinned"
)
unconstrained_option = Option(
"-u",
"--unconstrained",
action="store_true",
default=False,
help="Ignore the version constraints in pyproject.toml and overwrite with new ones from the resolution result",
)
venv_option = Option(
"--venv",
dest="use_venv",
metavar="NAME",
nargs="?",
const="in-project",
help="Run the command in the virtual environment with the given key. [env var: PDM_IN_VENV]",
default=os.getenv("PDM_IN_VENV"),
)
lock_strategy_group = ArgumentGroup("Lock Strategy")
lock_strategy_group.add_argument(
"--strategy",
"-S",
dest="strategy_change",
metavar="STRATEGY",
action=split_lists(","),
help="Specify lock strategy (cross_platform, static_urls, direct_minimal_versions, inherit_metadata). "
"Add 'no_' prefix to disable. Can be supplied multiple times or split by comma.",
)
lock_strategy_group.add_argument(
"--no-cross-platform",
action="append_const",
dest="strategy_change",
const="no_cross_platform",
help="[DEPRECATED] Only lock packages for the current platform",
)
lock_strategy_group.add_argument(
"--static-urls",
action="append_const",
dest="strategy_change",
help="[DEPRECATED] Store static file URLs in the lockfile",
const="static_urls",
)
lock_strategy_group.add_argument(
"--no-static-urls",
action="append_const",
dest="strategy_change",
help="[DEPRECATED] Do not store static file URLs in the lockfile",
const="no_static_urls",
)
config_setting_option = Option(
"--config-setting",
"-C",
action=ExtendMapAction,
help="Pass options to the builder. Options with a value must be "
'specified after "=": `--config-setting=key(=value)` '
"or `-Ckey(=value)`",
)
install_group.options.append(config_setting_option)
override_option = Option(
"--override",
default=[env] if (env := os.getenv("PDM_OVERRIDE")) else None,
action="append",
help="Use the constraint file in pip-requirements format for overriding. [env var: PDM_OVERRIDE] "
"This option can be used multiple times. "
"See https://pip.pypa.io/en/stable/user_guide/#constraints-files",
)
| ArgumentGroup |
python | sympy__sympy | sympy/stats/frv_types.py | {
"start": 1757,
"end": 3385
} | class ____(SingleFiniteDistribution):
@property
def dict(self):
return self.args[0]
def pmf(self, x):
x = Symbol('x')
return Lambda(x, Piecewise(*(
[(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)])))
@property
def set(self):
return set(self.dict.keys())
@staticmethod
def check(density):
for p in density.values():
_value_check((p >= 0, p <= 1),
"Probability at a point must be between 0 and 1.")
val = sum(density.values())
_value_check(Eq(val, 1) != S.false, "Total Probability must be 1.")
def FiniteRV(name, density, **kwargs):
r"""
Create a Finite Random Variable given a dict representing the density.
Parameters
==========
name : Symbol
Represents name of the random variable.
density : dict
Dictionary containing the pdf of finite distribution
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import FiniteRV, P, E
>>> density = {0: .1, 1: .2, 2: .3, 3: .4}
>>> X = FiniteRV('X', density)
>>> E(X)
2.00000000000000
>>> P(X >= 2)
0.700000000000000
Returns
=======
RandomSymbol
"""
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(name, FiniteDistributionHandmade, density, **kwargs)
| FiniteDistributionHandmade |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/base_preprocessing_layer.py | {
"start": 18795,
"end": 23669
} | class ____(object):
"""Functional object that defines a shardable computation.
This object defines functions required to create and manipulate data objects.
These data objects, referred to below as 'accumulators', are computation-
specific and may be implemented alongside concrete subclasses of Combiner
(if necessary - some computations may be simple enough that standard Python
types can be used as accumulators).
The intent for this class is that by describing computations in this way, we
can arbitrarily shard a dataset, perform computations on a subset, and then
merge the computation into a final result. This enables distributed
computation.
The combiner itself does not own any state - all computational state is owned
by the accumulator objects. This is so that we can have an arbitrary number of
Combiners (thus sharding the computation N ways) without risking any change
to the underlying computation. These accumulator objects are uniquely
associated with each Combiner; a Combiner defines what the accumulator object
should be and will only work with accumulators of that type.
"""
__metaclass__ = abc.ABCMeta
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
@abc.abstractmethod
def compute(self, batch_values, accumulator=None):
"""Compute a step in this computation, returning a new accumulator.
This method computes a step of the computation described by this Combiner.
If an accumulator is passed, the data in that accumulator is also used; so
compute(batch_values) results in f(batch_values), while
compute(batch_values, accumulator) results in
merge(f(batch_values), accumulator).
Args:
batch_values: A list of ndarrays representing the values of the inputs for
this step of the computation.
accumulator: the current accumulator. Can be None.
Returns:
An accumulator that includes the passed batch of inputs.
"""
pass
@abc.abstractmethod
def merge(self, accumulators):
"""Merge several accumulators to a single accumulator.
This method takes the partial values in several accumulators and combines
them into a single accumulator. This computation must not be order-specific
(that is, merge([a, b]) must return the same result as merge([b, a]).
Args:
accumulators: the accumulators to merge, as a list.
Returns:
A merged accumulator.
"""
pass
@abc.abstractmethod
def extract(self, accumulator):
"""Convert an accumulator into a dict of output values.
Args:
accumulator: The accumulator to convert.
Returns:
A dict of ndarrays representing the data in this accumulator.
"""
pass
@abc.abstractmethod
def restore(self, output):
"""Create an accumulator based on 'output'.
This method creates a new accumulator with identical internal state to the
one used to create the data in 'output'. This means that if you do
output_data = combiner.extract(accumulator_1)
accumulator_2 = combiner.restore(output_data)
then accumulator_1 and accumulator_2 will have identical internal state, and
computations using either of them will be equivalent.
Args:
output: The data output from a previous computation. Should be in the same
form as provided by 'extract_output'.
Returns:
A new accumulator.
"""
pass
@abc.abstractmethod
def serialize(self, accumulator):
"""Serialize an accumulator for a remote call.
This function serializes an accumulator to be sent to a remote process.
Args:
accumulator: The accumulator to serialize.
Returns:
A byte string representing the passed accumulator.
"""
pass
@abc.abstractmethod
def deserialize(self, encoded_accumulator):
"""Deserialize an accumulator received from 'serialize()'.
This function deserializes an accumulator serialized by 'serialize()'.
Args:
encoded_accumulator: A byte string representing an accumulator.
Returns:
The accumulator represented by the passed byte_string.
"""
pass
def _disallow_inside_tf_function(method_name):
"""Disallow calling a method inside a `tf.function`."""
if ops.inside_function():
error_msg = (
'Detected a call to `PreprocessingLayer.{method_name}` inside a '
'`tf.function`. `PreprocessingLayer.{method_name} is a high-level '
'endpoint that manages its own `tf.function`. Please move the call '
'to `PreprocessingLayer.{method_name}` outside of all enclosing '
'`tf.function`s. Note that you can call a `PreprocessingLayer` '
'directly on `Tensor`s inside a `tf.function` like: `layer(x)`, '
'or update its state like: `layer.update_state(x)`.').format(
method_name=method_name)
raise RuntimeError(error_msg)
| Combiner |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/generic_class.py | {
"start": 183,
"end": 279
} | class ____(Generic[T]):
"""docstring for A"""
def __init__(self, a, b=None):
pass
| A |
python | walkccc__LeetCode | solutions/1860. Incremental Memory Leak/1860.py | {
"start": 0,
"end": 259
} | class ____:
def memLeak(self, memory1: int, memory2: int) -> list[int]:
i = 1
while memory1 >= i or memory2 >= i:
if memory1 >= memory2:
memory1 -= i
else:
memory2 -= i
i += 1
return [i, memory1, memory2]
| Solution |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_tensors.py | {
"start": 169,
"end": 2613
} | class ____(ir.Value):
"""A subclass of ir.Value that supports Python operators."""
def __init__(
self,
opset: onnxscript.values.Opset,
name: str | None = None,
shape: ir.Shape | None = None,
type: ir.TypeProtocol | None = None,
doc_string: str | None = None,
const_value: ir.TensorProtocol | None = None,
) -> None:
super().__init__(
name=name,
shape=shape,
type=type,
doc_string=doc_string,
const_value=const_value,
)
self._opset = opset
@property
def rank(self) -> int | None:
# pyrefly: ignore [missing-attribute]
if self.shape is None:
return None
# pyrefly: ignore [bad-argument-type]
return len(self.shape)
# TODO: Implement indexing
def __mod__(self, other):
# pyrefly: ignore [missing-attribute]
if self.dtype in {
ir.DataType.FLOAT,
ir.DataType.DOUBLE,
ir.DataType.FLOAT16,
ir.DataType.BFLOAT16,
}:
return self._opset.Mod(self, other, fmod=1)
return self._opset.Mod(self, other)
def __ne__(self, other):
return self._opset.Not(self._opset.Equal(self, other))
def __neg__(self):
return self._opset.Neg(self)
def __add__(self, other):
return self._opset.Add(self, other)
def __radd__(self, other):
return self._opset.Add(other, self)
def __rand__(self, other):
return self._opset.And(other, self)
def __mul__(self, other):
return self._opset.Mul(self, other)
def __rmul__(self, other):
return self._opset.Mul(other, self)
def __matmul__(self, other):
return self._opset.MatMul(self, other)
def __pow__(self, other):
return self._opset.Pow(self, other)
def __sub__(self, other):
return self._opset.Sub(self, other)
def __rsub__(self, other):
return self._opset.Sub(other, self)
def __truediv__(self, other):
return self._opset.Div(self, other)
def __lt__(self, other):
return self._opset.Less(self, other)
def __le__(self, other):
return self._opset.LessOrEqual(self, other)
def __ge__(self, other):
return self._opset.GreaterOrEqual(self, other)
def __gt__(self, other):
return self._opset.Greater(self, other)
| SymbolicTensor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.