language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
astropy__astropy
|
astropy/coordinates/representation/geodetic.py
|
{
"start": 6735,
"end": 6893
}
|
class ____(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
|
GRS80GeodeticRepresentation
|
python
|
docker__docker-py
|
tests/unit/models_configs_test.py
|
{
"start": 104,
"end": 363
}
|
class ____(unittest.TestCase):
def test_create_config(self):
client = make_fake_client()
config = client.configs.create(name="super_config", data="config")
assert config.__repr__() == f"<Config: '{FAKE_CONFIG_NAME}'>"
|
CreateConfigsTest
|
python
|
django-debug-toolbar__django-debug-toolbar
|
tests/panels/test_profiling.py
|
{
"start": 467,
"end": 3689
}
|
class ____(BaseTestCase):
panel_id = ProfilingPanel.panel_id
def test_regular_view(self):
self._get_response = lambda request: regular_view(request, "profiling")
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
self.assertIn("func_list", self.panel.get_stats())
self.assertIn("regular_view", self.panel.content)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
not the process_request.
"""
self._get_response = lambda request: regular_view(request, "profiling")
response = self.panel.process_request(self.request)
# ensure the panel does not have content yet.
self.assertNotIn("regular_view", self.panel.content)
self.panel.generate_stats(self.request, response)
self.reload_stats()
# ensure the panel renders correctly.
content = self.panel.content
self.assertIn("regular_view", content)
self.assertIn("render", content)
self.assertValidHTML(content)
# ensure traces aren't escaped
self.assertIn('<span class="djdt-path">', content)
@override_settings(DEBUG_TOOLBAR_CONFIG={"PROFILER_THRESHOLD_RATIO": 1})
def test_cum_time_threshold(self):
"""
Test that cumulative time threshold excludes calls
"""
self._get_response = lambda request: regular_view(request, "profiling")
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel renders but doesn't include our function.
content = self.panel.content
self.assertIn("regular_view", content)
self.assertNotIn("render", content)
self.assertValidHTML(content)
@unittest.skipUnless(
sys.version_info < (3, 12, 0),
"Python 3.12 no longer contains a frame for list comprehensions.",
)
def test_listcomp_escaped(self):
self._get_response = lambda request: listcomp_view(request)
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
content = self.panel.content
self.assertNotIn('<span class="djdt-func"><listcomp></span>', content)
self.assertIn('<span class="djdt-func"><listcomp></span>', content)
def test_generate_stats_no_profiler(self):
"""
Test generating stats with no profiler.
"""
response = HttpResponse()
self.assertIsNone(self.panel.generate_stats(self.request, response))
def test_generate_stats_no_root_func(self):
"""
Test generating stats using profiler without root function.
"""
response = self.panel.process_request(self.request)
self.panel.profiler.clear()
self.panel.profiler.enable()
self.panel.profiler.disable()
self.panel.generate_stats(self.request, response)
self.assertNotIn("func_list", self.panel.get_stats())
@override_settings(
DEBUG=True, DEBUG_TOOLBAR_PANELS=["debug_toolbar.panels.profiling.ProfilingPanel"]
)
|
ProfilingPanelTestCase
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typePrinter1.py
|
{
"start": 171,
"end": 203
}
|
class ____:
class Inner: ...
|
A
|
python
|
apache__thrift
|
contrib/zeromq/TZmqClient.py
|
{
"start": 904,
"end": 2059
}
|
class ____(TTransportBase, CReadableTransport):
def __init__(self, ctx, endpoint, sock_type):
self._sock = ctx.socket(sock_type)
self._endpoint = endpoint
self._wbuf = StringIO()
self._rbuf = StringIO()
def open(self):
self._sock.connect(self._endpoint)
def read(self, size):
ret = self._rbuf.read(size)
if len(ret) != 0:
return ret
self._read_message()
return self._rbuf.read(size)
def _read_message(self):
msg = self._sock.recv()
self._rbuf = StringIO(msg)
def write(self, buf):
self._wbuf.write(buf)
def flush(self):
msg = self._wbuf.getvalue()
self._wbuf = StringIO()
self._sock.send(msg)
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._rbuf
# NOTE: This will probably not actually work.
def cstringio_refill(self, prefix, reqlen):
while len(prefix) < reqlen:
self.read_message()
prefix += self._rbuf.getvalue()
self._rbuf = StringIO(prefix)
return self._rbuf
|
TZmqClient
|
python
|
astropy__astropy
|
astropy/io/misc/ecsv.py
|
{
"start": 11305,
"end": 12482
}
|
class ____(ECSVEngine):
"""ECSV reader engine using astropy.io.ascii Python CSV reader."""
name = "io.ascii"
format = "ascii.csv"
def convert_np_type(self, np_type: str) -> np.generic:
# Convert the np_type string to a numpy dtype type like np.int32, np.float64,
# etc. This output is compatible with io.ascii `converters` option where is gets
# used.
return np.dtype(np_type).type
def get_data_kwargs(
self,
header: ECSVHeader,
null_values: list[str],
) -> dict[str, Any]:
kw = {}
kw["fill_values"] = get_null_values_per_column(
header.cols, header.table_meta, null_values
)
kw["header_start"] = header.n_header - header.n_empty
kw["converters"] = self.get_converters(header)
# Fast reader does not support converters (defining types in advance) nor any
# encoding. Converters are required, e.g. for a string column that looks like
# floats. Would be nice to fix this, but in mean time use Python CSV reader.
kw["fast_reader"] = False
kw["strip_column_names"] = False
return kw
|
ECSVEngineIoAscii
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_merge_cells01.py
|
{
"start": 315,
"end": 1110
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("merge_cells01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"align": "center"})
worksheet.set_selection("A4")
worksheet.merge_range("A1:A2", "col1", cell_format)
worksheet.merge_range("B1:B2", "col2", cell_format)
worksheet.merge_range("C1:C2", "col3", cell_format)
worksheet.merge_range("D1:D2", "col4", cell_format)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
ansible__ansible
|
lib/ansible/utils/_junit_xml.py
|
{
"start": 3605,
"end": 6451
}
|
class ____:
"""A collection of test cases."""
name: str
hostname: str | None = None
id: str | None = None
package: str | None = None
timestamp: datetime.datetime | None = None
properties: dict[str, str] = dataclasses.field(default_factory=dict)
cases: list[TestCase] = dataclasses.field(default_factory=list)
system_out: str | None = None
system_err: str | None = None
def __post_init__(self):
if self.timestamp and self.timestamp.tzinfo != datetime.timezone.utc:
raise ValueError(f'timestamp.tzinfo must be {datetime.timezone.utc!r}')
@property
def disabled(self) -> int:
"""The number of disabled test cases."""
return sum(case.is_disabled for case in self.cases)
@property
def errors(self) -> int:
"""The number of test cases containing error info."""
return sum(case.is_error for case in self.cases)
@property
def failures(self) -> int:
"""The number of test cases containing failure info."""
return sum(case.is_failure for case in self.cases)
@property
def skipped(self) -> int:
"""The number of test cases containing skipped info."""
return sum(case.is_skipped for case in self.cases)
@property
def tests(self) -> int:
"""The number of test cases."""
return len(self.cases)
@property
def time(self) -> decimal.Decimal:
"""The total time from all test cases."""
return decimal.Decimal(sum(case.time for case in self.cases if case.time))
def get_attributes(self) -> dict[str, str]:
"""Return a dictionary of attributes for this instance."""
return _attributes(
disabled=self.disabled,
errors=self.errors,
failures=self.failures,
hostname=self.hostname,
id=self.id,
name=self.name,
package=self.package,
skipped=self.skipped,
tests=self.tests,
time=self.time,
timestamp=self.timestamp.replace(tzinfo=None).isoformat(timespec='seconds') if self.timestamp else None,
)
def get_xml_element(self) -> ET.Element:
"""Return an XML element representing this instance."""
element = ET.Element('testsuite', self.get_attributes())
if self.properties:
ET.SubElement(element, 'properties').extend([ET.Element('property', dict(name=name, value=value)) for name, value in self.properties.items()])
element.extend([test_case.get_xml_element() for test_case in self.cases])
if self.system_out:
ET.SubElement(element, 'system-out').text = self.system_out
if self.system_err:
ET.SubElement(element, 'system-err').text = self.system_err
return element
@dataclasses.dataclass
|
TestSuite
|
python
|
huggingface__transformers
|
src/transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py
|
{
"start": 3411,
"end": 6758
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam3TrackerVideoMaskDecoder`]. It is used to instantiate a SAM3_TRACKER_VIDEO
memory encoder according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the SAM3_TRACKER_VIDEO mask decoder.
mlp_dim (`int`, *optional*, defaults to 2048):
The dimension of the MLP in the two-way transformer.
num_hidden_layers (`int`, *optional*, defaults to 2):
The number of hidden layers in the two-way transformer.
num_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads in the two-way transformer.
attention_downsample_rate (`int`, *optional*, defaults to 2):
The downsample rate for the attention layers.
num_multimask_outputs (`int`, *optional*, defaults to 3):
The number of multimask outputs.
iou_head_depth (`int`, *optional*, defaults to 3):
The depth of the IoU head.
iou_head_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the IoU head.
dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`):
Whether to use dynamic multimask via stability.
dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05):
The stability delta for the dynamic multimask.
dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98):
The stability threshold for the dynamic multimask.
"""
base_config_key = "mask_decoder_config"
def __init__(
self,
hidden_size=256,
hidden_act="gelu",
mlp_dim=2048,
num_hidden_layers=2,
num_attention_heads=8,
attention_downsample_rate=2,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=256,
dynamic_multimask_via_stability=True,
dynamic_multimask_stability_delta=0.05,
dynamic_multimask_stability_thresh=0.98,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_multimask_outputs = num_multimask_outputs
self.hidden_act = hidden_act
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
# TwoWayTransformer configuration
self.num_hidden_layers = num_hidden_layers
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.mlp_dim = mlp_dim
self.attention_downsample_rate = attention_downsample_rate
|
Sam3TrackerVideoMaskDecoderConfig
|
python
|
django__django
|
django/conf/__init__.py
|
{
"start": 1014,
"end": 5473
}
|
class ____(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE)
)
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return "<LazySettings [Unevaluated]>"
return '<LazySettings "%(settings_module)s">' % {
"settings_module": self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if (_wrapped := self._wrapped) is empty:
self._setup(name)
_wrapped = self._wrapped
val = getattr(_wrapped, name)
# Special case some settings which require further modification.
# This is done here for performance reasons so the modified value is
# cached.
if name in {"MEDIA_URL", "STATIC_URL"} and val is not None:
val = self._add_script_prefix(val)
elif name == "SECRET_KEY" and not val:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == "_wrapped":
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError("Settings already configured.")
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
if not name.isupper():
raise TypeError("Setting %r must be uppercase." % name)
setattr(holder, name, value)
self._wrapped = holder
@staticmethod
def _add_script_prefix(value):
"""
Add SCRIPT_NAME prefix to relative paths.
Useful when the app is being served at a subpath and manually prefixing
subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.
"""
# Don't apply prefix to absolute paths and URLs.
if value.startswith(("http://", "https://", "/")):
return value
from django.urls import get_script_prefix
return "%s%s" % (get_script_prefix(), value)
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
def _show_deprecation_warning(self, message, category):
stack = traceback.extract_stack()
# Show a warning if the setting is used outside of Django.
# Stack index: -1 this line, -2 the property, -3 the
# LazyObject __getattribute__(), -4 the caller.
filename, _, _, _ = stack[-4]
if not filename.startswith(os.path.dirname(django.__file__)):
warnings.warn(message, category, stacklevel=2)
|
LazySettings
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/communicator_objects/unity_to_external_pb2_grpc.py
|
{
"start": 929,
"end": 2044
}
|
class ____(object):
"""Missing associated documentation comment in .proto file."""
def Exchange(self, request, context):
"""Sends the academy parameters
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UnityToExternalProtoServicer_to_server(servicer, server):
rpc_method_handlers = {
'Exchange': grpc.unary_unary_rpc_method_handler(
servicer.Exchange,
request_deserializer=mlagents__envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessageProto.FromString,
response_serializer=mlagents__envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessageProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'communicator_objects.UnityToExternalProto', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
|
UnityToExternalProtoServicer
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_dynamic.py
|
{
"start": 60397,
"end": 67743
}
|
class ____:
@testing.fixture
def transient_fixture(self, user_address_fixture):
def _transient_fixture(addresses_args={}):
User, Address = user_address_fixture(addresses_args=addresses_args)
u1 = User()
a1 = Address()
return u1, a1
yield _transient_fixture
@testing.fixture
def persistent_fixture(self, user_address_fixture):
def _persistent_fixture(autoflush=True, addresses_args={}):
User, Address = user_address_fixture(addresses_args=addresses_args)
u1 = User(name="u1")
a1 = Address(email_address="a1")
s = fixture_session(autoflush=autoflush)
s.add(u1)
s.flush()
return u1, a1, s
yield _persistent_fixture
@testing.fixture
def persistent_m2m_fixture(self, order_item_fixture):
def _persistent_m2m_fixture(autoflush=True, items_args={}):
Order, Item = order_item_fixture(items_args=items_args)
o1 = Order()
i1 = Item(description="i1")
s = fixture_session(autoflush=autoflush)
s.add(o1)
s.flush()
return o1, i1, s
yield _persistent_m2m_fixture
def _assert_history(self, obj, compare, compare_passive=None):
if isinstance(obj, self.classes.User):
attrname = "addresses"
elif isinstance(obj, self.classes.Order):
attrname = "items"
sess = inspect(obj).session
if sess:
sess.autoflush = False
try:
if self.lazy == "write_only" and compare_passive is not None:
eq_(
attributes.get_history(
obj, attrname, PassiveFlag.PASSIVE_NO_FETCH
),
compare_passive,
)
else:
eq_(
attributes.get_history(
obj,
attrname,
(
PassiveFlag.PASSIVE_NO_FETCH
if self.lazy == "write_only"
else PassiveFlag.PASSIVE_OFF
),
),
compare,
)
if compare_passive is None:
compare_passive = compare
eq_(
attributes.get_history(
obj, attrname, attributes.LOAD_AGAINST_COMMITTED
),
compare_passive,
)
finally:
if sess:
sess.autoflush = True
def test_add_transient(self, transient_fixture):
u1, a1 = transient_fixture()
u1.addresses.add(a1)
self._assert_history(u1, ([a1], [], []))
def test_add_persistent(self, persistent_fixture):
u1, a1, s = persistent_fixture()
u1.addresses.add(a1)
self._assert_history(u1, ([a1], [], []))
def test_remove_transient(self, transient_fixture):
u1, a1 = transient_fixture()
u1.addresses.add(a1)
u1.addresses.remove(a1)
self._assert_history(u1, ([], [], []))
def test_backref_pop_transient(self, transient_fixture):
u1, a1 = transient_fixture(addresses_args={"backref": "user"})
u1.addresses.add(a1)
self._assert_history(u1, ([a1], [], []))
a1.user = None
# removed from added
self._assert_history(u1, ([], [], []))
def test_remove_persistent(self, persistent_fixture):
u1, a1, s = persistent_fixture()
u1.addresses.add(a1)
s.flush()
s.expire_all()
u1.addresses.remove(a1)
self._assert_history(u1, ([], [], [a1]))
def test_backref_pop_persistent_autoflush_o2m_active_hist(
self, persistent_fixture
):
u1, a1, s = persistent_fixture(
addresses_args={"backref": backref("user", active_history=True)}
)
u1.addresses.add(a1)
s.flush()
s.expire_all()
a1.user = None
self._assert_history(u1, ([], [], [a1]))
def test_backref_pop_persistent_autoflush_m2m(
self, persistent_m2m_fixture
):
o1, i1, s = persistent_m2m_fixture(items_args={"backref": "orders"})
o1.items.add(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1, ([], [], [i1]))
def test_backref_pop_persistent_noflush_m2m(self, persistent_m2m_fixture):
o1, i1, s = persistent_m2m_fixture(
items_args={"backref": "orders"}, autoflush=False
)
o1.items.add(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1, ([], [], [i1]))
def test_unchanged_persistent(self, persistent_fixture):
Address = self.classes.Address
u1, a1, s = persistent_fixture()
a2, a3 = Address(email_address="a2"), Address(email_address="a3")
u1.addresses.add(a1)
u1.addresses.add(a2)
s.flush()
u1.addresses.add(a3)
u1.addresses.remove(a2)
self._assert_history(
u1, ([a3], [a1], [a2]), compare_passive=([a3], [], [a2])
)
def test_replace_transient(self, transient_fixture):
Address = self.classes.Address
u1, a1 = transient_fixture()
a2, a3, a4, a5 = (
Address(email_address="a2"),
Address(email_address="a3"),
Address(email_address="a4"),
Address(email_address="a5"),
)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1, ([a2, a3, a4, a5], [], []))
@testing.combinations(True, False, argnames="autoflush")
def test_replace_persistent(self, autoflush, persistent_fixture):
User = self.classes.User
Address = self.classes.Address
u1, a1, s = persistent_fixture(autoflush=autoflush)
a2, a3, a4, a5 = (
Address(email_address="a2"),
Address(email_address="a3"),
Address(email_address="a4"),
Address(email_address="a5"),
)
if User.addresses.property.lazy == "write_only":
with self._expect_no_iteration():
u1.addresses = [a1, a2]
return
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
if not autoflush:
self._assert_history(u1, ([a2, a3, a4, a5], [], []))
else:
self._assert_history(
u1,
([a3, a4, a5], [a2], [a1]),
compare_passive=([a3, a4, a5], [], [a1]),
)
@testing.combinations(True, False, argnames="autoflush")
def test_persistent_but_readded(self, autoflush, persistent_fixture):
u1, a1, s = persistent_fixture(autoflush=autoflush)
u1.addresses.add(a1)
s.flush()
u1.addresses.add(a1)
self._assert_history(
u1, ([], [a1], []), compare_passive=([a1], [], [])
)
def test_missing_but_removed_noflush(self, persistent_fixture):
u1, a1, s = persistent_fixture(autoflush=False)
u1.addresses.remove(a1)
self._assert_history(u1, ([], [], []), compare_passive=([], [], [a1]))
|
_HistoryTest
|
python
|
pytorch__pytorch
|
tools/linter/adapters/actionlint_linter.py
|
{
"start": 374,
"end": 4125
}
|
class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
RESULTS_RE: re.Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?P<char>\d+):
\s(?P<message>.*)
\s(?P<code>\[.*\])
$
"""
)
def run_command(
args: list[str],
) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def check_file(
binary: str,
file: str,
) -> list[LintMessage]:
try:
proc = run_command(
[
binary,
"-ignore",
'"runs-on" section must be sequence node but got mapping node with "!!map" tag',
"-ignore",
'input "freethreaded" is not defined in action "actions/setup-python@v',
file,
]
)
except OSError as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
stdout = str(proc.stdout, "utf-8").strip()
return [
LintMessage(
path=match["file"],
name=match["code"],
description=match["message"],
line=int(match["line"]),
char=int(match["char"]),
code=LINTER_CODE,
severity=LintSeverity.ERROR,
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(stdout)
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="actionlint runner",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="actionlint binary path",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
if not os.path.exists(args.binary):
err_msg = LintMessage(
path="<none>",
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Could not find actionlint binary at {args.binary},"
" you may need to run `lintrunner init`."
),
)
print(json.dumps(err_msg._asdict()), flush=True)
sys.exit(0)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(
check_file,
args.binary,
filename,
): filename
for filename in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
|
LintMessage
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v2/views/model_views.py
|
{
"start": 6948,
"end": 8662
}
|
class ____(DisableListEndpoint, UpdateModelMixin, UserSelectViewSet):
"""List, filter, etc, Projects."""
permission_classes = [HasBuildAPIKey | ReadOnlyPermission]
renderer_classes = (JSONRenderer,)
serializer_class = ProjectSerializer
admin_serializer_class = ProjectAdminSerializer
model = Project
pagination_class = ProjectPagination
filterset_fields = ("slug",)
@decorators.action(detail=True)
def translations(self, *_, **__):
translations = self.get_object().translations.all()
return Response(
{
"translations": ProjectSerializer(translations, many=True).data,
}
)
@decorators.action(detail=True)
def subprojects(self, request, **kwargs):
project = self.get_object()
rels = project.subprojects.all()
children = [rel.child for rel in rels]
return Response(
{
"subprojects": ProjectSerializer(children, many=True).data,
}
)
@decorators.action(detail=True)
def active_versions(self, request, **kwargs):
project = self.get_object()
versions = project.versions(manager=INTERNAL).filter(active=True)
return Response(
{
"versions": VersionSerializer(versions, many=True).data,
}
)
@decorators.action(detail=True)
def canonical_url(self, request, **kwargs):
project = self.get_object()
return Response(
{
"url": project.get_docs_url(),
}
)
def get_queryset_for_api_key(self, api_key):
return self.model.objects.filter(pk=api_key.project.pk)
|
ProjectViewSet
|
python
|
huggingface__transformers
|
src/transformers/models/internvl/modeling_internvl.py
|
{
"start": 31831,
"end": 38690
}
|
class ____(InternVLPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: InternVLConfig):
super().__init__(config)
self.model = InternVLModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
**kwargs,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, InternVLCausalLMOutputWithPast]:
r"""
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
>>> torch_device = "cuda"
>>> processor = AutoProcessor.from_pretrained("OpenGVLab/InternVL3-1B-hf")
>>> model = AutoModelForImageTextToText.from_pretrained(
... "OpenGVLab/InternVL3-1B-hf", dtype=torch.bfloat16, device_map=torch_device
... )
>>> messages = [
... {
... "role": "user",
... "content": [
... {
... "type": "image",
... "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
... },
... {
... "type": "image",
... "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
... },
... {"type": "text", "text": "These images depict two different landmarks. Can you identify them?"},
... ],
... },
... ]
>>> inputs = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(torch_device)
>>> generate_ids = model.generate(**inputs, max_new_tokens=200)
>>> print(processor.decode(generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True))
The images depict the Statue of Liberty and the Golden Gate Bridge.
```"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
cache_position=cache_position,
image_sizes=image_sizes,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return InternVLCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = [
"InternVLVisionPreTrainedModel",
"InternVLVisionModel",
"InternVLPreTrainedModel",
"InternVLModel",
"InternVLForConditionalGeneration",
]
|
InternVLForConditionalGeneration
|
python
|
apache__airflow
|
providers/microsoft/azure/tests/unit/microsoft/azure/transfers/test_sftp_to_wasb.py
|
{
"start": 1458,
"end": 10416
}
|
class ____:
def test_init(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
blob_prefix=BLOB_PREFIX,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
assert operator.sftp_source_path == SOURCE_PATH_NO_WILDCARD
assert operator.sftp_conn_id == SFTP_CONN_ID
assert operator.container_name == CONTAINER_NAME
assert operator.wasb_conn_id == WASB_CONN_ID
assert operator.blob_prefix == BLOB_PREFIX
assert operator.create_container is False
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook", autospec=True)
def test_execute_more_than_one_wildcard_exception(self, mock_hook):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
blob_prefix=BLOB_PREFIX,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
with pytest.raises(AirflowException) as err:
operator.check_wildcards_limit()
assert "Only one wildcard '*' is allowed" in str(err.value)
def test_get_sftp_tree_behavior(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_PATH,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
assert sftp_complete_path == "main_dir", "not matched at expected complete path"
assert prefix == "main_dir/", "Prefix must be EQUAL TO wildcard"
assert delimiter == "", "Delimiter must be empty"
def test_get_sftp_tree_behavior_without_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
sftp_complete_path, prefix, delimiter = operator.get_tree_behavior()
assert sftp_complete_path == "main_dir/", "not matched at expected complete path"
assert prefix is None, "Prefix must be NONE when no wildcard"
assert delimiter is None, "Delimiter must be none"
def test_source_path_contains_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_PATH,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
output = operator.source_path_contains_wildcard
assert output is True, "This path contains a wildpath"
def test_source_path_not_contains_wildcard(self):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
)
output = operator.source_path_contains_wildcard
assert output is False, "This path does not contains a wildpath"
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook")
def test_get_sftp_files_map_no_wildcard(self, sftp_hook, mock_hook):
sftp_hook.return_value.get_tree_map.return_value = [
EXPECTED_FILES,
[],
[],
]
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
)
files = operator.get_sftp_files_map()
assert len(files) == 1, "no matched at expected found files"
assert files[0].blob_name == EXPECTED_BLOB_NAME, "expected blob name not matched"
@pytest.mark.parametrize(argnames="create_container", argvalues=[True, False])
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook")
def test_copy_files_to_wasb(self, sftp_hook, mock_hook, create_container):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
create_container=create_container,
)
sftp_files = [SftpFile(EXPECTED_FILES[0], EXPECTED_BLOB_NAME)]
files = operator.copy_files_to_wasb(sftp_files)
operator.sftp_hook.retrieve_file.assert_has_calls([mock.call("main_dir/test_object3.json", mock.ANY)])
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, EXPECTED_BLOB_NAME, create_container, overwrite=False
)
assert len(files) == 1, "no matched at expected uploaded files"
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook")
def test_delete_files(self, sftp_hook):
sftp_mock = sftp_hook.return_value
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=SOURCE_PATH_NO_WILDCARD,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
)
sftp_file_paths = EXPECTED_FILES
operator.delete_files(sftp_file_paths)
sftp_mock.delete_file.assert_has_calls([mock.call(EXPECTED_FILES[0])])
@pytest.mark.parametrize(argnames="create_container", argvalues=[True, False])
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook")
def test_execute(self, sftp_hook, mock_hook, create_container):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_FILE_NAME,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=False,
create_container=create_container,
)
sftp_hook.return_value.get_tree_map.return_value = [
["main_dir/test_object.json"],
[],
[],
]
operator.execute(None)
sftp_hook.return_value.get_tree_map.assert_called_with(
"main_dir", prefix="main_dir/test_object", delimiter=".json"
)
sftp_hook.return_value.retrieve_file.assert_has_calls(
[mock.call("main_dir/test_object.json", mock.ANY)]
)
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, "test_object.json", create_container, overwrite=False
)
sftp_hook.return_value.delete_file.assert_not_called()
@pytest.mark.parametrize(argnames="create_container", argvalues=[True, False])
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.WasbHook")
@mock.patch("airflow.providers.microsoft.azure.transfers.sftp_to_wasb.SFTPHook")
def test_execute_moved_files(self, sftp_hook, mock_hook, create_container):
operator = SFTPToWasbOperator(
task_id=TASK_ID,
sftp_source_path=WILDCARD_FILE_NAME,
sftp_conn_id=SFTP_CONN_ID,
container_name=CONTAINER_NAME,
wasb_conn_id=WASB_CONN_ID,
move_object=True,
blob_prefix=BLOB_PREFIX,
create_container=create_container,
)
sftp_hook.return_value.get_tree_map.return_value = [
["main_dir/test_object.json"],
[],
[],
]
operator.execute(None)
sftp_hook.return_value.get_tree_map.assert_called_with(
"main_dir", prefix="main_dir/test_object", delimiter=".json"
)
sftp_hook.return_value.retrieve_file.assert_has_calls(
[mock.call("main_dir/test_object.json", mock.ANY)]
)
mock_hook.return_value.load_file.assert_called_once_with(
mock.ANY, CONTAINER_NAME, BLOB_PREFIX + "test_object.json", create_container, overwrite=False
)
assert sftp_hook.return_value.delete_file.called is True, "File must be moved"
|
TestSFTPToWasbOperator
|
python
|
huggingface__transformers
|
src/transformers/trainer_pt_utils.py
|
{
"start": 9778,
"end": 11015
}
|
class ____(DistributedSampler):
"""
Like a torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled
samples to make each process have a round multiple of batch_size samples.
Args:
dataset (`torch.utils.data.Dataset`):
Dataset used for sampling.
batch_size (`int`):
The batch size used with this sampler
kwargs (`dict[str, Any]`, *optional*):
All other keyword arguments passed to `DistributedSampler`.
"""
def __init__(self, dataset, batch_size, **kwargs):
super().__init__(dataset, **kwargs)
self.batch_size = batch_size
def __iter__(self):
indices = list(super().__iter__())
remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size
# DistributedSampler already added samples from the beginning to make the number of samples a round multiple
# of the world size, so we skip those.
start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0
indices += indices[start_remainder : start_remainder + remainder]
return iter(indices)
|
DistributedSamplerWithLoop
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/sparse_ops/sparse_add_op_test.py
|
{
"start": 1672,
"end": 10695
}
|
class ____(test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
return _sparsify(x) if sparse else x
def _SparseTensorValue_3x3(self, negate=False):
# [ 1]
# [2 ]
# [3 4]
# ...or its cwise negation, if `negate`
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, 2, 3, 4])
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
# [-1.9 ]
# [ 3 -4.2]
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def testAddSelf(self):
with test_util.force_cpu():
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
self.assertAllEqual((3, 3), sp_sum.get_shape())
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testAddSelfAndNegation(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
self.assertAllEqual(sum_out.values, [])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
def testSmallValuesShouldVanish(self):
with test_util.force_cpu():
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3_v2()
# sum:
# [ 2]
# [.1 ]
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
self.assertAllEqual(sum_out.values, [2, 6])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
# only .1 vanishes
sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = self.evaluate(sp_sum)
self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
self.assertAllClose(sum_out.values, [2, 6, -.2])
self.assertAllEqual(sum_out.dense_shape, [3, 3])
@test_util.run_deprecated_v1
def testGradients(self):
np.random.seed(1618) # Make it reproducible.
with self.session(use_gpu=False):
for n in [10, 31]:
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
nnz_sum = len(self.evaluate(sp_sum.values))
err = gradient_checker.compute_gradient_error(
[sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values,
(nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
for dtype in [np.float32, np.float64, np.int64, np.complex64]:
for index_dtype in [np.int32, np.int64]:
rand_vals_np = np.random.randn(n, m).astype(dtype)
dense_np = np.random.randn(n, m).astype(dtype)
with test_util.force_cpu():
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
s = self.evaluate(
sparse_ops.sparse_add(sparse, constant_op.constant(dense_np)))
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
# check commutativity
s = self.evaluate(
sparse_ops.sparse_add(constant_op.constant(dense_np), sparse))
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
@test_util.run_deprecated_v1
def testSparseTensorDenseAddGradients(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
rand_vals_np = np.random.randn(n, m).astype(np.float32)
dense_np = np.random.randn(n, m).astype(np.float32)
with self.session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
dense = constant_op.constant(dense_np, dtype=dtypes.float32)
s = sparse_ops.sparse_add(sparse, dense)
err = gradient_checker.compute_gradient_error([sparse.values, dense],
[(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
def testInvalidSparseTensor(self):
with test_util.force_cpu():
shape = [2, 2]
val = [0]
dense = constant_op.constant(np.zeros(shape, dtype=np.int32))
for bad_idx in [
[[-1, 0]], # -1 is invalid.
[[1, 3]], # ...so is 3.
]:
sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
with self.assertRaisesRegex(
(ValueError, errors_impl.InvalidArgumentError), "invalid index"):
s = sparse_ops.sparse_add(sparse, dense)
self.evaluate(s)
def _testSparseDenseInvalidInputs(self,
a_indices,
a_values,
a_shape,
b,
expected_error=""):
# Public API call to sparse-dense add.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
expected_error):
a = sparse_tensor.SparseTensor(a_indices, a_values, a_shape)
self.evaluate(sparse_ops.sparse_add(a, b))
# Directly call generated kernel, by-passing SparseTensor validation.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
expected_error):
self.evaluate(
sparse_ops.gen_sparse_ops.sparse_tensor_dense_add(
a_indices, a_values, a_shape, b))
def testSparseDenseInvalidInputs(self):
self._testSparseDenseInvalidInputs(
a_indices=constant_op.constant(0, shape=[17, 2], dtype=dtypes.int64),
a_values=constant_op.constant(0, shape=[5], dtype=dtypes.float32),
a_shape=constant_op.constant([3, 4], dtype=dtypes.int64),
b=constant_op.constant(1, shape=[3, 4], dtype=dtypes.float32),
expected_error="Dimensions 17 and 5 are not compatible")
self._testSparseDenseInvalidInputs(
a_indices=constant_op.constant(0, shape=[17, 4], dtype=dtypes.int64),
a_values=constant_op.constant(0, shape=[17], dtype=dtypes.float32),
a_shape=constant_op.constant([3, 4], dtype=dtypes.int64),
b=constant_op.constant(1, shape=[3, 4], dtype=dtypes.float32),
expected_error="Dimensions 4 and 2 are not compatible")
self._testSparseDenseInvalidInputs(
a_indices=constant_op.constant(7, shape=[17, 2], dtype=dtypes.int64),
a_values=constant_op.constant(0, shape=[17], dtype=dtypes.float32),
a_shape=constant_op.constant([3, 4], dtype=dtypes.int64),
b=constant_op.constant(1, shape=[3, 4], dtype=dtypes.float32),
expected_error="invalid index")
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
with session.Session(graph=ops.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
s2d = math_ops.add(
sparse_ops.sparse_tensor_to_dense(sp_t), constant_op.constant(vals))
sa = sparse_ops.sparse_add(sp_t, constant_op.constant(vals))
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)
# per-iter latency; secs to millis
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
|
SparseAddTest
|
python
|
miyuchina__mistletoe
|
mistletoe/block_token.py
|
{
"start": 4221,
"end": 5436
}
|
class ____(BlockToken):
"""
Document token.
This is a container block token. Its children are block tokens - container or leaf ones.
Attributes:
footnotes (dictionary): link reference definitions.
"""
def __init__(self, lines: Union[str, Iterable[str]]):
"""
Instantiates this token and its content by parsing the input lines.
Args:
lines: input markdown to be tokenized. If a string is provided,
it will be split into lines.
CAUTION: If the input lines end with Windows line endings (``\\r\\n``),
the parsing process will not work correctly. For performance reasons,
clients need to normalize such line endings themselves, before passing
them to this function, e.g. by calling ``lines.replace('\\r', '')``.
"""
if isinstance(lines, str):
lines = lines.splitlines(keepends=True)
lines = [line if line.endswith('\n') else '{}\n'.format(line) for line in lines]
self.footnotes = {}
self.line_number = 1
token._root_node = self
self.children = tokenize(lines)
token._root_node = None
|
Document
|
python
|
RaRe-Technologies__gensim
|
gensim/test/test_logentropy_model.py
|
{
"start": 485,
"end": 2765
}
|
class ____(unittest.TestCase):
TEST_CORPUS = [[(1, 1.0)], [], [(0, 0.5), (2, 1.0)], []]
def setUp(self):
self.corpus_small = MmCorpus(datapath('test_corpus_small.mm'))
self.corpus_ok = MmCorpus(datapath('test_corpus_ok.mm'))
self.corpus_empty = []
def test_generator_fail(self):
"""Test creating a model using a generator as input; should fail."""
def get_generator(test_corpus=TestLogEntropyModel.TEST_CORPUS):
for test_doc in test_corpus:
yield test_doc
self.assertRaises(ValueError, logentropy_model.LogEntropyModel, corpus=get_generator())
def test_empty_fail(self):
"""Test creating a model using an empty input; should fail."""
self.assertRaises(ValueError, logentropy_model.LogEntropyModel, corpus=self.corpus_empty)
def test_transform(self):
# create the transformation model
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
# transform one document
doc = list(self.corpus_ok)[0]
transformed = model[doc]
expected = [
(0, 0.3748900964125389),
(1, 0.30730215324230725),
(3, 1.20941755462856)
]
self.assertTrue(np.allclose(transformed, expected))
def test_persistence(self):
fname = get_tmpfile('gensim_models_logentry.tst')
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models_logentry.tst.gz')
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname, mmap=None)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
TestLogEntropyModel
|
python
|
coleifer__peewee
|
tests/reflection.py
|
{
"start": 1237,
"end": 1315
}
|
class ____(TestModel):
_id = AutoField()
_name = CharField()
|
Underscores
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/check/functions.py
|
{
"start": 50046,
"end": 50096
}
|
class ____(CheckError):
pass
|
ParameterCheckError
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/dtype/npy/string.py
|
{
"start": 1900,
"end": 2409
}
|
class ____(NamedConfig[Literal["fixed_length_utf32"], LengthBytesConfig]):
"""
The JSON representation of the ``FixedLengthUTF32`` data type in Zarr V3.
References
----------
This representation is not currently defined in an external specification.
Examples
--------
```python
{
"name": "fixed_length_utf32",
"configuration": {
"length_bytes": 12
}
}
```
"""
@dataclass(frozen=True, kw_only=True)
|
FixedLengthUTF32JSON_V3
|
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 12522,
"end": 13024
}
|
class ____(object):
"""*
jina gRPC service to expose Endpoints from Executors.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.dry_run = channel.unary_unary(
'/jina.JinaGatewayDryRunRPC/dry_run',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=jina__pb2.StatusProto.FromString,
)
|
JinaGatewayDryRunRPCStub
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/api/snapshots_api.py
|
{
"start": 21045,
"end": 27711
}
|
class ____(_SnapshotsApi):
def create_full_snapshot(
self,
wait: bool = None,
) -> m.InlineResponse20011:
"""
Create new snapshot of the whole storage
"""
return self._build_for_create_full_snapshot(
wait=wait,
)
def create_shard_snapshot(
self,
collection_name: str,
shard_id: int,
wait: bool = None,
) -> m.InlineResponse20011:
"""
Create new snapshot of a shard for a collection
"""
return self._build_for_create_shard_snapshot(
collection_name=collection_name,
shard_id=shard_id,
wait=wait,
)
def create_snapshot(
self,
collection_name: str,
wait: bool = None,
) -> m.InlineResponse20011:
"""
Create new snapshot for a collection
"""
return self._build_for_create_snapshot(
collection_name=collection_name,
wait=wait,
)
def delete_full_snapshot(
self,
snapshot_name: str,
wait: bool = None,
) -> m.InlineResponse2009:
"""
Delete snapshot of the whole storage
"""
return self._build_for_delete_full_snapshot(
snapshot_name=snapshot_name,
wait=wait,
)
def delete_shard_snapshot(
self,
collection_name: str,
shard_id: int,
snapshot_name: str,
wait: bool = None,
) -> m.InlineResponse2009:
"""
Delete snapshot of a shard for a collection
"""
return self._build_for_delete_shard_snapshot(
collection_name=collection_name,
shard_id=shard_id,
snapshot_name=snapshot_name,
wait=wait,
)
def delete_snapshot(
self,
collection_name: str,
snapshot_name: str,
wait: bool = None,
) -> m.InlineResponse2009:
"""
Delete snapshot for a collection
"""
return self._build_for_delete_snapshot(
collection_name=collection_name,
snapshot_name=snapshot_name,
wait=wait,
)
def get_full_snapshot(
self,
snapshot_name: str,
) -> file:
"""
Download specified snapshot of the whole storage as a file
"""
return self._build_for_get_full_snapshot(
snapshot_name=snapshot_name,
)
def get_shard_snapshot(
self,
collection_name: str,
shard_id: int,
snapshot_name: str,
) -> file:
"""
Download specified snapshot of a shard from a collection as a file
"""
return self._build_for_get_shard_snapshot(
collection_name=collection_name,
shard_id=shard_id,
snapshot_name=snapshot_name,
)
def get_snapshot(
self,
collection_name: str,
snapshot_name: str,
) -> file:
"""
Download specified snapshot from a collection as a file
"""
return self._build_for_get_snapshot(
collection_name=collection_name,
snapshot_name=snapshot_name,
)
def list_full_snapshots(
self,
) -> m.InlineResponse20010:
"""
Get list of snapshots of the whole storage
"""
return self._build_for_list_full_snapshots()
def list_shard_snapshots(
self,
collection_name: str,
shard_id: int,
) -> m.InlineResponse20010:
"""
Get list of snapshots for a shard of a collection
"""
return self._build_for_list_shard_snapshots(
collection_name=collection_name,
shard_id=shard_id,
)
def list_snapshots(
self,
collection_name: str,
) -> m.InlineResponse20010:
"""
Get list of snapshots for a collection
"""
return self._build_for_list_snapshots(
collection_name=collection_name,
)
def recover_from_snapshot(
self,
collection_name: str,
wait: bool = None,
snapshot_recover: m.SnapshotRecover = None,
) -> m.InlineResponse2009:
"""
Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created.
"""
return self._build_for_recover_from_snapshot(
collection_name=collection_name,
wait=wait,
snapshot_recover=snapshot_recover,
)
def recover_from_uploaded_snapshot(
self,
collection_name: str,
wait: bool = None,
priority: SnapshotPriority = None,
checksum: str = None,
snapshot: IO[Any] = None,
) -> m.InlineResponse2009:
"""
Recover local collection data from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created.
"""
return self._build_for_recover_from_uploaded_snapshot(
collection_name=collection_name,
wait=wait,
priority=priority,
checksum=checksum,
snapshot=snapshot,
)
def recover_shard_from_snapshot(
self,
collection_name: str,
shard_id: int,
wait: bool = None,
shard_snapshot_recover: m.ShardSnapshotRecover = None,
) -> m.InlineResponse2009:
"""
Recover shard of a local collection data from a snapshot. This will overwrite any data, stored in this shard, for the collection.
"""
return self._build_for_recover_shard_from_snapshot(
collection_name=collection_name,
shard_id=shard_id,
wait=wait,
shard_snapshot_recover=shard_snapshot_recover,
)
def recover_shard_from_uploaded_snapshot(
self,
collection_name: str,
shard_id: int,
wait: bool = None,
priority: SnapshotPriority = None,
checksum: str = None,
snapshot: IO[Any] = None,
) -> m.InlineResponse2009:
"""
Recover shard of a local collection from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection shard.
"""
return self._build_for_recover_shard_from_uploaded_snapshot(
collection_name=collection_name,
shard_id=shard_id,
wait=wait,
priority=priority,
checksum=checksum,
snapshot=snapshot,
)
|
SyncSnapshotsApi
|
python
|
plotly__plotly.py
|
plotly/graph_objs/choroplethmap/colorbar/_tickfont.py
|
{
"start": 233,
"end": 9949
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmap.colorbar"
_path_str = "choroplethmap.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choroplethmap.
colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmap.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickfont
|
python
|
lxml__lxml
|
src/lxml/html/tests/test_html5parser.py
|
{
"start": 4986,
"end": 6924
}
|
class ____(unittest.TestCase):
    """Tests for ``lxml.html.html5parser.fragment_fromstring``."""

    def call_it(self, *args, **kwargs):
        # Import lazily so the module can be collected without html5lib.
        if html5lib is None:
            raise unittest.SkipTest("html5lib is not installed")
        from lxml.html.html5parser import fragment_fromstring
        return fragment_fromstring(*args, **kwargs)

    def test_basic(self):
        # A single parsed fragment is returned as-is.
        expected = DummyElement()
        result = self.call_it('html', parser=DummyParser(fragments=[expected]))
        self.assertEqual(result, expected)

    def test_raises_type_error_on_nonstring_input(self):
        with self.assertRaises(TypeError):
            self.call_it(None)

    def test_create_parent(self):
        # Leading text and all fragments are wrapped under the new parent.
        fragments = ['head', Element('child')]
        root = self.call_it(
            'html', parser=DummyParser(fragments=fragments),
            create_parent='parent')
        self.assertEqual(root.tag, 'parent')
        self.assertEqual(root.text, 'head')
        self.assertEqual(root[0].tag, 'child')

    def test_create_parent_default_type_no_ns(self):
        # create_parent=True without HTML namespaces defaults to a 'div'.
        root = self.call_it(
            'html',
            parser=DummyParser(fragments=[], namespaceHTMLElements=False),
            create_parent=True)
        self.assertEqual(root.tag, 'div')

    def test_raises_error_on_leading_text(self):
        with self.assertRaises(ParserError):
            self.call_it('html', parser=DummyParser(fragments=['leading text']))

    def test_raises_error_if_no_elements_found(self):
        with self.assertRaises(ParserError):
            self.call_it('html', parser=DummyParser(fragments=[]))

    def test_raises_error_if_multiple_elements_found(self):
        with self.assertRaises(ParserError):
            self.call_it(
                'html',
                parser=DummyParser(fragments=[DummyElement(), DummyElement()]))

    def test_raises_error_if_tail(self):
        with self.assertRaises(ParserError):
            self.call_it(
                'html', parser=DummyParser(fragments=[DummyElement(tail='tail')]))
|
Test_fragment_fromstring
|
python
|
ansible__ansible
|
test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py
|
{
"start": 84,
"end": 432
}
|
class ____(ActionBase):
    """Test action plugin that echoes back the task arguments it received."""

    # This action runs entirely on the controller; no files are transferred.
    TRANSFERS_FILES = False
    # No argument validation: anything passed in is simply echoed back.
    _VALID_ARGS = frozenset()

    def run(self, tmp=None, task_vars=None):
        """Run the action.

        Returns a result dict with ``changed=False`` and the raw task
        arguments under ``args_in``.
        """
        if task_vars is None:
            task_vars = dict()
        # Zero-argument super() instead of super(ActionModule, self): the
        # hard-coded class name is not bound in this module and would raise
        # NameError at runtime. The base result is intentionally discarded —
        # we only run it for its side effects — and replaced below.
        super().run(None, task_vars)
        return dict(changed=False, args_in=self._task.args)
|
ActionModule
|
python
|
redis__redis-py
|
redis/asyncio/connection.py
|
{
"start": 1834,
"end": 2114
}
|
class ____(enum.Enum):
    """Enum whose single member serves as a unique sentinel value."""

    sentinel = object()
# Module-level sentinel used to distinguish "argument not supplied" from None.
SENTINEL = _Sentinel.sentinel

# Select the response parser implementation: prefer the C-accelerated
# hiredis-backed parser when the hiredis package is installed, otherwise
# fall back to the pure-Python RESP2 parser.
DefaultParser: Type[Union[_AsyncRESP2Parser, _AsyncRESP3Parser, _AsyncHiredisParser]]
if HIREDIS_AVAILABLE:
    DefaultParser = _AsyncHiredisParser
else:
    DefaultParser = _AsyncRESP2Parser
|
_Sentinel
|
python
|
ray-project__ray
|
python/ray/tune/examples/pbt_dcgan_mnist/common.py
|
{
"start": 1834,
"end": 2566
}
|
class ____(nn.Module):
    """DCGAN generator network.

    Upsamples a latent tensor of shape ``(N, nz, 1, 1)`` through four
    transposed convolutions (1 -> 4 -> 8 -> 16 -> 32 spatially, by the
    ConvTranspose2d size formula) to an ``(N, nc, 32, 32)`` image, with a
    final Tanh squashing outputs into ``[-1, 1]``. Relies on the
    module-level constants ``nz``, ``ngf`` and ``nc``.
    """

    def __init__(self):
        # Zero-argument super() instead of super(Generator, self): the
        # hard-coded class name is not bound in this module and would
        # raise NameError at construction time.
        super().__init__()
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, input):
        """Map a latent batch ``input`` to generated images."""
        return self.main(input)
|
Generator
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/schedules.py
|
{
"start": 11182,
"end": 18254
}
|
class ____(PrefectBaseModel):
    """
    Cron schedule

    NOTE: If the timezone is a DST-observing one, then the schedule will adjust
    itself appropriately. Cron's rules for DST are based on schedule times, not
    intervals. This means that an hourly cron schedule will fire on every new
    schedule hour, not every elapsed hour; for example, when clocks are set back
    this will result in a two-hour pause as the schedule will fire *the first
    time* 1am is reached and *the first time* 2am is reached, 120 minutes later.
    Longer schedules, such as one that fires at 9am every morning, will
    automatically adjust for DST.

    Args:
        cron (str): a valid cron string
        timezone (str): a valid timezone string in IANA tzdata format (for example,
            America/New_York).
        day_or (bool, optional): Control how croniter handles `day` and `day_of_week`
            entries. Defaults to True, matching cron which connects those values using
            OR. If the switch is set to False, the values are connected using AND. This
            behaves like fcron and enables you to e.g. define a job that executes each
            2nd friday of a month by setting the days of month and the weekday.
    """

    # Reject unknown fields instead of silently ignoring them.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    cron: str = Field(default=..., examples=["0 0 * * *"])
    timezone: Optional[str] = Field(default=None, examples=["America/New_York"])
    day_or: bool = Field(
        default=True,
        description=(
            "Control croniter behavior for handling day and day_of_week entries."
        ),
    )

    @model_validator(mode="after")
    def validate_timezone(self):
        """Normalize the timezone, applying the default when none was given."""
        self.timezone = default_timezone(self.timezone, self.model_dump())
        return self

    @field_validator("cron")
    @classmethod
    def valid_cron_string(cls, v: str) -> str:
        """Reject cron expressions that cannot be parsed."""
        return validate_cron_string(v)

    async def get_dates(
        self,
        n: Optional[int] = None,
        start: Optional[datetime.datetime] = None,
        end: Optional[datetime.datetime] = None,
    ) -> List[DateTime]:
        """Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
        following the start date.

        Args:
            n (int): The number of dates to generate
            start (datetime.datetime, optional): The first returned date will be on or
                after this date. Defaults to None. If a timezone-naive datetime is
                provided, it is assumed to be in the schedule's timezone.
            end (datetime.datetime, optional): The maximum scheduled date to return. If
                a timezone-naive datetime is provided, it is assumed to be in the
                schedule's timezone.

        Returns:
            List[DateTime]: A list of dates
        """
        # The generator may yield out of order around DST transitions, so sort.
        return sorted(self._get_dates_generator(n=n, start=start, end=end))

    def _get_dates_generator(
        self,
        n: Optional[int] = None,
        start: Optional[datetime.datetime] = None,
        end: Optional[datetime.datetime] = None,
    ) -> Generator[DateTime, None, None]:
        """Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
        following the start date.

        Args:
            n (int): The number of dates to generate
            start (datetime.datetime, optional): The first returned date will be on or
                after this date. Defaults to the current date. If a timezone-naive
                datetime is provided, it is assumed to be in the schedule's timezone.
            end (datetime.datetime, optional): No returned date will exceed this date.
                If a timezone-naive datetime is provided, it is assumed to be in the
                schedule's timezone.

        Returns:
            List[DateTime]: a list of dates
        """
        if start is None:
            start = now("UTC")
        start, end = _prepare_scheduling_start_and_end(start, end, self.timezone)
        if n is None:
            # if an end was supplied, we do our best to supply all matching dates (up to
            # MAX_ITERATIONS)
            if end is not None:
                n = MAX_ITERATIONS
            else:
                n = 1
        if self.timezone:
            # Work in the schedule's own timezone; Python 3.13+ uses the stdlib
            # zoneinfo path, earlier versions use the pendulum-style in_tz().
            if sys.version_info >= (3, 13):
                start = start.astimezone(ZoneInfo(self.timezone or "UTC"))
            else:
                start = start.in_tz(self.timezone)
        # subtract one second from the start date, so that croniter returns it
        # as an event (if it meets the cron criteria)
        start = start - datetime.timedelta(seconds=1)
        # Respect microseconds by rounding up
        if start.microsecond > 0:
            start += datetime.timedelta(seconds=1)
        # croniter's DST logic interferes with all other datetime libraries except pytz
        if sys.version_info >= (3, 13):
            start_localized = start.astimezone(ZoneInfo(self.timezone or "UTC"))
            start_naive_tz = start.replace(tzinfo=None)
        else:
            # Rebuild the wall-clock time as a pytz-localized datetime so that
            # croniter's DST handling behaves as expected.
            start_localized = pytz.timezone(start.tz.name).localize(
                datetime.datetime(
                    year=start.year,
                    month=start.month,
                    day=start.day,
                    hour=start.hour,
                    minute=start.minute,
                    second=start.second,
                    microsecond=start.microsecond,
                )
            )
            start_naive_tz = start.naive()
        cron = croniter(self.cron, start_naive_tz, day_or=self.day_or)  # type: ignore
        dates = set()
        counter = 0
        while True:
            # croniter does not handle DST properly when the start time is
            # in and around when the actual shift occurs. To work around this,
            # we use the naive start time to get the next cron date delta, then
            # add that time to the original scheduling anchor.
            next_time = cron.get_next(datetime.datetime)
            delta = next_time - start_naive_tz
            if sys.version_info >= (3, 13):
                from whenever import ZonedDateTime

                # Use `whenever` to handle DST correctly
                next_date = (
                    ZonedDateTime.from_py_datetime(start_localized + delta)
                    .to_tz(self.timezone or "UTC")
                    .py_datetime()
                )
            else:
                next_date = create_datetime_instance(start_localized + delta)
            # if the end date was exceeded, exit
            if end and next_date > end:
                break
            # ensure no duplicates; weird things can happen with DST
            if next_date not in dates:
                dates.add(next_date)
                yield next_date
            # if enough dates have been collected or enough attempts were made, exit
            if len(dates) >= n or counter > MAX_ITERATIONS:
                break
            counter += 1
# Arbitrary fixed date — presumably the default anchor for interval-style
# schedules defined elsewhere in this module; confirm against its usage.
DEFAULT_ANCHOR_DATE = datetime.date(2020, 1, 1)
|
CronSchedule
|
python
|
imageio__imageio
|
imageio/plugins/_tifffile.py
|
{
"start": 25001,
"end": 71110
}
|
class ____(object):
"""Write numpy arrays to TIFF file.
TiffWriter instances must be closed using the 'close' method, which is
automatically called when using the 'with' context manager.
TiffWriter's main purpose is saving nD numpy array's as TIFF,
not to create any possible TIFF format. Specifically, JPEG compression,
SubIFDs, ExifIFD, or GPSIFD tags are not supported.
Examples
--------
>>> # successively append images to BigTIFF file
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6, photometric='minisblack')
"""
def __init__(self, file, bigtiff=False, byteorder=None, append=False, imagej=False):
"""Open a TIFF file for writing.
An empty TIFF file is created if the file does not exist, else the
file is overwritten with an empty TIFF file unless 'append'
is true. Use bigtiff=True when creating files larger than 4 GB.
Parameters
----------
file : str, binary stream, or FileHandle
File name or writable binary stream, such as an open file
or BytesIO.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>', '=', '|'}
The endianness of the data in the file.
By default, this is the system's native byte order.
append : bool
If True and 'file' is an existing standard TIFF file, image data
and tags are appended to the file.
Appending data may corrupt specifically formatted TIFF files
such as LSM, STK, ImageJ, NIH, or FluoView.
imagej : bool
If True, write an ImageJ hyperstack compatible file.
This format can handle data types uint8, uint16, or float32 and
data shapes up to 6 dimensions in TZCYXS order.
RGB images (S=3 or S=4) must be uint8.
ImageJ's default byte order is big-endian but this implementation
uses the system's native byte order by default.
ImageJ does not support BigTIFF format or LZMA compression.
The ImageJ file format is undocumented.
"""
if append:
# determine if file is an existing TIFF file that can be extended
try:
with FileHandle(file, mode="rb", size=0) as fh:
pos = fh.tell()
try:
with TiffFile(fh) as tif:
if append != "force" and any(
getattr(tif, "is_" + a)
for a in (
"lsm",
"stk",
"imagej",
"nih",
"fluoview",
"micromanager",
)
):
raise ValueError("file contains metadata")
byteorder = tif.byteorder
bigtiff = tif.is_bigtiff
self._ifdoffset = tif.pages.next_page_offset
except Exception as e:
raise ValueError("cannot append to file: %s" % str(e))
finally:
fh.seek(pos)
except (IOError, FileNotFoundError):
append = False
if byteorder in (None, "=", "|"):
byteorder = "<" if sys.byteorder == "little" else ">"
elif byteorder not in ("<", ">"):
raise ValueError("invalid byteorder %s" % byteorder)
if imagej and bigtiff:
warnings.warn("writing incompatible BigTIFF ImageJ")
self._byteorder = byteorder
self._imagej = bool(imagej)
self._truncate = False
self._metadata = None
self._colormap = None
self._descriptionoffset = 0
self._descriptionlen = 0
self._descriptionlenoffset = 0
self._tags = None
self._shape = None # normalized shape of data in consecutive pages
self._datashape = None # shape of data in consecutive pages
self._datadtype = None # data type
self._dataoffset = None # offset to data
self._databytecounts = None # byte counts per plane
self._tagoffsets = None # strip or tile offset tag code
if bigtiff:
self._bigtiff = True
self._offsetsize = 8
self._tagsize = 20
self._tagnoformat = "Q"
self._offsetformat = "Q"
self._valueformat = "8s"
else:
self._bigtiff = False
self._offsetsize = 4
self._tagsize = 12
self._tagnoformat = "H"
self._offsetformat = "I"
self._valueformat = "4s"
if append:
self._fh = FileHandle(file, mode="r+b", size=0)
self._fh.seek(0, 2)
else:
self._fh = FileHandle(file, mode="wb", size=0)
self._fh.write({"<": b"II", ">": b"MM"}[byteorder])
if bigtiff:
self._fh.write(struct.pack(byteorder + "HHH", 43, 8, 0))
else:
self._fh.write(struct.pack(byteorder + "H", 42))
# first IFD
self._ifdoffset = self._fh.tell()
self._fh.write(struct.pack(byteorder + self._offsetformat, 0))
def save(
self,
data=None,
shape=None,
dtype=None,
returnoffset=False,
photometric=None,
planarconfig=None,
tile=None,
contiguous=True,
align=16,
truncate=False,
compress=0,
rowsperstrip=None,
predictor=False,
colormap=None,
description=None,
datetime=None,
resolution=None,
software="tifffile.py",
metadata={},
ijmetadata=None,
extratags=(),
):
"""Write numpy array and tags to TIFF file.
The data shape's last dimensions are assumed to be image depth,
height (length), width, and samples.
If a colormap is provided, the data's dtype must be uint8 or uint16
and the data values are indices into the last dimension of the
colormap.
If 'shape' and 'dtype' are specified, an empty array is saved.
This option cannot be used with compression or multiple tiles.
Image data are written uncompressed in one strip per plane by default.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The SampleFormat and BitsPerSample tags are derived from the data type.
Parameters
----------
data : numpy.ndarray or None
Input image array.
shape : tuple or None
Shape of the empty array to save. Used only if 'data' is None.
dtype : numpy.dtype or None
Data-type of the empty array to save. Used only if 'data' is None.
returnoffset : bool
If True and the image data in the file is memory-mappable, return
the offset and number of bytes of the image data in the file.
photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'}
The color space of the image data.
By default, this setting is inferred from the data shape and the
value of colormap.
For CFA images, DNG tags must be specified in 'extratags'.
planarconfig : {'CONTIG', 'SEPARATE'}
Specifies if samples are stored contiguous or in separate planes.
By default, this setting is inferred from the data shape.
If this parameter is set, extra samples are used to store grayscale
images.
'CONTIG': last dimension contains samples.
'SEPARATE': third last dimension contains samples.
tile : tuple of int
The shape (depth, length, width) of image tiles to write.
If None (default), image data are written in strips.
The tile length and width must be a multiple of 16.
If the tile depth is provided, the SGI ImageDepth and TileDepth
tags are used to save volume data.
Unless a single tile is used, tiles cannot be used to write
contiguous files.
Few software can read the SGI format, e.g. MeVisLab.
contiguous : bool
If True (default) and the data and parameters are compatible with
previous ones, if any, the image data are stored contiguously after
the previous one. Parameters 'photometric' and 'planarconfig'
are ignored. Parameters 'description', datetime', and 'extratags'
are written to the first page of a contiguous series only.
align : int
Byte boundary on which to align the image data in the file.
Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data.
Following contiguous writes are not aligned.
truncate : bool
If True, only write the first page including shape metadata if
possible (uncompressed, contiguous, not tiled).
Other TIFF readers will only be able to read part of the data.
compress : int or 'LZMA', 'ZSTD'
Values from 0 to 9 controlling the level of zlib compression.
If 0 (default), data are written uncompressed.
Compression cannot be used to write contiguous files.
If 'LZMA' or 'ZSTD', LZMA or ZSTD compression is used, which is
not available on all platforms.
rowsperstrip : int
The number of rows per strip used for compression.
Uncompressed data are written in one strip per plane.
predictor : bool
If True, apply horizontal differencing to integer type images
before compression.
colormap : numpy.ndarray
RGB color values for the corresponding data value.
Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
description : str
The subject of the image. Must be 7-bit ASCII. Cannot be used with
the ImageJ format. Saved with the first page only.
datetime : datetime
Date and time of image creation in '%Y:%m:%d %H:%M:%S' format.
If None (default), the current date and time is used.
Saved with the first page only.
resolution : (float, float[, str]) or ((int, int), (int, int)[, str])
X and Y resolutions in pixels per resolution unit as float or
rational numbers. A third, optional parameter specifies the
resolution unit, which must be None (default for ImageJ),
'INCH' (default), or 'CENTIMETER'.
software : str
Name of the software used to create the file. Must be 7-bit ASCII.
Saved with the first page only.
metadata : dict
Additional meta data to be saved along with shape information
in JSON or ImageJ formats in an ImageDescription tag.
If None, do not write a second ImageDescription tag.
Strings must be 7-bit ASCII. Saved with the first page only.
ijmetadata : dict
Additional meta data to be saved in application specific
IJMetadata and IJMetadataByteCounts tags. Refer to the
imagej_metadata_tags function for valid keys and values.
Saved with the first page only.
extratags : sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q.
count : int
Number of data values. Not used for string or byte string
values.
value : sequence
'Count' values compatible with 'dtype'.
Byte strings must contain count values of dtype packed as
binary data.
writeonce : bool
If True, the tag is written to the first page only.
"""
# TODO: refactor this function
fh = self._fh
byteorder = self._byteorder
if data is None:
if compress:
raise ValueError("cannot save compressed empty file")
datashape = shape
datadtype = numpy.dtype(dtype).newbyteorder(byteorder)
datadtypechar = datadtype.char
else:
data = numpy.asarray(data, byteorder + data.dtype.char, "C")
if data.size == 0:
raise ValueError("cannot save empty array")
datashape = data.shape
datadtype = data.dtype
datadtypechar = data.dtype.char
returnoffset = returnoffset and datadtype.isnative
bilevel = datadtypechar == "?"
if bilevel:
index = -1 if datashape[-1] > 1 else -2
datasize = product(datashape[:index])
if datashape[index] % 8:
datasize *= datashape[index] // 8 + 1
else:
datasize *= datashape[index] // 8
else:
datasize = product(datashape) * datadtype.itemsize
# just append contiguous data if possible
self._truncate = bool(truncate)
if self._datashape:
if (
not contiguous
or self._datashape[1:] != datashape
or self._datadtype != datadtype
or (compress and self._tags)
or tile
or not numpy.array_equal(colormap, self._colormap)
):
# incompatible shape, dtype, compression mode, or colormap
self._write_remaining_pages()
self._write_image_description()
self._truncate = False
self._descriptionoffset = 0
self._descriptionlenoffset = 0
self._datashape = None
self._colormap = None
if self._imagej:
raise ValueError("ImageJ does not support non-contiguous data")
else:
# consecutive mode
self._datashape = (self._datashape[0] + 1,) + datashape
if not compress:
# write contiguous data, write IFDs/tags later
offset = fh.tell()
if data is None:
fh.write_empty(datasize)
else:
fh.write_array(data)
if returnoffset:
return offset, datasize
return
input_shape = datashape
tagnoformat = self._tagnoformat
valueformat = self._valueformat
offsetformat = self._offsetformat
offsetsize = self._offsetsize
tagsize = self._tagsize
MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK
RGB = TIFF.PHOTOMETRIC.RGB
CFA = TIFF.PHOTOMETRIC.CFA
PALETTE = TIFF.PHOTOMETRIC.PALETTE
CONTIG = TIFF.PLANARCONFIG.CONTIG
SEPARATE = TIFF.PLANARCONFIG.SEPARATE
# parse input
if photometric is not None:
photometric = enumarg(TIFF.PHOTOMETRIC, photometric)
if planarconfig:
planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig)
if not compress:
compress = False
compresstag = 1
predictor = False
else:
if isinstance(compress, (tuple, list)):
compress, compresslevel = compress
elif isinstance(compress, int):
compress, compresslevel = "ADOBE_DEFLATE", int(compress)
if not 0 <= compresslevel <= 9:
raise ValueError("invalid compression level %s" % compress)
else:
compresslevel = None
compress = compress.upper()
compresstag = enumarg(TIFF.COMPRESSION, compress)
# prepare ImageJ format
if self._imagej:
if compress in ("LZMA", "ZSTD"):
raise ValueError("ImageJ cannot handle LZMA or ZSTD compression")
if description:
warnings.warn("not writing description to ImageJ file")
description = None
volume = False
if datadtypechar not in "BHhf":
raise ValueError("ImageJ does not support data type %s" % datadtypechar)
ijrgb = photometric == RGB if photometric else None
if datadtypechar not in "B":
ijrgb = False
ijshape = imagej_shape(datashape, ijrgb)
if ijshape[-1] in (3, 4):
photometric = RGB
if datadtypechar not in "B":
raise ValueError(
"ImageJ does not support data type %s "
"for RGB" % datadtypechar
)
elif photometric is None:
photometric = MINISBLACK
planarconfig = None
if planarconfig == SEPARATE:
raise ValueError("ImageJ does not support planar images")
else:
planarconfig = CONTIG if ijrgb else None
# define compress function
if compress:
if compresslevel is None:
compressor, compresslevel = TIFF.COMPESSORS[compresstag]
else:
compressor, _ = TIFF.COMPESSORS[compresstag]
compresslevel = int(compresslevel)
if predictor:
if datadtype.kind not in "iu":
raise ValueError("prediction not implemented for %s" % datadtype)
def compress(data, level=compresslevel):
# horizontal differencing
diff = numpy.diff(data, axis=-2)
data = numpy.insert(diff, 0, data[..., 0, :], axis=-2)
return compressor(data, level)
else:
def compress(data, level=compresslevel):
return compressor(data, level)
# verify colormap and indices
if colormap is not None:
if datadtypechar not in "BH":
raise ValueError("invalid data dtype for palette mode")
colormap = numpy.asarray(colormap, dtype=byteorder + "H")
if colormap.shape != (3, 2 ** (datadtype.itemsize * 8)):
raise ValueError("invalid color map shape")
self._colormap = colormap
# verify tile shape
if tile:
tile = tuple(int(i) for i in tile[:3])
volume = len(tile) == 3
if (
len(tile) < 2
or tile[-1] % 16
or tile[-2] % 16
or any(i < 1 for i in tile)
):
raise ValueError("invalid tile shape")
else:
tile = ()
volume = False
# normalize data shape to 5D or 6D, depending on volume:
# (pages, planar_samples, [depth,] height, width, contig_samples)
datashape = reshape_nd(datashape, 3 if photometric == RGB else 2)
shape = datashape
ndim = len(datashape)
samplesperpixel = 1
extrasamples = 0
if volume and ndim < 3:
volume = False
if colormap is not None:
photometric = PALETTE
planarconfig = None
if photometric is None:
photometric = MINISBLACK
if bilevel:
photometric = TIFF.PHOTOMETRIC.MINISWHITE
elif planarconfig == CONTIG:
if ndim > 2 and shape[-1] in (3, 4):
photometric = RGB
elif planarconfig == SEPARATE:
if volume and ndim > 3 and shape[-4] in (3, 4):
photometric = RGB
elif ndim > 2 and shape[-3] in (3, 4):
photometric = RGB
elif ndim > 2 and shape[-1] in (3, 4):
photometric = RGB
elif self._imagej:
photometric = MINISBLACK
elif volume and ndim > 3 and shape[-4] in (3, 4):
photometric = RGB
elif ndim > 2 and shape[-3] in (3, 4):
photometric = RGB
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = MINISBLACK
if photometric == RGB:
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = CONTIG
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = SEPARATE
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = SEPARATE
else:
planarconfig = CONTIG
if planarconfig == CONTIG:
datashape = (-1, 1) + shape[(-4 if volume else -3) :]
samplesperpixel = datashape[-1]
else:
datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,)
samplesperpixel = datashape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif photometric == CFA:
if len(shape) != 2:
raise ValueError("invalid CFA image")
volume = False
planarconfig = None
datashape = (-1, 1) + shape[-2:] + (1,)
if 50706 not in (et[0] for et in extratags):
raise ValueError("must specify DNG tags for CFA image")
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == CONTIG:
datashape = (-1, 1) + shape[(-4 if volume else -3) :]
samplesperpixel = datashape[-1]
else:
datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,)
samplesperpixel = datashape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
datashape = (-1, 1) + shape[(-3 if volume else -2) :] + (1,)
# normalize shape to 6D
assert len(datashape) in (5, 6)
if len(datashape) == 5:
datashape = datashape[:2] + (1,) + datashape[2:]
if datashape[0] == -1:
s0 = product(input_shape) // product(datashape[1:])
datashape = (s0,) + datashape[1:]
shape = datashape
if data is not None:
data = data.reshape(shape)
if tile and not volume:
tile = (1, tile[-2], tile[-1])
if photometric == PALETTE:
if samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1:
raise ValueError("invalid data shape for palette mode")
if photometric == RGB and samplesperpixel == 2:
raise ValueError("not a RGB image (samplesperpixel=2)")
if bilevel:
if compress:
raise ValueError("cannot save compressed bilevel image")
if tile:
raise ValueError("cannot save tiled bilevel image")
if photometric not in (0, 1):
raise ValueError("cannot save bilevel image as %s" % str(photometric))
datashape = list(datashape)
if datashape[-2] % 8:
datashape[-2] = datashape[-2] // 8 + 1
else:
datashape[-2] = datashape[-2] // 8
datashape = tuple(datashape)
assert datasize == product(datashape)
if data is not None:
data = numpy.packbits(data, axis=-2)
assert datashape[-2] == data.shape[-2]
bytestr = (
bytes
if sys.version[0] == "2"
else (lambda x: bytes(x, "ascii") if isinstance(x, str) else x)
)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
strip_or_tile = "Tile" if tile else "Strip"
tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + "ByteCounts"]
tag_offsets = TIFF.TAG_NAMES[strip_or_tile + "Offsets"]
self._tagoffsets = tag_offsets
def pack(fmt, *val):
return struct.pack(byteorder + fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value
# Append (code, ifdentry, ifdvalue, writeonce) to tags list
code = int(TIFF.TAG_NAMES.get(code, code))
try:
tifftype = TIFF.DATA_DTYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == "s":
# strings
value = bytestr(value) + b"\0"
count = rawcount = len(value)
rawcount = value.find(b"\0\0")
if rawcount < 0:
rawcount = count
else:
rawcount += 1 # length of string without buffer
value = (value,)
elif isinstance(value, bytes):
# packed binary data
dtsize = struct.calcsize(dtype)
if len(value) % dtsize:
raise ValueError("invalid packed binary data")
count = len(value) // dtsize
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack("HH", code, tifftype), pack(offsetformat, rawcount)]
ifdvalue = None
if struct.calcsize(dtype) * count <= offsetsize:
# value(s) can be written directly
if isinstance(value, bytes):
ifdentry.append(pack(valueformat, value))
elif count == 1:
if isinstance(value, (tuple, list, numpy.ndarray)):
value = value[0]
ifdentry.append(pack(valueformat, pack(dtype, value)))
else:
ifdentry.append(pack(valueformat, pack(str(count) + dtype, *value)))
else:
# use offset to value(s)
ifdentry.append(pack(offsetformat, 0))
if isinstance(value, bytes):
ifdvalue = value
elif isinstance(value, numpy.ndarray):
assert value.size == count
assert value.dtype.char == dtype
ifdvalue = value.tostring()
elif isinstance(value, (tuple, list)):
ifdvalue = pack(str(count) + dtype, *value)
else:
ifdvalue = pack(dtype, value)
tags.append((code, b"".join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
""" "Return nominator and denominator from float or two integers."""
from fractions import Fraction # delayed import
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if description:
# user provided description
addtag("ImageDescription", "s", 0, description, writeonce=True)
# write shape and metadata to ImageDescription
self._metadata = {} if not metadata else metadata.copy()
if self._imagej:
description = imagej_description(
input_shape,
shape[-1] in (3, 4),
self._colormap is not None,
**self._metadata,
)
elif metadata or metadata == {}:
if self._truncate:
self._metadata.update(truncated=True)
description = json_description(input_shape, **self._metadata)
else:
description = None
if description:
# add 64 bytes buffer
# the image description might be updated later with the final shape
description = str2bytes(description, "ascii")
description += b"\0" * 64
self._descriptionlen = len(description)
addtag("ImageDescription", "s", 0, description, writeonce=True)
if software:
addtag("Software", "s", 0, software, writeonce=True)
if datetime is None:
datetime = self._now()
addtag(
"DateTime", "s", 0, datetime.strftime("%Y:%m:%d %H:%M:%S"), writeonce=True
)
addtag("Compression", "H", 1, compresstag)
if predictor:
addtag("Predictor", "H", 1, 2)
addtag("ImageWidth", "I", 1, shape[-2])
addtag("ImageLength", "I", 1, shape[-3])
if tile:
addtag("TileWidth", "I", 1, tile[-1])
addtag("TileLength", "I", 1, tile[-2])
if tile[0] > 1:
addtag("ImageDepth", "I", 1, shape[-4])
addtag("TileDepth", "I", 1, tile[0])
addtag("NewSubfileType", "I", 1, 0)
if not bilevel:
sampleformat = {"u": 1, "i": 2, "f": 3, "c": 6}[datadtype.kind]
addtag(
"SampleFormat", "H", samplesperpixel, (sampleformat,) * samplesperpixel
)
addtag("PhotometricInterpretation", "H", 1, photometric.value)
if colormap is not None:
addtag("ColorMap", "H", colormap.size, colormap)
addtag("SamplesPerPixel", "H", 1, samplesperpixel)
if bilevel:
pass
elif planarconfig and samplesperpixel > 1:
addtag("PlanarConfiguration", "H", 1, planarconfig.value)
addtag(
"BitsPerSample",
"H",
samplesperpixel,
(datadtype.itemsize * 8,) * samplesperpixel,
)
else:
addtag("BitsPerSample", "H", 1, datadtype.itemsize * 8)
if extrasamples:
if photometric == RGB and extrasamples == 1:
addtag("ExtraSamples", "H", 1, 1) # associated alpha channel
else:
addtag("ExtraSamples", "H", extrasamples, (0,) * extrasamples)
if resolution is not None:
addtag("XResolution", "2I", 1, rational(resolution[0]))
addtag("YResolution", "2I", 1, rational(resolution[1]))
if len(resolution) > 2:
unit = resolution[2]
unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit)
elif self._imagej:
unit = 1
else:
unit = 2
addtag("ResolutionUnit", "H", 1, unit)
elif not self._imagej:
addtag("XResolution", "2I", 1, (1, 1))
addtag("YResolution", "2I", 1, (1, 1))
addtag("ResolutionUnit", "H", 1, 1)
if ijmetadata:
for t in imagej_metadata_tags(ijmetadata, byteorder):
addtag(*t)
contiguous = not compress
if tile:
# one chunk per tile per plane
tiles = (
(shape[2] + tile[0] - 1) // tile[0],
(shape[3] + tile[1] - 1) // tile[1],
(shape[4] + tile[2] - 1) // tile[2],
)
numtiles = product(tiles) * shape[1]
stripbytecounts = [
product(tile) * shape[-1] * datadtype.itemsize
] * numtiles
addtag(tagbytecounts, offsetformat, numtiles, stripbytecounts)
addtag(tag_offsets, offsetformat, numtiles, [0] * numtiles)
contiguous = contiguous and product(tiles) == 1
if not contiguous:
# allocate tile buffer
chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype)
elif contiguous:
# one strip per plane
if bilevel:
stripbytecounts = [product(datashape[2:])] * shape[1]
else:
stripbytecounts = [product(datashape[2:]) * datadtype.itemsize] * shape[
1
]
addtag(tagbytecounts, offsetformat, shape[1], stripbytecounts)
addtag(tag_offsets, offsetformat, shape[1], [0] * shape[1])
addtag("RowsPerStrip", "I", 1, shape[-3])
else:
# compress rowsperstrip or ~64 KB chunks
rowsize = product(shape[-2:]) * datadtype.itemsize
if rowsperstrip is None:
rowsperstrip = 65536 // rowsize
if rowsperstrip < 1:
rowsperstrip = 1
elif rowsperstrip > shape[-3]:
rowsperstrip = shape[-3]
addtag("RowsPerStrip", "I", 1, rowsperstrip)
numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip
numstrips *= shape[1]
stripbytecounts = [0] * numstrips
addtag(tagbytecounts, offsetformat, numstrips, [0] * numstrips)
addtag(tag_offsets, offsetformat, numstrips, [0] * numstrips)
if data is None and not contiguous:
raise ValueError("cannot write non-contiguous empty file")
# add extra tags from user
for t in extratags:
addtag(*t)
# TODO: check TIFFReadDirectoryCheckOrder warning in files containing
# multiple tags of same code
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not (self._bigtiff or self._imagej) and (fh.tell() + datasize > 2**31 - 1):
raise ValueError("data too large for standard TIFF file")
# if not compressed or multi-tiled, write the first IFD and then
# all data contiguously; else, write all IFDs and data interleaved
for pageindex in range(1 if contiguous else shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
if pos % 2:
# location of IFD must begin on a word boundary
fh.write(b"\0")
pos += 1
fh.seek(self._ifdoffset)
fh.write(pack(offsetformat, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(tagnoformat, len(tags)))
tag_offset = fh.tell()
fh.write(b"".join(t[1] for t in tags))
self._ifdoffset = fh.tell()
fh.write(pack(offsetformat, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
if pos % 2:
# tag value is expected to begin on word boundary
fh.write(b"\0")
pos += 1
fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4)
fh.write(pack(offsetformat, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
stripoffsetsoffset = pos
elif tag[0] == tagbytecounts:
strip_bytecounts_offset = pos
elif tag[0] == 270 and tag[2].endswith(b"\0\0\0\0"):
# image description buffer
self._descriptionoffset = pos
self._descriptionlenoffset = tag_offset + tagindex * tagsize + 4
fh.write(tag[2])
# write image data
data_offset = fh.tell()
skip = align - data_offset % align
fh.seek(skip, 1)
data_offset += skip
if contiguous:
if data is None:
fh.write_empty(datasize)
else:
fh.write_array(data)
elif tile:
if data is None:
fh.write_empty(numtiles * stripbytecounts[0])
else:
stripindex = 0
for plane in data[pageindex]:
for tz in range(tiles[0]):
for ty in range(tiles[1]):
for tx in range(tiles[2]):
c0 = min(tile[0], shape[2] - tz * tile[0])
c1 = min(tile[1], shape[3] - ty * tile[1])
c2 = min(tile[2], shape[4] - tx * tile[2])
chunk[c0:, c1:, c2:] = 0
chunk[:c0, :c1, :c2] = plane[
tz * tile[0] : tz * tile[0] + c0,
ty * tile[1] : ty * tile[1] + c1,
tx * tile[2] : tx * tile[2] + c2,
]
if compress:
t = compress(chunk)
fh.write(t)
stripbytecounts[stripindex] = len(t)
stripindex += 1
else:
fh.write_array(chunk)
fh.flush()
elif compress:
# write one strip per rowsperstrip
assert data.shape[2] == 1 # not handling depth
numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip
stripindex = 0
for plane in data[pageindex]:
for i in range(numstrips):
strip = plane[0, i * rowsperstrip : (i + 1) * rowsperstrip]
strip = compress(strip)
fh.write(strip)
stripbytecounts[stripindex] = len(strip)
stripindex += 1
# update strip/tile offsets and bytecounts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(stripoffsetsoffset)
strip_offset = data_offset
for size in stripbytecounts:
fh.write(pack(offsetformat, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4)
fh.write(pack(offsetformat, data_offset))
elif tag[0] == tagbytecounts: # strip/tile bytecounts
if compress:
if tag[2]:
fh.seek(strip_bytecounts_offset)
for size in stripbytecounts:
fh.write(pack(offsetformat, size))
else:
fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4)
fh.write(pack(offsetformat, stripbytecounts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [tag for tag in tags if not tag[-1]]
self._shape = shape
self._datashape = (1,) + input_shape
self._datadtype = datadtype
self._dataoffset = data_offset
self._databytecounts = stripbytecounts
if contiguous:
# write remaining IFDs/tags later
self._tags = tags
# return offset and size of image data
if returnoffset:
return data_offset, sum(stripbytecounts)
def _write_remaining_pages(self):
"""Write outstanding IFDs and tags to file."""
if not self._tags or self._truncate:
return
fh = self._fh
fhpos = fh.tell()
if fhpos % 2:
fh.write(b"\0")
fhpos += 1
byteorder = self._byteorder
offsetformat = self._offsetformat
offsetsize = self._offsetsize
tagnoformat = self._tagnoformat
tagsize = self._tagsize
dataoffset = self._dataoffset
pagedatasize = sum(self._databytecounts)
pageno = self._shape[0] * self._datashape[0] - 1
def pack(fmt, *val):
return struct.pack(byteorder + fmt, *val)
# construct template IFD in memory
# need to patch offsets to next IFD and data before writing to disk
ifd = io.BytesIO()
ifd.write(pack(tagnoformat, len(self._tags)))
tagoffset = ifd.tell()
ifd.write(b"".join(t[1] for t in self._tags))
ifdoffset = ifd.tell()
ifd.write(pack(offsetformat, 0)) # offset to next IFD
# tag values
for tagindex, tag in enumerate(self._tags):
offset2value = tagoffset + tagindex * tagsize + offsetsize + 4
if tag[2]:
pos = ifd.tell()
if pos % 2: # tag value is expected to begin on word boundary
ifd.write(b"\0")
pos += 1
ifd.seek(offset2value)
try:
ifd.write(pack(offsetformat, pos + fhpos))
except Exception: # struct.error
if self._imagej:
warnings.warn("truncating ImageJ file")
self._truncate = True
return
raise ValueError("data too large for non-BigTIFF file")
ifd.seek(pos)
ifd.write(tag[2])
if tag[0] == self._tagoffsets:
# save strip/tile offsets for later updates
stripoffset2offset = offset2value
stripoffset2value = pos
elif tag[0] == self._tagoffsets:
# save strip/tile offsets for later updates
stripoffset2offset = None
stripoffset2value = offset2value
# size to word boundary
if ifd.tell() % 2:
ifd.write(b"\0")
# check if all IFDs fit in file
pos = fh.tell()
if not self._bigtiff and pos + ifd.tell() * pageno > 2**32 - 256:
if self._imagej:
warnings.warn("truncating ImageJ file")
self._truncate = True
return
raise ValueError("data too large for non-BigTIFF file")
# TODO: assemble IFD chain in memory
for _ in range(pageno):
# update pointer at IFD offset
pos = fh.tell()
fh.seek(self._ifdoffset)
fh.write(pack(offsetformat, pos))
fh.seek(pos)
self._ifdoffset = pos + ifdoffset
# update strip/tile offsets in IFD
dataoffset += pagedatasize # offset to image data
if stripoffset2offset is None:
ifd.seek(stripoffset2value)
ifd.write(pack(offsetformat, dataoffset))
else:
ifd.seek(stripoffset2offset)
ifd.write(pack(offsetformat, pos + stripoffset2value))
ifd.seek(stripoffset2value)
stripoffset = dataoffset
for size in self._databytecounts:
ifd.write(pack(offsetformat, stripoffset))
stripoffset += size
# write IFD entry
fh.write(ifd.getvalue())
self._tags = None
self._datadtype = None
self._dataoffset = None
self._databytecounts = None
# do not reset _shape or _data_shape
def _write_image_description(self):
"""Write meta data to ImageDescription tag."""
if (
not self._datashape
or self._datashape[0] == 1
or self._descriptionoffset <= 0
):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._datashape, isrgb, colormapped, **self._metadata
)
else:
description = json_description(self._datashape, **self._metadata)
# rewrite description and its length to file
description = description.encode("utf-8")
description = description[: self._descriptionlen - 1]
pos = self._fh.tell()
self._fh.seek(self._descriptionoffset)
self._fh.write(description)
self._fh.seek(self._descriptionlenoffset)
self._fh.write(
struct.pack(self._byteorder + self._offsetformat, len(description) + 1)
)
self._fh.seek(pos)
self._descriptionoffset = 0
self._descriptionlenoffset = 0
self._descriptionlen = 0
def _now(self):
"""Return current date and time."""
return datetime.datetime.now()
def close(self):
"""Write remaining pages and close file handle."""
if not self._truncate:
self._write_remaining_pages()
self._write_image_description()
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
TiffWriter
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 1010410,
"end": 1010787
}
|
class ____(
sgqlc.types.Type,
Node,
AuditEntry,
OrganizationAuditEntryData,
RepositoryAuditEntryData,
TeamAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("is_ldap_mapped",)
is_ldap_mapped = sgqlc.types.Field(Boolean, graphql_name="isLdapMapped")
|
TeamAddRepositoryAuditEntry
|
python
|
huggingface__transformers
|
tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py
|
{
"start": 26711,
"end": 30061
}
|
class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None
def test_inference_object_detection_head(self):
model = RTDetrV2ForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
expected_shape_logits = torch.Size((1, 300, model.config.num_labels))
self.assertEqual(outputs.logits.shape, expected_shape_logits)
expectations = Expectations(
{
(None, None): [[-3.7047, -5.1914, -6.1787], [-4.0108, -9.3449, -5.2047], [-4.1287, -4.7461, -5.8633]],
("cuda", 8): [[-3.7039, -5.1923, -6.1787], [-4.0106, -9.3452, -5.2045], [-4.1285, -4.7468, -5.8641]],
}
)
expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device)
expectations = Expectations(
{
(None, None): [[0.2582, 0.5497, 0.4764], [0.1684, 0.1985, 0.2120], [0.7665, 0.4146, 0.4669]],
}
)
expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, atol=2e-4, rtol=2e-4)
expected_shape_boxes = torch.Size((1, 300, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=2e-4, rtol=2e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
outputs, threshold=0.0, target_sizes=[image.size[::-1]]
)[0]
expectations = Expectations(
{
(None, None): [0.9652, 0.9599, 0.9462, 0.8613],
("cuda", 8): [0.9652, 0.9599, 0.9461, 0.8613],
}
)
expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device)
expected_labels = [15, 15, 65, 57]
expectations = Expectations(
{
(None, None): [
[3.4114e02, 2.5111e01, 6.3998e02, 3.7289e02],
[1.2780e01, 5.6346e01, 3.1767e02, 4.7134e02],
[3.9959e01, 7.3117e01, 1.7565e02, 1.1744e02],
[-1.0521e-01, 2.9717e00, 6.3989e02, 4.7362e02],
],
("cuda", 8): [
[3.4114e02, 2.5111e01, 6.3998e02, 3.7289e02],
[1.2779e01, 5.6347e01, 3.1767e02, 4.7134e02],
[3.9959e01, 7.3117e01, 1.7565e02, 1.1744e02],
[-1.0502e-01, 2.9707e00, 6.3989e02, 4.7362e02],
],
}
)
expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=2e-4)
self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, atol=1e-3, rtol=2e-4)
|
RTDetrV2ModelIntegrationTest
|
python
|
Textualize__textual
|
tests/test_screen_modes.py
|
{
"start": 288,
"end": 838
}
|
class ____(Screen[None]):
BINDINGS = [
("1", "one", "Mode 1"),
("2", "two", "Mode 2"),
("p", "push", "Push rnd scrn"),
("o", "app.pop_screen", "Pop"),
("r", "remove", "Remove mode 1"),
]
def action_one(self) -> None:
self.app.switch_mode("one")
def action_two(self) -> None:
self.app.switch_mode("two")
def action_fruits(self) -> None:
self.app.switch_mode("fruits")
def action_push(self) -> None:
self.app.push_screen(FruitModal())
|
ScreenBindingsMixin
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/validators/actions/test_ticketing.py
|
{
"start": 1378,
"end": 1501
}
|
class ____(BaseTicketingActionValidatorTest):
__test__ = True
provider = Action.Type.GITHUB
|
TestGithubActionValidator
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 990186,
"end": 992069
}
|
class ____(sgqlc.types.Type):
"""A Stripe Connect account for receiving sponsorship funds from
GitHub Sponsors.
"""
__schema__ = github_schema
__field_names__ = (
"account_id",
"billing_country_or_region",
"country_or_region",
"is_active",
"sponsors_listing",
"stripe_dashboard_url",
)
account_id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="accountId")
"""The account number used to identify this Stripe Connect account."""
billing_country_or_region = sgqlc.types.Field(String, graphql_name="billingCountryOrRegion")
"""The name of the country or region of an external account, such as
a bank account, tied to the Stripe Connect account. Will only
return a value when queried by the maintainer of the associated
GitHub Sponsors profile themselves, or by an admin of the
sponsorable organization.
"""
country_or_region = sgqlc.types.Field(String, graphql_name="countryOrRegion")
"""The name of the country or region of the Stripe Connect account.
Will only return a value when queried by the maintainer of the
associated GitHub Sponsors profile themselves, or by an admin of
the sponsorable organization.
"""
is_active = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isActive")
"""Whether this Stripe Connect account is currently in use for the
associated GitHub Sponsors profile.
"""
sponsors_listing = sgqlc.types.Field(sgqlc.types.non_null("SponsorsListing"), graphql_name="sponsorsListing")
"""The GitHub Sponsors profile associated with this Stripe Connect
account.
"""
stripe_dashboard_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="stripeDashboardUrl")
"""The URL to access this Stripe Connect account on Stripe's website."""
|
StripeConnectAccount
|
python
|
google__pytype
|
pytype/pytd/pytd.py
|
{
"start": 17785,
"end": 18317
}
|
class ____(GenericType):
"""Special generic type for a Callable that specifies its argument types.
A Callable with N arguments has N+1 parameters. The first N parameters are
the individual argument types, in the order of the arguments, and the last
parameter is the return type.
"""
@property
def args(self):
return self.parameters[:-1]
@property
def ret(self):
return self.parameters[-1]
def has_paramspec(self):
return self.args and isinstance(self.args[0], (ParamSpec, Concatenate))
|
CallableType
|
python
|
milvus-io__pymilvus
|
tests/test_orm_iterator.py
|
{
"start": 6404,
"end": 7613
}
|
class ____:
"""Test iterator-related constants and their usage"""
def test_batch_size_limits(self):
"""Test batch size calculation respects limits"""
# Test minimum batch size
batch_size = 1
next_param = {PARAMS: {}}
result = extend_batch_size(batch_size, next_param, False)
assert result >= 1
# Test maximum batch size
batch_size = MAX_BATCH_SIZE * 10
result = extend_batch_size(batch_size, next_param, False)
assert result <= MAX_BATCH_SIZE
def test_extension_rate_application(self):
"""Test search extension rate is applied correctly"""
batch_size = 100
next_param = {PARAMS: {}}
# Without extension
result_no_ext = extend_batch_size(batch_size, next_param, False)
# With extension
result_with_ext = extend_batch_size(batch_size, next_param, True)
# Extension should increase batch size
assert result_with_ext >= result_no_ext
# Extension should be by DEFAULT_SEARCH_EXTENSION_RATE
if result_with_ext < MAX_BATCH_SIZE:
assert result_with_ext == batch_size * DEFAULT_SEARCH_EXTENSION_RATE
|
TestIteratorConstants
|
python
|
pappasam__jedi-language-server
|
jedi_language_server/initialization_options.py
|
{
"start": 2881,
"end": 2947
}
|
class ____:
enable: bool = False
@light_dataclass
|
SemanticTokens
|
python
|
celery__celery
|
t/integration/test_tasks.py
|
{
"start": 20015,
"end": 22942
}
|
class ____:
"""Tests for tasks called via apply() method."""
def test_apply_single_task_ids(self, manager):
"""Test that a single task called via apply() has correct IDs."""
@manager.app.task(bind=True)
def single_apply_task(self):
return {
'task_id': self.request.id,
'parent_id': self.request.parent_id,
'root_id': self.request.root_id,
}
result = single_apply_task.apply()
data = result.get()
# Single task should have no parent and root_id should equal task_id
assert data['parent_id'] is None
assert data['root_id'] == data['task_id']
def test_apply_nested_parent_child_relationship(self, manager):
"""Test parent-child relationship when one task calls another via apply()."""
@manager.app.task(bind=True)
def grandchild_task(task_self):
return {
'task_id': task_self.request.id,
'parent_id': task_self.request.parent_id,
'root_id': task_self.request.root_id,
'name': 'grandchild_task'
}
@manager.app.task(bind=True)
def child_task(task_self):
# Call grandchild task via apply()
grandchild_data = grandchild_task.apply().get()
return {
'task_id': task_self.request.id,
'parent_id': task_self.request.parent_id,
'root_id': task_self.request.root_id,
'name': 'child_task',
'grandchild_data': grandchild_data
}
@manager.app.task(bind=True)
def parent_task(task_self):
# Call child task via apply()
child_data = child_task.apply().get()
parent_data = {
'task_id': task_self.request.id,
'parent_id': task_self.request.parent_id,
'root_id': task_self.request.root_id,
'name': 'parent_task',
'child_data': child_data
}
return parent_data
result = parent_task.apply()
parent_data = result.get()
child_data = parent_data['child_data']
grandchild_data = child_data['grandchild_data']
# Verify parent task
assert parent_data['name'] == 'parent_task'
assert parent_data['parent_id'] is None
assert parent_data['root_id'] == parent_data['task_id']
# Verify child task
assert child_data['name'] == 'child_task'
assert child_data['parent_id'] == parent_data['task_id']
assert child_data['root_id'] == parent_data['task_id']
# Verify grandchild task
assert grandchild_data['name'] == 'grandchild_task'
assert grandchild_data['parent_id'] == child_data['task_id']
assert grandchild_data['root_id'] == parent_data['task_id']
|
test_apply_tasks
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_bitcoin_address_positive_balance.py
|
{
"start": 2098,
"end": 4926
}
|
class ____(ColumnMapExpectation):
"""Expect column values Bitcoin address has got positive balance (>0)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"bc1qazcm763858nkj2dj986etajv6wquslv8uxwczt",
"bc1qa5wkgaew2dkv56kfvj49j0av5nml45x9ek9hz6",
"3LYJfcfHPXYJreMsASk2jkn69LWEYKzexb",
"37XuVSEpWW4trkfmvWzegTHQt7BdktSKUs",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_bitcoin_address_positive_balance"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator", "blockcypher"],
}
if __name__ == "__main__":
ExpectColumnValuesBitcoinAddressPositiveBalance().print_diagnostic_checklist()
|
ExpectColumnValuesBitcoinAddressPositiveBalance
|
python
|
zostera__django-bootstrap4
|
example/app/forms.py
|
{
"start": 405,
"end": 2732
}
|
class ____(forms.Form):
"""Form with a variety of widgets to test bootstrap4 rendering."""
date = forms.DateField(required=False)
datetime = forms.SplitDateTimeField(widget=AdminSplitDateTime(), required=False)
subject = forms.CharField(
max_length=100,
help_text="my_help_text",
required=True,
widget=forms.TextInput(attrs={"placeholder": "placeholdertest"}),
)
xss_field = forms.CharField(label='XSS" onmouseover="alert(\'Hello, XSS\')" foo="', max_length=100)
password = forms.CharField(widget=forms.PasswordInput)
message = forms.CharField(required=False, help_text="<i>my_help_text</i>")
sender = forms.EmailField(label="Sender © unicode", help_text='E.g., "me@example.com"')
secret = forms.CharField(initial=42, widget=forms.HiddenInput)
cc_myself = forms.BooleanField(
required=False, help_text='cc stands for "carbon copy." You will get a copy in your mailbox.'
)
select1 = forms.ChoiceField(choices=RADIO_CHOICES)
select2 = forms.MultipleChoiceField(choices=RADIO_CHOICES, help_text="Check as many as you like.")
select3 = forms.ChoiceField(choices=MEDIA_CHOICES)
select4 = forms.MultipleChoiceField(choices=MEDIA_CHOICES, help_text="Check as many as you like.")
category1 = forms.ChoiceField(choices=RADIO_CHOICES, widget=forms.RadioSelect)
category2 = forms.MultipleChoiceField(
choices=RADIO_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
category3 = forms.ChoiceField(widget=forms.RadioSelect, choices=MEDIA_CHOICES)
category4 = forms.MultipleChoiceField(
choices=MEDIA_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
category5 = forms.ChoiceField(widget=RadioSelectButtonGroup, choices=MEDIA_CHOICES)
addon = forms.CharField(widget=forms.TextInput(attrs={"addon_before": "before", "addon_after": "after"}))
required_css_class = "bootstrap4-req"
# Set this to allow tests to work properly in Django 1.10+
# More information, see issue #337
use_required_attribute = False
def clean(self):
cleaned_data = super().clean()
raise forms.ValidationError("This error was added to show the non field errors styling.")
return cleaned_data
|
TestForm
|
python
|
kamyu104__LeetCode-Solutions
|
Python/paint-fence.py
|
{
"start": 29,
"end": 517
}
|
class ____(object):
def numWays(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
if n == 0:
return 0
elif n == 1:
return k
ways = [0] * 3
ways[0] = k
ways[1] = (k - 1) * ways[0] + k
for i in xrange(2, n):
ways[i % 3] = (k - 1) * (ways[(i - 1) % 3] + ways[(i - 2) % 3])
return ways[(n - 1) % 3]
# Time: O(n)
# Space: O(n)
# DP solution.
|
Solution
|
python
|
gevent__gevent
|
src/gevent/testing/testcase.py
|
{
"start": 5144,
"end": 6752
}
|
class ____(gevent.Timeout):
_expire_info = ''
def __init__(self, timeout, method='Not Given'):
gevent.Timeout.__init__(
self,
timeout,
'%r: test timed out (set class __timeout__ to increase)\n' % (method,),
ref=False
)
def _on_expiration(self, prev_greenlet, ex):
from gevent.util import format_run_info
loop = gevent.get_hub().loop
debug_info = 'N/A'
if hasattr(loop, 'debug'):
debug_info = [str(s) for s in loop.debug()]
run_info = format_run_info()
self._expire_info = 'Loop Debug:\n%s\nRun Info:\n%s' % (
'\n'.join(debug_info), '\n'.join(run_info)
)
gevent.Timeout._on_expiration(self, prev_greenlet, ex)
def __str__(self):
s = gevent.Timeout.__str__(self)
s += self._expire_info
return s
def _wrap_timeout(timeout, method):
if timeout is None:
return method
@wraps(method)
def timeout_wrapper(self, *args, **kwargs):
with TestTimeout(timeout, method):
return method(self, *args, **kwargs)
return timeout_wrapper
def _get_class_attr(classDict, bases, attr, default=AttributeError):
NONE = object()
value = classDict.get(attr, NONE)
if value is not NONE:
return value
for base in bases:
value = getattr(base, attr, NONE)
if value is not NONE:
return value
if default is AttributeError:
raise AttributeError('Attribute %r not found\n%s\n%s\n' % (attr, classDict, bases))
return default
|
TestTimeout
|
python
|
lepture__authlib
|
authlib/oauth2/rfc9101/errors.py
|
{
"start": 789,
"end": 999
}
|
class ____(OAuth2Error):
error = "request_uri_not_supported"
description = "The authorization server does not support the use of the request_uri parameter."
status_code = 400
|
RequestUriNotSupportedError
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_cond_format19.py
|
{
"start": 345,
"end": 4802
}
|
class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 1)
worksheet.write("A2", 2)
worksheet.write("A3", 3)
worksheet.write("A4", 4)
worksheet.write("A5", 5)
worksheet.write("A6", 6)
worksheet.write("A7", 7)
worksheet.write("A8", 8)
worksheet.write("A9", 9)
worksheet.write("A10", 10)
worksheet.write("A11", 11)
worksheet.write("A12", 12)
worksheet.conditional_format(
"A1:A12",
{
"type": "data_bar",
"min_value": 5,
"mid_value": 52, # Should be ignored.
"max_value": 90,
"min_type": "num",
"mid_type": "percentile", # Should be ignored.
"max_type": "percent",
"bar_color": "#8DB4E3",
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A12"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>3</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>4</v>
</c>
</row>
<row r="5" spans="1:1">
<c r="A5">
<v>5</v>
</c>
</row>
<row r="6" spans="1:1">
<c r="A6">
<v>6</v>
</c>
</row>
<row r="7" spans="1:1">
<c r="A7">
<v>7</v>
</c>
</row>
<row r="8" spans="1:1">
<c r="A8">
<v>8</v>
</c>
</row>
<row r="9" spans="1:1">
<c r="A9">
<v>9</v>
</c>
</row>
<row r="10" spans="1:1">
<c r="A10">
<v>10</v>
</c>
</row>
<row r="11" spans="1:1">
<c r="A11">
<v>11</v>
</c>
</row>
<row r="12" spans="1:1">
<c r="A12">
<v>12</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A12">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="num" val="5"/>
<cfvo type="percent" val="90"/>
<color rgb="FF8DB4E3"/>
</dataBar>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
|
TestAssembleWorksheet
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_access_log_middleware.py
|
{
"start": 11065,
"end": 11589
}
|
class ____(LogCaptureAPITestCase):
endpoint = "dummy-endpoint"
def test_access_log_success(self) -> None:
token = None
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["event:read", "org:read"])
self.login_as(user=self.create_user())
self.get_success_response(extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"})
assert len(self.captured_logs) == 0
@all_silo_test
|
TestAccessLogSuccessNotLoggedInDev
|
python
|
getsentry__sentry
|
src/sentry/utils/cursors.py
|
{
"start": 199,
"end": 355
}
|
class ____(Protocol):
def __call__(self, value: T, for_prev: bool = ...) -> CursorValue: ...
OnResultCallable = Callable[[Sequence[T]], Any]
|
KeyCallable
|
python
|
pypa__warehouse
|
warehouse/utils/db/orm.py
|
{
"start": 114,
"end": 599
}
|
class ____(Exception):
"""Raised when there is no active SQLAlchemy session"""
def orm_session_from_obj(obj) -> Session:
"""
Returns the session from the ORM object.
Adds guard, but it should never happen.
The guard helps with type hinting, as the object_session function
returns Optional[Session] type.
"""
session = object_session(obj)
if not session:
raise NoSessionError("Object does not have a session")
return session
|
NoSessionError
|
python
|
pytorch__pytorch
|
test/fx/test_z3_gradual_types.py
|
{
"start": 1313,
"end": 2553
}
|
class ____(unittest.TestCase):
def test_dim(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([1, 2])):
y = x.dim()
return y
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
y_res = z3.z3.Int(2)
self.assertEqual(s.model()[y_res], 2)
def test_reshape(self):
"""
In this example, we prove that some nodes must
always have a fixed shape regardless of the input
"""
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn):
y = x.view(100)
tmp = y.size()[0]
return tmp
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
dim = z3.Int(4)
self.assertEqual(s.model()[dim], 100)
# print(s.model()[dim])
|
TorchDynamoUseCases
|
python
|
django__django
|
tests/aggregation/tests.py
|
{
"start": 92133,
"end": 99768
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(age=1)
cls.a2 = Author.objects.create(age=2)
cls.p1 = Publisher.objects.create(num_awards=1)
cls.p2 = Publisher.objects.create(num_awards=0)
cls.b1 = Book.objects.create(
name="b1",
publisher=cls.p1,
pages=100,
rating=4.5,
price=10,
contact=cls.a1,
pubdate=datetime.date.today(),
)
cls.b1.authors.add(cls.a1)
cls.b2 = Book.objects.create(
name="b2",
publisher=cls.p2,
pages=1000,
rating=3.2,
price=50,
contact=cls.a2,
pubdate=datetime.date.today(),
)
cls.b2.authors.add(cls.a1, cls.a2)
def test_unused_aliased_aggregate_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.alias(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_unused_aliased_aggregate_and_annotation_reverse_fk(self):
Book.objects.create(
name="b3",
publisher=self.p2,
pages=1000,
rating=4.2,
price=50,
contact=self.a2,
pubdate=datetime.date.today(),
)
qs = Publisher.objects.annotate(
total_pages=Sum("book__pages"),
good_book=Case(
When(book__rating__gt=4.0, then=Value(True)),
default=Value(False),
),
)
self.assertEqual(qs.count(), 3)
def test_unused_aliased_aggregate_and_annotation_reverse_fk_grouped(self):
Book.objects.create(
name="b3",
publisher=self.p2,
pages=1000,
rating=4.2,
price=50,
contact=self.a2,
pubdate=datetime.date.today(),
)
qs = (
Publisher.objects.values("id", "name")
.annotate(total_pages=Sum("book__pages"))
.annotate(
good_book=Case(
When(book__rating__gt=4.0, then=Value(True)),
default=Value(False),
)
)
)
self.assertEqual(qs.count(), 3)
def test_non_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
name_lower=Lower("name"),
).count()
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 1, "No subquery wrapping required")
self.assertNotIn("name_lower", sql)
def test_unreferenced_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.annotate(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_referenced_aggregate_annotation_kept(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
authors_count=Count("authors"),
).aggregate(Avg("authors_count"))
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertEqual(sql.count("authors_count"), 2)
def test_referenced_group_by_annotation_kept(self):
queryset = Book.objects.values(pages_mod=Mod("pages", 10)).annotate(
mod_count=Count("*")
)
self.assertEqual(queryset.count(), 1)
def test_referenced_subquery_requires_wrapping(self):
total_books_qs = (
Author.book_set.through.objects.values("author")
.filter(author=OuterRef("pk"))
.annotate(total=Count("book"))
)
with self.assertNumQueries(1) as ctx:
aggregate = (
Author.objects.annotate(
total_books=Subquery(total_books_qs.values("total"))
)
.values("pk", "total_books")
.aggregate(
sum_total_books=Sum("total_books"),
)
)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 3, "Subquery wrapping required")
self.assertEqual(aggregate, {"sum_total_books": 3})
def test_referenced_composed_subquery_requires_wrapping(self):
total_books_qs = (
Author.book_set.through.objects.values("author")
.filter(author=OuterRef("pk"))
.annotate(total=Count("book"))
)
with self.assertNumQueries(1) as ctx:
aggregate = (
Author.objects.annotate(
total_books=Subquery(total_books_qs.values("total")),
total_books_ref=F("total_books") / 1,
)
.values("pk", "total_books_ref")
.aggregate(
sum_total_books=Sum("total_books_ref"),
)
)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 3, "Subquery wrapping required")
self.assertEqual(aggregate, {"sum_total_books": 3})
@skipUnlessDBFeature("supports_over_clause")
def test_referenced_window_requires_wrapping(self):
total_books_qs = Book.objects.annotate(
avg_publisher_pages=Coalesce(
Window(Avg("pages"), partition_by=F("publisher")),
0.0,
)
)
with self.assertNumQueries(1) as ctx:
aggregate = total_books_qs.aggregate(
sum_avg_publisher_pages=Sum("avg_publisher_pages"),
books_count=Count("id"),
)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertEqual(
aggregate,
{"sum_avg_publisher_pages": 1100.0, "books_count": 2},
)
def test_aggregate_reference_lookup_rhs(self):
aggregates = Author.objects.annotate(
max_book_author=Max("book__authors"),
).aggregate(count=Count("id", filter=Q(id=F("max_book_author"))))
self.assertEqual(aggregates, {"count": 1})
def test_aggregate_reference_lookup_rhs_iter(self):
aggregates = Author.objects.annotate(
max_book_author=Max("book__authors"),
).aggregate(count=Count("id", filter=Q(id__in=[F("max_book_author"), 0])))
self.assertEqual(aggregates, {"count": 1})
@skipUnlessDBFeature("supports_select_union")
def test_aggregate_combined_queries(self):
# Combined queries could have members in their values select mask while
# others have them in their annotation mask which makes annotation
# pruning complex to implement hence why it's not implemented.
qs = Author.objects.values(
"age",
other=Value(0),
).union(
Book.objects.values(
age=Value(0),
other=Value(0),
)
)
self.assertEqual(qs.count(), 3)
|
AggregateAnnotationPruningTests
|
python
|
google__pytype
|
pytype/blocks/blocks_test.py
|
{
"start": 616,
"end": 891
}
|
class ____(unittest.TestCase, test_utils.MakeCodeMixin):
"""A base class for implementing tests testing blocks.py."""
# These tests check disassembled bytecode, which varies from version to
# version, so we fix the test version.
python_version = (3, 10)
|
BaseBlocksTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_build_notifications.py
|
{
"start": 692,
"end": 12572
}
|
class ____(TestCase):
def setUp(self):
self.project = get(Project, slug="test", language="en")
self.version = get(Version, project=self.project, slug="1.0")
self.build = get(Build, version=self.version, commit="abc1234567890")
@mock.patch("readthedocs.builds.managers.log")
def test_send_notification_none_if_wrong_version_pk(self, mock_logger):
self.assertFalse(Version.objects.filter(pk=345343).exists())
send_build_notifications(
version_pk=345343,
build_pk=None,
event=WebHookEvent.BUILD_FAILED,
)
mock_logger.warning.assert_called_with(
"Version not found for given kwargs.",
kwargs={"pk": 345343},
)
def test_send_notification_none(self):
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(len(mail.outbox), 0)
@requests_mock.Mocker(kw="mock_request")
def test_send_webhook_notification(self, mock_request):
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
)
self.assertEqual(webhook.exchanges.all().count(), 0)
mock_request.post(webhook.url)
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(webhook.exchanges.all().count(), 1)
self.assertEqual(len(mail.outbox), 0)
def test_dont_send_webhook_notifications_for_external_versions(self):
webhook = get(WebHook, url="https://example.com/webhook/", project=self.project)
self.version.type = EXTERNAL
self.version.save()
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(webhook.exchanges.all().count(), 0)
def test_webhook_notification_has_content_type_header(self):
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
)
data = json.dumps(
{
"name": self.project.name,
"slug": self.project.slug,
"build": {
"id": self.build.id,
"commit": self.build.commit,
"state": self.build.state,
"success": self.build.success,
"date": self.build.date.strftime("%Y-%m-%d %H:%M:%S"),
},
}
)
with mock.patch("readthedocs.builds.tasks.requests.post") as post:
post.return_value = None
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
post.assert_called_once_with(
webhook.url,
data=data,
headers={
"content-type": "application/json",
"X-Hub-Signature": mock.ANY,
"User-Agent": mock.ANY,
"X-RTD-Event": mock.ANY,
},
timeout=mock.ANY,
)
@requests_mock.Mocker(kw="mock_request")
def test_send_webhook_custom_on_given_event(self, mock_request):
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[
WebHookEvent.objects.get(name=WebHookEvent.BUILD_TRIGGERED),
WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED),
],
payload="{}",
)
mock_request.post(webhook.url)
for event, _ in WebHookEvent.EVENTS:
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=event,
)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(webhook.exchanges.all().count(), 2)
@requests_mock.Mocker(kw="mock_request")
def test_send_webhook_custom_payload(self, mock_request):
self.build.date = timezone.datetime(
year=2021,
month=3,
day=15,
hour=15,
minute=30,
second=4,
)
self.build.save()
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED)],
payload=json.dumps(
{
"message": "Event {{ event }} triggered for {{ version.slug }}",
"extra-data": {
"build_id": "{{build.id}}",
"build_commit": "{{build.commit}}",
"build_url": "{{ build.url }}",
"build_docsurl": "{{ build.docs_url }}",
"build_start_date": "{{ build.start_date }}",
"organization_slug": "{{ organization.slug }}",
"organization_name": "{{ organization.name }}",
"project_slug": "{{ project.slug }}",
"project_name": "{{ project.name }}",
"project_url": "{{ project.url }}",
"version_slug": "{{ version.slug }}",
"version_name": "{{ version.name }}",
"invalid_substitution": "{{ invalid.substitution }}",
},
}
),
)
post = mock_request.post(webhook.url)
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertTrue(post.called_once)
request = post.request_history[0]
self.assertEqual(
request.json(),
{
"message": f"Event build:failed triggered for {self.version.slug}",
"extra-data": {
"build_id": str(self.build.pk),
"build_commit": self.build.commit,
"build_url": f"https://readthedocs.org{self.build.get_absolute_url()}",
"build_docsurl": "http://test.readthedocs.io/en/1.0/",
"build_start_date": "2021-03-15T15:30:04",
"organization_name": "",
"organization_slug": "",
"project_name": self.project.name,
"project_slug": self.project.slug,
"project_url": f"https://readthedocs.org{self.project.get_absolute_url()}",
"version_name": self.version.verbose_name,
"version_slug": self.version.slug,
"invalid_substitution": "{{ invalid.substitution }}",
},
},
)
self.assertEqual(webhook.exchanges.all().count(), 1)
@requests_mock.Mocker(kw="mock_request")
def test_webhook_headers(self, mock_request):
secret = "1234"
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED)],
payload='{"sign": "me"}',
secret=secret,
)
post = mock_request.post(webhook.url)
signature = hmac.new(
key=secret.encode(),
msg=webhook.payload.encode(),
digestmod=hashlib.sha256,
).hexdigest()
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertTrue(post.called_once)
request = post.request_history[0]
headers = request.headers
self.assertTrue(headers["User-Agent"].startswith("Read-the-Docs/"))
self.assertEqual(headers["X-Hub-Signature"], signature)
self.assertEqual(headers["X-RTD-Event"], WebHookEvent.BUILD_FAILED)
self.assertEqual(webhook.exchanges.all().count(), 1)
@requests_mock.Mocker(kw="mock_request")
def test_webhook_record_exchange(self, mock_request):
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED)],
payload='{"request": "ok"}',
)
post = mock_request.post(
webhook.url,
json={"response": "ok"},
headers={"X-Greeting": "Hi!"},
status_code=201,
)
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertTrue(post.called_once)
self.assertEqual(webhook.exchanges.all().count(), 1)
exchange = webhook.exchanges.all().first()
self.assertTrue(
exchange.request_headers["User-Agent"].startswith("Read-the-Docs/")
)
self.assertIn("X-Hub-Signature", exchange.request_headers)
self.assertEqual(exchange.request_body, webhook.payload)
self.assertEqual(exchange.response_headers, {"X-Greeting": "Hi!"})
self.assertEqual(exchange.response_body, '{"response": "ok"}')
self.assertEqual(exchange.status_code, 201)
def test_send_email_notification_on_build_failure(self):
get(EmailHook, project=self.project)
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(len(mail.outbox), 1)
def test_dont_send_email_notifications_for_external_versions(self):
get(EmailHook, project=self.project)
self.version.type = EXTERNAL
self.version.save()
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(len(mail.outbox), 0)
def test_dont_send_email_notifications_for_other_events(self):
"""Email notifications are only send for BUILD_FAILED events."""
get(EmailHook, project=self.project)
for event in [WebHookEvent.BUILD_PASSED, WebHookEvent.BUILD_TRIGGERED]:
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=event,
)
self.assertEqual(len(mail.outbox), 0)
@requests_mock.Mocker(kw="mock_request")
def test_send_email_and_webhook_notification(self, mock_request):
get(EmailHook, project=self.project)
webhook = get(
WebHook,
url="https://example.com/webhook/",
project=self.project,
events=[WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
)
mock_request.post(webhook.url)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(webhook.exchanges.all().count(), 0)
send_build_notifications(
version_pk=self.version.pk,
build_pk=self.build.pk,
event=WebHookEvent.BUILD_FAILED,
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(webhook.exchanges.all().count(), 1)
|
BuildNotificationsTests
|
python
|
django__django
|
tests/basic/tests.py
|
{
"start": 22308,
"end": 26978
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
# Create an Article.
cls.a = Article(
id=None,
headline="Swallow programs in Python",
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
cls.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = "Parrot programs in Python"
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertSequenceEqual(Article.objects.all(), [self.a])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith="Swallow"), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(
Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a
)
self.assertEqual(
Article.objects.get(
pub_date__year=2005, pub_date__month=7, pub_date__day=28
),
self.a,
)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(
Article.objects.get(headline="Swallow programs in Python"), self.a
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2005),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__week_day=5),
[self.a],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(
ObjectDoesNotExist, "Article matching query does not exist."
):
Article.objects.get(
id__exact=2000,
)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(
ObjectDoesNotExist, "Article matching query does not exist."
):
Article.objects.get(
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertSequenceEqual(Article.objects.filter(pk__in=[self.a.id]), [self.a])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline="Swallow bites Python",
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(
headline__startswith="Swallow",
)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(
pub_date__year=2005,
)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
|
ModelLookupTest
|
python
|
pytorch__pytorch
|
test/jit/fixtures_srcs/test_upgrader_models_generation.py
|
{
"start": 179,
"end": 766
}
|
class ____(TestCase):
def test_all_modules(self):
for a_module in ALL_MODULES:
module_name = type(a_module).__name__
self.assertTrue(
isinstance(a_module, torch.nn.Module),
f"The module {module_name} "
f"is not a torch.nn.module instance. "
f"Please ensure it's a subclass of torch.nn.module in fixtures_src.py"
f"and it's registered as an instance in ALL_MODULES in generated_models.py",
)
if __name__ == "__main__":
run_tests()
|
TestUpgraderModelGeneration
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclass18.py
|
{
"start": 170,
"end": 316
}
|
class ____:
a: Final = 1
v1 = DC1(1)
reveal_type(v1.a, expected_text="Literal[1]")
v2 = DC1()
reveal_type(v2.a, expected_text="Literal[1]")
|
DC1
|
python
|
google__jax
|
jax/_src/numpy/setops.py
|
{
"start": 36243,
"end": 36412
}
|
class ____(NamedTuple):
"""Struct returned by :func:`jax.numpy.unique_all`."""
values: Array
indices: Array
inverse_indices: Array
counts: Array
|
_UniqueAllResult
|
python
|
django__django
|
tests/template_tests/syntax_tests/i18n/test_get_language_info.py
|
{
"start": 154,
"end": 1927
}
|
class ____(SimpleTestCase):
libraries = {
"custom": "template_tests.templatetags.custom",
"i18n": "django.templatetags.i18n",
}
# retrieving language information
@setup(
{
"i18n28_2": "{% load i18n %}"
'{% get_language_info for "de" as l %}'
"{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}"
}
)
def test_i18n28_2(self):
output = self.engine.render_to_string("i18n28_2")
self.assertEqual(output, "de: German/Deutsch bidi=False")
@setup(
{
"i18n29": "{% load i18n %}"
"{% get_language_info for LANGUAGE_CODE as l %}"
"{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}"
}
)
def test_i18n29(self):
output = self.engine.render_to_string("i18n29", {"LANGUAGE_CODE": "fi"})
self.assertEqual(output, "fi: Finnish/suomi bidi=False")
# Test whitespace in filter arguments
@setup(
{
"i18n38": "{% load i18n custom %}"
'{% get_language_info for "de"|noop:"x y" as l %}'
"{{ l.code }}: {{ l.name }}/{{ l.name_local }}/"
"{{ l.name_translated }} bidi={{ l.bidi }}"
}
)
def test_i18n38(self):
with translation.override("cs"):
output = self.engine.render_to_string("i18n38")
self.assertEqual(output, "de: German/Deutsch/německy bidi=False")
@setup({"template": "{% load i18n %}{% get_language_info %}"})
def test_no_for_as(self):
msg = "'get_language_info' requires 'for string as variable' (got [])"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
|
I18nGetLanguageInfoTagTests
|
python
|
keras-team__keras
|
keras/src/trainers/data_adapters/grain_dataset_adapter_test.py
|
{
"start": 293,
"end": 606
}
|
class ____(grain.sources.RandomAccessDataSource):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __getitem__(self, idx):
return np.expand_dims(np.array([self.start + idx]), axis=0)
def __len__(self):
return self.stop - self.start
|
Range2DSource
|
python
|
has2k1__plotnine
|
plotnine/_mpl/layout_manager/_spaces.py
|
{
"start": 17205,
"end": 21136
}
|
class ____(_side_spaces):
"""
Space in the figure for artists above the panel area
Ordered from the edge of the figure and going inwards
"""
plot_margin: float = 0
tag_alignment: float = 0
plot_tag_margin_top: float = 0
plot_tag: float = 0
plot_tag_margin_bottom: float = 0
margin_alignment: float = 0
plot_title_margin_top: float = 0
plot_title: float = 0
plot_title_margin_bottom: float = 0
plot_subtitle_margin_top: float = 0
plot_subtitle: float = 0
plot_subtitle_margin_bottom: float = 0
legend: float = 0
legend_box_spacing: float = 0
strip_text_x_extra_height: float = 0
def _calculate(self):
items = self.items
theme = self.items.plot.theme
calc = self.items.calc
W, H = theme.getp("figure_size")
F = W / H
self.plot_margin = theme.getp("plot_margin_top") * F
if self.has_tag and items.plot_tag:
m = theme.get_margin("plot_tag").fig
self.plot_tag_margin_top = m.t
self.plot_tag = calc.height(items.plot_tag)
self.plot_tag_margin_bottom = m.b
if items.plot_title:
m = theme.get_margin("plot_title").fig
self.plot_title_margin_top = m.t * F
self.plot_title = calc.height(items.plot_title)
self.plot_title_margin_bottom = m.b * F
if items.plot_subtitle:
m = theme.get_margin("plot_subtitle").fig
self.plot_subtitle_margin_top = m.t * F
self.plot_subtitle = calc.height(items.plot_subtitle)
self.plot_subtitle_margin_bottom = m.b * F
if items.legends and items.legends.top:
self.legend = self.legend_height
self.legend_box_spacing = theme.getp("legend_box_spacing") * F
self.strip_text_x_extra_height = items.strip_text_x_extra_height("top")
# Adjust plot_margin to make room for ylabels that protude well
# beyond the axes
# NOTE: This adjustment breaks down when the protrusion is large
protrusion = items.axis_text_y_top_protrusion("all")
adjustment = protrusion - (self.total - self.plot_margin)
if adjustment > 0:
self.plot_margin += adjustment
@property
def offset(self) -> float:
"""
Distance from top of the figure to the top of the plot gridspec
----------------(1, 1)
| ^ |
| |-dy |
| v |
| ---- |
| | | |
| | | |
| | | |
| ---- |
| |
(0, 0)----------------
"""
return self.gs.bbox_relative.y1 - 1
def y1(self, item: str) -> float:
"""
Lower y-coordinate in figure space of the item
"""
return self.to_figure_space(1 - self.sum_incl(item))
def y2(self, item: str) -> float:
"""
Higher y-coordinate in figure space of the item
"""
return self.to_figure_space(1 - self.sum_upto(item))
@property
def panel_top_relative(self):
"""
Top (relative to the gridspec) of the panels in figure dimensions
"""
return 1 - self.total
@property
def panel_top(self):
"""
Top of the panels in figure space
"""
return self.to_figure_space(self.panel_top_relative)
@property
def plot_top(self):
"""
Distance up to the top-most artist in figure space
"""
return self.y2("legend")
@property
def tag_height(self):
"""
The height of the tag including the margins
"""
return (
self.plot_tag_margin_top
+ self.plot_tag
+ self.plot_tag_margin_bottom
)
@dataclass
|
top_spaces
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-operations-to-make-array-continuous.py
|
{
"start": 33,
"end": 749
}
|
class ____(object):
def minOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def unique(nums):
left = 0
for right in xrange(1, len(nums)):
if nums[left] != nums[right]:
left += 1
nums[left] = nums[right]
return left
def erase(nums, i):
while len(nums) > i+1:
nums.pop()
n = len(nums)
nums.sort()
erase(nums, unique(nums))
result = l = 0
for i in xrange(len(nums)):
if nums[i] <= nums[i-l]+n-1:
l += 1
return n-l
# Time: O(nlogn)
# Space: O(n)
|
Solution
|
python
|
optuna__optuna
|
tests/storages_tests/rdb_tests/test_models.py
|
{
"start": 2696,
"end": 4839
}
|
class ____:
@staticmethod
def test_find_by_study_and_key(session: Session) -> None:
study = StudyModel(study_id=1, study_name="test-study")
session.add(
StudySystemAttributeModel(study_id=study.study_id, key="sample-key", value_json="1")
)
session.commit()
attr = StudySystemAttributeModel.find_by_study_and_key(study, "sample-key", session)
assert attr is not None and "1" == attr.value_json
assert StudySystemAttributeModel.find_by_study_and_key(study, "not-found", session) is None
@staticmethod
def test_where_study_id(session: Session) -> None:
sample_study = StudyModel(study_id=1, study_name="test-study")
empty_study = StudyModel(study_id=2, study_name="test-study")
session.add(
StudySystemAttributeModel(
study_id=sample_study.study_id, key="sample-key", value_json="1"
)
)
assert 1 == len(StudySystemAttributeModel.where_study_id(sample_study.study_id, session))
assert 0 == len(StudySystemAttributeModel.where_study_id(empty_study.study_id, session))
# Check the case of unknown study_id.
assert 0 == len(StudySystemAttributeModel.where_study_id(-1, session))
@staticmethod
def test_cascade_delete_on_study(session: Session) -> None:
study_id = 1
direction = StudyDirectionModel(direction=StudyDirection.MINIMIZE, objective=0)
study = StudyModel(study_id=study_id, study_name="test-study", directions=[direction])
study.system_attributes.append(
StudySystemAttributeModel(study_id=study_id, key="sample-key1", value_json="1")
)
study.system_attributes.append(
StudySystemAttributeModel(study_id=study_id, key="sample-key2", value_json="2")
)
session.add(study)
session.commit()
assert 2 == len(StudySystemAttributeModel.where_study_id(study_id, session))
session.delete(study)
session.commit()
assert 0 == len(StudySystemAttributeModel.where_study_id(study_id, session))
|
TestStudySystemAttributeModel
|
python
|
xlwings__xlwings
|
xlwings/_xlmac.py
|
{
"start": 55502,
"end": 57377
}
|
class ____(Collection, base_classes.Pictures):
_attr = "pictures"
_kw = kw.picture
_wrap = Picture
def add(
self,
filename,
link_to_file,
save_with_document,
left,
top,
width,
height,
anchor,
):
if anchor:
top, left = anchor.top, anchor.left
version = VersionNumber(self.parent.book.app.version)
if not link_to_file and version >= 15:
# Office 2016 for Mac is sandboxed. This path seems to work without the
# need of granting access explicitly.
xlwings_picture = (
os.path.expanduser("~")
+ "/Library/Containers/com.microsoft.Excel/Data/xlwings_picture.png"
)
shutil.copy2(filename, xlwings_picture)
filename = xlwings_picture
sheet_index = self.parent.xl.entry_index.get()
picture = Picture(
self.parent,
self.parent.xl.make(
at=self.parent.book.xl.sheets[sheet_index],
new=kw.picture,
with_properties={
kw.file_name: posix_to_hfs_path(filename),
kw.link_to_file: link_to_file,
kw.save_with_document: save_with_document,
kw.width: width,
kw.height: height,
# Top and left: see below
kw.top: 0,
kw.left_position: 0,
},
).name.get(),
)
# Top and left cause an issue in the make command above
# if they are not set to 0 when width & height are -1
picture.top = top if top else 0
picture.left = left if left else 0
if not link_to_file and version >= 15:
os.remove(filename)
return picture
|
Pictures
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/model_query.py
|
{
"start": 580,
"end": 633
}
|
class ____:
def foo(self, x):
return 0
|
Base
|
python
|
scikit-learn__scikit-learn
|
sklearn/_loss/loss.py
|
{
"start": 21052,
"end": 23005
}
|
class ____(BaseLoss):
"""Quantile loss aka pinball loss, for regression.
Domain:
y_true and y_pred all real numbers
quantile in (0, 1)
Link:
y_pred = raw_prediction
For a given sample x_i, the pinball loss is defined as::
loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)
rho_{quantile}(u) = u * (quantile - 1_{u<0})
= -u *(1 - quantile) if u < 0
u * quantile if u >= 0
Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().
Note that the exact hessian = 0 almost everywhere (except at one point, therefore
differentiable = False). Optimization routines like in HGBT, however, need a
hessian > 0. Therefore, we assign 1.
Additional Attributes
---------------------
quantile : float
The quantile level of the quantile to be estimated. Must be in range (0, 1).
"""
differentiable = False
need_update_leaves_values = True
def __init__(self, sample_weight=None, quantile=0.5):
check_scalar(
quantile,
"quantile",
target_type=numbers.Real,
min_val=0,
max_val=1,
include_boundaries="neither",
)
super().__init__(
closs=CyPinballLoss(quantile=float(quantile)),
link=IdentityLink(),
)
self.approx_hessian = True
self.constant_hessian = sample_weight is None
def fit_intercept_only(self, y_true, sample_weight=None):
"""Compute raw_prediction of an intercept-only model.
This is the weighted median of the target, i.e. over the samples
axis=0.
"""
if sample_weight is None:
return np.percentile(y_true, 100 * self.closs.quantile, axis=0)
else:
return _weighted_percentile(
y_true, sample_weight, 100 * self.closs.quantile
)
|
PinballLoss
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec10.py
|
{
"start": 591,
"end": 1174
}
|
class ____:
def __init__(self):
self._lock = RLock()
@with_lock
def test_1(self, param1: int) -> str: ...
@with_lock
def test_2(self) -> str: ...
@with_lock
def test_3(cls: MyClass, param1: int) -> str: ...
testClass = MyClass()
res1 = testClass.test_1(42)
reveal_type(res1, expected_text="str")
res2 = testClass.test_2()
reveal_type(res2, expected_text="str")
res3 = test_3(testClass, 42)
reveal_type(res3, expected_text="str")
res4: Callable[[MyClass, int], str] = with_lock(test_3)
reveal_type(res4, expected_text="(MyClass, int) -> str")
|
MyClass
|
python
|
PyCQA__pylint
|
tests/functional/u/used/used_before_assignment.py
|
{
"start": 4306,
"end": 6035
}
|
class ____: # pylint: disable=missing-docstring
"""https://github.com/pylint-dev/pylint/issues/9674"""
def skip(self, msg) -> NoReturn:
raise Exception(msg) # pylint: disable=broad-exception-raised
def print_platform_specific_command(self):
if sys.platform == "linux":
cmd = "ls"
elif sys.platform == "win32":
cmd = "dir"
else:
self.skip("only runs on Linux/Windows")
print(cmd)
# https://github.com/pylint-dev/pylint/issues/9941
try:
x = 1 / 0
except ZeroDivisionError:
print(x) # [used-before-assignment]
try:
y = 1 / 0
print(y)
except ZeroDivisionError:
print(y) # FALSE NEGATIVE
# https://github.com/pylint-dev/pylint/issues/9642
def __():
for i in []:
if i:
fail1 = 42
print(fail1) # [possibly-used-before-assignment]
for i in []:
fail2 = 42
print(fail2) # FALSE NEGATIVE
# https://github.com/pylint-dev/pylint/issues/9689
def outer_():
a = 1
def inner_try():
try:
nonlocal a
print(a) # [used-before-assignment] FALSE POSITIVE
a = 2
print(a)
except:
pass
def inner_while():
i = 0
while i < 2:
i += 1
nonlocal a
print(a) # [used-before-assignment] FALSE POSITIVE
a = 2
print(a)
def inner_for():
for _ in range(2):
nonlocal a
print(a)
a = 2
print(a)
inner_try()
inner_while()
inner_for()
def conditional_import():
if input():
import os.path
else:
os = None
if os:
pass
|
PlatformChecks
|
python
|
scikit-learn__scikit-learn
|
sklearn/compose/tests/test_target.py
|
{
"start": 11722,
"end": 12735
}
|
class ____(DummyRegressor):
def fit(self, X, y, sample_weight=None, check_input=True):
# on the test below we force this to false, we make sure this is
# actually passed to the regressor
assert not check_input
return super().fit(X, y, sample_weight)
def test_transform_target_regressor_pass_fit_parameters():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
)
regr.fit(X, y, check_input=False)
assert regr.transformer_.fit_counter == 1
def test_transform_target_regressor_route_pipeline():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
)
estimators = [("normalize", StandardScaler()), ("est", regr)]
pip = Pipeline(estimators)
pip.fit(X, y, **{"est__check_input": False})
assert regr.transformer_.fit_counter == 1
|
DummyRegressorWithExtraFitParams
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/trainer/connectors/test_callback_connector.py
|
{
"start": 3890,
"end": 9686
}
|
class ____(Callback):
def __init__(self, unique=None, other=None):
self._unique = unique
self._other = other
@property
def state_key(self):
return self._generate_state_key(unique=self._unique)
def state_dict(self):
return {"content1": self._unique}
def test_all_callback_states_saved_before_checkpoint_callback(tmp_path):
"""Test that all callback states get saved even if the ModelCheckpoint is not given as last and when there are
multiple callbacks of the same type."""
callback0 = StatefulCallbackContent0()
callback1 = StatefulCallbackContent1(unique="one")
callback2 = StatefulCallbackContent1(unique="two", other=2)
checkpoint_callback = ModelCheckpoint(dirpath=tmp_path, filename="all_states")
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
max_steps=1,
limit_val_batches=1,
callbacks=[
callback0,
# checkpoint callback does not have to be at the end
checkpoint_callback,
# callback2 and callback3 have the same type
callback1,
callback2,
],
)
trainer.fit(model)
ckpt = torch.load(str(tmp_path / "all_states.ckpt"), weights_only=True)
state0 = ckpt["callbacks"]["StatefulCallbackContent0"]
state1 = ckpt["callbacks"]["StatefulCallbackContent1{'unique': 'one'}"]
state2 = ckpt["callbacks"]["StatefulCallbackContent1{'unique': 'two'}"]
assert "content0" in state0
assert state0["content0"] == 0
assert "content1" in state1
assert state1["content1"] == "one"
assert "content1" in state2
assert state2["content1"] == "two"
assert (
"ModelCheckpoint{'monitor': None, 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None}" in ckpt["callbacks"]
)
@patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
def test_attach_model_callbacks():
"""Test that the callbacks defined in the model and through Trainer get merged correctly."""
def _attach_callbacks(trainer_callbacks, model_callbacks):
model = LightningModule()
model.configure_callbacks = lambda: model_callbacks
has_progress_bar = any(isinstance(cb, ProgressBar) for cb in trainer_callbacks + model_callbacks)
trainer = Trainer(
enable_checkpointing=False,
enable_progress_bar=has_progress_bar,
enable_model_summary=False,
callbacks=trainer_callbacks,
)
trainer.strategy._lightning_module = model
cb_connector = _CallbackConnector(trainer)
cb_connector._attach_model_callbacks()
return trainer
early_stopping1 = EarlyStopping(monitor="red")
early_stopping2 = EarlyStopping(monitor="blue")
progress_bar = TQDMProgressBar()
lr_monitor = LearningRateMonitor()
grad_accumulation = GradientAccumulationScheduler({1: 1})
# no callbacks
trainer = _attach_callbacks(trainer_callbacks=[], model_callbacks=[])
assert trainer.callbacks == []
# callbacks of different types
trainer = _attach_callbacks(trainer_callbacks=[early_stopping1], model_callbacks=[progress_bar])
assert trainer.callbacks == [early_stopping1, progress_bar]
# same callback type twice, different instance
trainer = _attach_callbacks(
trainer_callbacks=[progress_bar, EarlyStopping(monitor="red")],
model_callbacks=[early_stopping1],
)
assert trainer.callbacks == [progress_bar, early_stopping1]
# multiple callbacks of the same type in trainer
trainer = _attach_callbacks(
trainer_callbacks=[
LearningRateMonitor(),
EarlyStopping(monitor="yellow"),
LearningRateMonitor(),
EarlyStopping(monitor="black"),
],
model_callbacks=[early_stopping1, lr_monitor],
)
assert trainer.callbacks == [early_stopping1, lr_monitor]
# multiple callbacks of the same type, in both trainer and model
trainer = _attach_callbacks(
trainer_callbacks=[
LearningRateMonitor(),
progress_bar,
EarlyStopping(monitor="yellow"),
LearningRateMonitor(),
EarlyStopping(monitor="black"),
],
model_callbacks=[early_stopping1, lr_monitor, grad_accumulation, early_stopping2],
)
assert trainer.callbacks == [progress_bar, early_stopping1, lr_monitor, grad_accumulation, early_stopping2]
class CustomProgressBar(TQDMProgressBar): ...
custom_progress_bar = CustomProgressBar()
# a custom callback that overrides ours
trainer = _attach_callbacks(trainer_callbacks=[progress_bar], model_callbacks=[custom_progress_bar])
assert trainer.callbacks == [custom_progress_bar]
# edge case
bare_callback = Callback()
trainer = _attach_callbacks(trainer_callbacks=[bare_callback], model_callbacks=[custom_progress_bar])
assert trainer.callbacks == [bare_callback, custom_progress_bar]
def test_attach_model_callbacks_override_info(caplog):
"""Test that the logs contain the info about overriding callbacks returned by configure_callbacks."""
model = LightningModule()
model.configure_callbacks = lambda: [LearningRateMonitor(), EarlyStopping(monitor="foo")]
trainer = Trainer(
enable_checkpointing=False, callbacks=[EarlyStopping(monitor="foo"), LearningRateMonitor(), TQDMProgressBar()]
)
trainer.strategy._lightning_module = model
cb_connector = _CallbackConnector(trainer)
with caplog.at_level(logging.INFO):
cb_connector._attach_model_callbacks()
assert "existing callbacks passed to Trainer: EarlyStopping, LearningRateMonitor" in caplog.text
|
StatefulCallbackContent1
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py
|
{
"start": 4165,
"end": 4256
}
|
class ____(TypedDict):
x: ReadOnly[NotRequired[int]]
y: ReadOnly[Required[int]]
|
TD_B1
|
python
|
pytorch__pytorch
|
torch/_subclasses/meta_utils.py
|
{
"start": 21975,
"end": 22234
}
|
class ____(Protocol, Generic[_TensorT_cov]):
def __call__(
self,
arg: Callable[[], torch.Tensor],
/,
**kwargs: Unpack[_MetaTensorCallbackKwargs],
) -> _TensorT_cov: ...
@dataclass(frozen=True)
|
_MetaTensorCallbackOptDevice
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 17512,
"end": 17610
}
|
class ____(models.Model):
recipient = models.ForeignKey(Manager, on_delete=models.CASCADE)
|
Bonus
|
python
|
walkccc__LeetCode
|
solutions/409. Longest Palindrome/409.py
|
{
"start": 0,
"end": 267
}
|
class ____:
def longestPalindrome(self, s: str) -> int:
ans = 0
count = collections.Counter(s)
for c in count.values():
ans += c if c % 2 == 0 else c - 1
hasOddCount = any(c % 2 == 1 for c in count.values())
return ans + hasOddCount
|
Solution
|
python
|
numba__numba
|
numba/tests/test_looplifting.py
|
{
"start": 6250,
"end": 8383
}
|
class ____(TestCase):
def test_annotate_1(self):
"""
Verify that annotation works as expected with one lifted loop
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
for i in range(x.size):
x[i] += 1
return x
cfoo = jit(forceobj=True)(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, xcopy + 1)
buf = StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line = foo.__code__.co_firstlineno + 2 # 2 lines down from func head
self.assertIn("Loop at line {line}".format(line=line), annotation)
self.assertIn("Has 1 overloads", annotation)
def test_annotate_2(self):
"""
Verify that annotation works as expected with two lifted loops
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
# first lifted loop
for i in range(x.size):
x[i] += 1
# second lifted loop
for j in range(x.size):
x[j] *= 2
return x
cfoo = jit(forceobj=True)(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, (xcopy + 1) * 2)
buf = StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line1 = foo.__code__.co_firstlineno + 3 # 3 lines down from func head
line2 = foo.__code__.co_firstlineno + 6 # 6 lines down from func head
self.assertIn("Loop at line {line}".format(line=line1), annotation)
self.assertIn("Loop at line {line}".format(line=line2), annotation)
|
TestLoopLiftingAnnotate
|
python
|
PyCQA__pylint
|
pylint/checkers/spelling.py
|
{
"start": 2974,
"end": 3316
}
|
class ____(Filter): # type: ignore[misc]
"""Parent class for filters using regular expressions.
This filter skips any words the match the expression
assigned to the class attribute ``_pattern``.
"""
_pattern: Pattern[str]
def _skip(self, word: str) -> bool:
return bool(self._pattern.match(word))
|
RegExFilter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
|
{
"start": 1302,
"end": 10463
}
|
class ____(test_util.TensorFlowTestCase):
def assertRaggedMapInnerValuesReturns(self,
op,
expected,
args=(),
kwargs=None):
kwargs = kwargs or {}
result = ragged_functional_ops.map_flat_values(op, *args, **kwargs)
self.assertAllEqual(result, expected)
def testDocStringExamples(self):
"""Test the examples in apply_op_to_ragged_values.__doc__."""
rt = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6]])
v1 = ragged_functional_ops.map_flat_values(array_ops.ones_like, rt)
v2 = ragged_functional_ops.map_flat_values(math_ops.multiply, rt, rt)
v3 = ragged_functional_ops.map_flat_values(math_ops.add, rt, 5)
self.assertAllEqual(v1, [[1, 1, 1], [], [1, 1], [1]])
self.assertAllEqual(v2, [[1, 4, 9], [], [16, 25], [36]])
self.assertAllEqual(v3, [[6, 7, 8], [], [9, 10], [11]])
def testOpWithSingleRaggedTensorArg(self):
tensor = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=array_ops.zeros_like,
args=(tensor,),
expected=[[0, 0, 0], [], [0, 0]])
def testOpWithTwoRaggedTensorArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(x, y), expected=[[3, 2, 12], [], [4, 25]])
def testOpWithRaggedTensorAndScalarArgs(self):
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(5, y), expected=[[5, 10, 15], [], [20, 25]])
def testOpWithThreeRaggedTensorArgs(self):
condition = ragged_factory_ops.constant(
[[True, True, False], [], [True, False]]) # pyformat: disable
x = ragged_factory_ops.constant([['a', 'b', 'c'], [], ['d', 'e']])
y = ragged_factory_ops.constant([['A', 'B', 'C'], [], ['D', 'E']])
self.assertRaggedMapInnerValuesReturns(
op=array_ops.where_v2,
args=(condition, x, y),
expected=[[b'a', b'b', b'C'], [], [b'd', b'E']])
def testOpWithRaggedTensorListArg(self):
x = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
y = ragged_factory_ops.constant([[10, 20, 30], [], [40, 50]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.add_n,
args=([x, y, x],),
expected=[[12, 24, 36], [], [48, 60]])
def testOpWithKeywordArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
kwargs=dict(x=x, y=y),
expected=[[3, 2, 12], [], [4, 25]])
def testOpWithMixedPositionalAndKeywordArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x,),
kwargs=dict(y=y),
expected=[[3, 2, 12], [], [4, 25]])
def testNonElementWiseOp(self):
x = ragged_factory_ops.constant(
[[[3, 1, 4], [1, 5, 9], [2, 6, 5]], [], [[3, 5, 8], [9, 7, 9]]],
ragged_rank=1)
self.assertRaggedMapInnerValuesReturns(
op=math_ops.reduce_sum,
kwargs={
'input_tensor': x,
'axis': 1,
},
expected=[[8, 15, 13], [], [16, 25]])
def testOpWithRaggedRankGreaterThanOne(self):
# ragged_rank=0
x0 = [3, 1, 4, 1, 5, 9, 2, 6, 5]
y0 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertAllEqual(
math_ops.multiply(x0, y0), [3, 2, 12, 4, 25, 54, 14, 48, 45])
# ragged_rank=1
x1 = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5], [9, 2], [6, 5]])
y1 = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6, 7], [8, 9]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x1, y1),
expected=[[3, 2, 12], [], [4, 25], [54, 14], [48, 45]])
# ragged_rank=2
x2 = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]],
[[9, 2], [6, 5]]])
y2 = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]],
[[6, 7], [8, 9]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x2, y2),
expected=[[[3, 2, 12]], # row 0
[], # row 1
[[], [4, 25]], # row 2
[[54, 14], [48, 45]] # row 3
]) # pyformat: disable
# ragged_rank=3
x3 = ragged_factory_ops.constant([[[[3, 1, 4]], []], [], [[[], [1, 5]]],
[[[9, 2], [6, 5]]]])
y3 = ragged_factory_ops.constant([[[[1, 2, 3]], []], [], [[[], [4, 5]]],
[[[6, 7], [8, 9]]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x3, y3),
expected=[
[[[3, 2, 12]], []], # row 0
[], # row 1
[[[], [4, 25]]], # row 2
[[[54, 14], [48, 45]]] # row 3
]) # pyformat: disable
def testOpWithRaggedRankThree(self):
x = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]]])
y = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x, y),
expected=[[[3, 2, 12]], [], [[], [4, 25]]])
def testOpWithInnerValuesOnly(self):
x = constant_op.constant([[1, 2], [3, 4], [5, 6]])
y = constant_op.constant(2)
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(x, y), expected=[[2, 4], [6, 8], [10, 12]])
def testRaggedTensorSplitsRaggedRankMismatchError(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[[3, 1, 4], []], [], [[1, 5]]])
with self.assertRaisesRegex(
ValueError, r'All ragged inputs must have the same ragged_rank.'):
ragged_functional_ops.map_flat_values(math_ops.add, x, y)
def testRaggedTensorSplitsValueMismatchError(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1], [2, 3], [4, 5]])
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
r'partitions have incompatible'):
ragged_functional_ops.map_flat_values(math_ops.add, x, y)
z_splits = array_ops.placeholder_with_default(
constant_op.constant([0, 3], dtypes.int64), None)
z = ragged_tensor.RaggedTensor.from_row_splits([0, 1, 2], z_splits)
with self.assertRaisesRegex(
ValueError,
r"Input RaggedTensors' flat_values must all have the same "
r'outer-dimension size. Got sizes: \{3, 5\}'):
ragged_functional_ops.map_flat_values(math_ops.add, x, z)
def testRaggedTensorShapeMismatchError(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
with self.assertRaisesRegex(
ValueError, r'tf.ragged.map_flat_values requires that the output of '
'`op` have the same outer-dimension size as flat_values of any ragged '
r'inputs. \(output shape: \(\); expected outer dimension size: 5\)'):
ragged_functional_ops.map_flat_values(math_ops.argmax, x)
def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
splits1 = array_ops.placeholder_with_default(
constant_op.constant([0, 3, 3, 5], dtypes.int64), None)
splits2 = array_ops.placeholder_with_default(
constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
x = ragged_tensor.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1)
y = ragged_tensor.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
r'partitions have incompatible'):
self.evaluate(ragged_functional_ops.map_flat_values(math_ops.add, x, y))
def testRaggedMapFnPreservesUniformRowLength(self):
# x and y are equal, except that x has uniform_row_length and y does not.
x = ragged_tensor.RaggedTensor.from_uniform_row_length(
ragged_factory_ops.constant([[1, 2], [3]]), uniform_row_length=2)
y = ragged_factory_ops.constant([[[1, 2], [3]]])
a = ragged_functional_ops.map_flat_values(math_ops.add, x, y)
self.assertAllEqual(x.uniform_row_length, a.uniform_row_length)
b = ragged_functional_ops.map_flat_values(math_ops.add, y, x)
self.assertAllEqual(x.uniform_row_length, b.uniform_row_length)
c = ragged_functional_ops.map_flat_values(math_ops.add_n, [x, x])
self.assertAllEqual(x.uniform_row_length, c.uniform_row_length)
d = ragged_functional_ops.map_flat_values(math_ops.add_n, [y, x, y])
self.assertAllEqual(x.uniform_row_length, d.uniform_row_length)
if __name__ == '__main__':
googletest.main()
|
RaggedMapInnerValuesOpTest
|
python
|
doocs__leetcode
|
solution/0100-0199/0144.Binary Tree Preorder Traversal/Solution2.py
|
{
"start": 192,
"end": 596
}
|
class ____:
def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans = []
if root is None:
return ans
stk = [root]
while stk:
node = stk.pop()
ans.append(node.val)
if node.right:
stk.append(node.right)
if node.left:
stk.append(node.left)
return ans
|
Solution
|
python
|
numpy__numpy
|
numpy/distutils/command/config.py
|
{
"start": 1229,
"end": 20334
}
|
class ____(old_config):
old_config.user_options += [
('fcompiler=', None, "specify the Fortran compiler type"),
]
def initialize_options(self):
self.fcompiler = None
old_config.initialize_options(self)
def _check_compiler (self):
old_config._check_compiler(self)
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
if sys.platform == 'win32' and (self.compiler.compiler_type in
('msvc', 'intelw', 'intelemw')):
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
# initialize call query_vcvarsall, which throws an OSError, and
# causes an error along the way without much information. We try to
# catch it here, hoping it is early enough, and print a helpful
# message instead of Error: None.
if not self.compiler.initialized:
try:
self.compiler.initialize()
except OSError as e:
msg = textwrap.dedent("""\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2015 as of this writing).
Original exception was: %s, and the Compiler class was %s
============================================================================""") \
% (e, self.compiler.__class__.__name__)
print(textwrap.dedent("""\
============================================================================"""))
raise distutils.errors.DistutilsPlatformError(msg) from e
# After MSVC is initialized, add an explicit /MANIFEST to linker
# flags. See issues gh-4245 and gh-4101 for details. Also
# relevant are issues 4431 and 16296 on the Python bug tracker.
from distutils import msvc9compiler
if msvc9compiler.get_build_version() >= 10:
for ldflags in [self.compiler.ldflags_shared,
self.compiler.ldflags_shared_debug]:
if '/MANIFEST' not in ldflags:
ldflags.append('/MANIFEST')
if not isinstance(self.fcompiler, FCompiler):
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
dry_run=self.dry_run, force=1,
c_compiler=self.compiler)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
if self.fcompiler.get_version():
self.fcompiler.customize_cmd(self)
self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
if self.compiler is None:
raise CompileError('%s compiler is not set' % (lang,))
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError) as e:
self.compiler = save_compiler
raise CompileError from e
self.compiler = save_compiler
return ret
def _compile (self, body, headers, include_dirs, lang):
src, obj = self._wrap_method(old_config._compile, lang,
(body, headers, include_dirs, lang))
# _compile in unixcompiler.py sometimes creates .d dependency files.
# Clean them up.
self.temp_files.append(obj + '.d')
return src, obj
def _link (self, body,
headers, include_dirs,
libraries, library_dirs, lang):
if self.compiler.compiler_type=='msvc':
libraries = (libraries or [])[:]
library_dirs = (library_dirs or [])[:]
if lang in ['f77', 'f90']:
lang = 'c' # always use system linker when using MSVC compiler
if self.fcompiler:
for d in self.fcompiler.library_dirs or []:
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
try:
d = subprocess.check_output(['cygpath',
'-w', d])
except (OSError, subprocess.CalledProcessError):
pass
else:
d = filepath_from_subprocess_output(d)
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
libraries.append(libname)
for libname in libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
copy_file(libfile, libfile2)
self.temp_files.append(libfile2)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s' \
% (libname, library_dirs))
elif self.compiler.compiler_type == 'mingw32':
generate_manifest(self)
return self._wrap_method(old_config._link, lang,
(body, headers, include_dirs,
libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = textwrap.dedent("""
int main(void)
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}""") % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = textwrap.dedent("""
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
;
return 0;
}""") % (symbol,)
return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = textwrap.dedent(r"""
int main(void) {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""") % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
# First check the type can be compiled
body = textwrap.dedent(r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
test_array [0] = 0
;
return 0;
}
""")
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
body = textwrap.dedent(r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
test_array [0] = 0
;
return 0;
}
""")
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
headers, include_dirs, 'c')
self._clean()
return size
except CompileError:
pass
# this fails to *compile* if size > sizeof(type)
body = textwrap.dedent(r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
test_array [0] = 0
;
return 0;
}
""")
# The principle is simple: we first find low and high bounds of size
# for the type, where low/high are looked up on a log scale. Then, we
# do a binary search to find the exact size between low and high
low = 0
mid = 0
while True:
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
break
except CompileError:
#log.info("failure to test for bound %d" % mid)
low = mid + 1
mid = 2 * mid + 1
high = mid
# Binary search:
while low != high:
mid = (high - low) // 2 + low
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
high = mid
except CompileError:
low = mid + 1
return low
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
if type(decl) == str:
body.append(decl)
else:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_funcs_once(self, funcs,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
"""Check a list of functions at once.
This is useful to speed up things, since all the functions in the funcs
list will be put in one compilation unit.
Arguments
---------
funcs : seq
list of functions to test
include_dirs : seq
list of header paths
libraries : seq
list of libraries to link the code snippet to
library_dirs : seq
list of library paths
decl : dict
for every (key, value), the declaration in the value will be
used for function in key. If a function is not in the
dictionary, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
done to the function f.
"""
self._check_compiler()
body = []
if decl:
for f, v in decl.items():
if v:
body.append("int %s (void);" % f)
# Handle MS intrinsics. See check_func for more info.
body.append("#ifdef _MSC_VER")
for func in funcs:
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
for f in funcs:
if f in call and call[f]:
if not (call_args and f in call_args and call_args[f]):
args = ''
else:
args = call_args[f]
body.append(" %s(%s);" % (f, args))
else:
body.append(" %s;" % f)
else:
for f in funcs:
body.append(" %s;" % f)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_inline(self):
"""Return the inline keyword recognized by the compiler, empty string
otherwise."""
return check_inline(self)
def check_restrict(self):
"""Return the restrict keyword recognized by the compiler, empty string
otherwise."""
return check_restrict(self)
def check_compiler_gcc(self):
"""Return True if the C compiler is gcc"""
return check_compiler_gcc(self)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
code, include):
return check_gcc_function_attribute_with_intrinsics(self, attribute,
name, code, include)
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
"""Return True if the GCC version is greater than or equal to the
specified version."""
return check_gcc_version_at_least(self, major, minor, patchlevel)
def get_output(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c", use_tee=None):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Returns the exit status code
of the program and its output.
"""
# 2008-11-16, RemoveMe
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
"Usage of get_output is deprecated: please do not \n"
"use it anymore, and avoid configuration checks \n"
"involving running executable on the target machine.\n"
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning, stacklevel=2)
self._check_compiler()
exitcode, output = 255, ''
try:
grabber = GrabStdout()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
except Exception:
output = grabber.data
grabber.restore()
raise
exe = os.path.join('.', exe)
try:
# specify cwd arg for consistency with
# historic usage pattern of exec_command()
# also, note that exe appears to be a string,
# which exec_command() handled, but we now
# use a list for check_output() -- this assumes
# that exe is always a single command
output = subprocess.check_output([exe], cwd='.')
except subprocess.CalledProcessError as exc:
exitstatus = exc.returncode
output = ''
except OSError:
# preserve the EnvironmentError exit status
# used historically in exec_command()
exitstatus = 127
output = ''
else:
output = filepath_from_subprocess_output(output)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
sig = os.WTERMSIG(exitstatus)
log.error('subprocess exited with signal %d' % (sig,))
if sig == signal.SIGINT:
# control-C
raise KeyboardInterrupt
else:
exitcode = exitstatus
log.info("success!")
except (CompileError, LinkError):
log.info("failure.")
self._clean()
return exitcode, output
|
config
|
python
|
getsentry__sentry
|
src/sentry/integrations/messaging/metrics.py
|
{
"start": 1867,
"end": 2745
}
|
class ____(IntegrationEventLifecycleMetric):
"""An instance to be recorded of a user interacting through a messaging app."""
interaction_type: MessagingInteractionType
spec: MessagingIntegrationSpec
# Optional attributes to populate extras
user: User | RpcUser | None = None
organization: Organization | RpcOrganization | None = None
def get_integration_domain(self) -> IntegrationDomain:
return IntegrationDomain.MESSAGING
def get_integration_name(self) -> str:
return self.spec.provider_slug
def get_interaction_type(self) -> str:
return str(self.interaction_type)
def get_extras(self) -> Mapping[str, Any]:
return {
"user_id": (self.user.id if self.user else None),
"organization_id": (self.organization.id if self.organization else None),
}
|
MessagingInteractionEvent
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/frames.py
|
{
"start": 104983,
"end": 108553
}
|
class ____(NonStrictDataModel):
"""
:param rule_index: Rule index
:type rule_index: int
:param name: Rule name
:type name: str
:param count: Number of frames matching this rule
:type count: int
:param accurate: True if the provided count is accurate. If False, 'reason'
will contain the reason why.
:type accurate: bool
:param reason: Reason for the count being inaccurate if 'accurate' is True,
empty otherwise.
:type reason: str
"""
_schema = {
"properties": {
"accurate": {
"description": (
"True if the provided count is accurate. If False, 'reason' will contain the reason why."
),
"type": ["boolean", "null"],
},
"count": {
"description": "Number of frames matching this rule",
"type": ["integer", "null"],
},
"name": {"description": "Rule name", "type": ["string", "null"]},
"reason": {
"description": "Reason for the count being inaccurate if 'accurate' is True, empty otherwise.",
"type": ["string", "null"],
},
"rule_index": {"description": "Rule index", "type": ["integer", "null"]},
},
"type": "object",
}
def __init__(
self,
rule_index=None,
name=None,
count=None,
accurate=None,
reason=None,
**kwargs
):
super(RuleCount, self).__init__(**kwargs)
self.rule_index = rule_index
self.name = name
self.count = count
self.accurate = accurate
self.reason = reason
@schema_property("rule_index")
def rule_index(self):
return self._property_rule_index
@rule_index.setter
def rule_index(self, value):
if value is None:
self._property_rule_index = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "rule_index", six.integer_types)
self._property_rule_index = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("count")
def count(self):
return self._property_count
@count.setter
def count(self, value):
if value is None:
self._property_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "count", six.integer_types)
self._property_count = value
@schema_property("accurate")
def accurate(self):
return self._property_accurate
@accurate.setter
def accurate(self, value):
if value is None:
self._property_accurate = None
return
self.assert_isinstance(value, "accurate", (bool,))
self._property_accurate = value
@schema_property("reason")
def reason(self):
return self._property_reason
@reason.setter
def reason(self, value):
if value is None:
self._property_reason = None
return
self.assert_isinstance(value, "reason", six.string_types)
self._property_reason = value
|
RuleCount
|
python
|
dask__dask
|
dask/_task_spec.py
|
{
"start": 17126,
"end": 18757
}
|
class ____(GraphNode):
value: Any
typ: type
__slots__ = tuple(__annotations__)
def __init__(self, key: Any, value: Any):
if key is None:
key = (type(value).__name__, next(_anom_count))
self.key = key
self.value = value
self.typ = type(value)
self._dependencies = _no_deps
@property
def data_producer(self) -> bool:
return True
def copy(self):
return DataNode(self.key, self.value)
def __call__(self, values=()):
return self.value
def __repr__(self):
return f"DataNode({self.value!r})"
def __reduce__(self):
return (DataNode, (self.key, self.value))
def __dask_tokenize__(self):
from dask.base import tokenize
return (type(self).__name__, tokenize(self.value))
def substitute(
self, subs: dict[KeyType, KeyType | GraphNode], key: KeyType | None = None
) -> DataNode:
if key is not None and key != self.key:
return DataNode(key, self.value)
return self
def __iter__(self):
return iter(self.value)
def _get_dependencies(obj: object) -> set | frozenset:
if isinstance(obj, TaskRef):
return {obj.key}
elif isinstance(obj, GraphNode):
return obj.dependencies
elif isinstance(obj, dict):
if not obj:
return _no_deps
return set().union(*map(_get_dependencies, obj.values()))
elif isinstance(obj, (list, tuple, frozenset, set)):
if not obj:
return _no_deps
return set().union(*map(_get_dependencies, obj))
return _no_deps
|
DataNode
|
python
|
numba__numba
|
numba/core/ir.py
|
{
"start": 11348,
"end": 19376
}
|
class ____(Inst):
"""
An IR expression (an instruction which can only be part of a larger
statement).
"""
def __init__(self, op, loc, **kws):
assert isinstance(op, str)
assert isinstance(loc, Loc)
self.op = op
self.loc = loc
self._kws = kws
def __getattr__(self, name):
if name.startswith('_'):
return Inst.__getattr__(self, name)
return self._kws[name]
def __setattr__(self, name, value):
if name in ('op', 'loc', '_kws'):
self.__dict__[name] = value
else:
self._kws[name] = value
@classmethod
def binop(cls, fn, lhs, rhs, loc):
assert isinstance(fn, BuiltinFunctionType)
assert isinstance(lhs, Var)
assert isinstance(rhs, Var)
assert isinstance(loc, Loc)
op = 'binop'
return cls(op=op, loc=loc, fn=fn, lhs=lhs, rhs=rhs,
static_lhs=UNDEFINED, static_rhs=UNDEFINED)
@classmethod
def inplace_binop(cls, fn, immutable_fn, lhs, rhs, loc):
assert isinstance(fn, BuiltinFunctionType)
assert isinstance(immutable_fn, BuiltinFunctionType)
assert isinstance(lhs, Var)
assert isinstance(rhs, Var)
assert isinstance(loc, Loc)
op = 'inplace_binop'
return cls(op=op, loc=loc, fn=fn, immutable_fn=immutable_fn,
lhs=lhs, rhs=rhs,
static_lhs=UNDEFINED, static_rhs=UNDEFINED)
@classmethod
def unary(cls, fn, value, loc):
assert isinstance(value, (str, Var, FunctionType))
assert isinstance(loc, Loc)
op = 'unary'
fn = UNARY_BUITINS_TO_OPERATORS.get(fn, fn)
return cls(op=op, loc=loc, fn=fn, value=value)
@classmethod
def call(cls, func, args, kws, loc, vararg=None, varkwarg=None, target=None):
assert isinstance(func, Var)
assert isinstance(loc, Loc)
op = 'call'
return cls(op=op, loc=loc, func=func, args=args, kws=kws,
vararg=vararg, varkwarg=varkwarg, target=target)
@classmethod
def build_tuple(cls, items, loc):
assert isinstance(loc, Loc)
op = 'build_tuple'
return cls(op=op, loc=loc, items=items)
@classmethod
def build_list(cls, items, loc):
assert isinstance(loc, Loc)
op = 'build_list'
return cls(op=op, loc=loc, items=items)
@classmethod
def build_set(cls, items, loc):
assert isinstance(loc, Loc)
op = 'build_set'
return cls(op=op, loc=loc, items=items)
@classmethod
def build_map(cls, items, size, literal_value, value_indexes, loc):
assert isinstance(loc, Loc)
op = 'build_map'
return cls(op=op, loc=loc, items=items, size=size,
literal_value=literal_value, value_indexes=value_indexes)
@classmethod
def pair_first(cls, value, loc):
assert isinstance(value, Var)
op = 'pair_first'
return cls(op=op, loc=loc, value=value)
@classmethod
def pair_second(cls, value, loc):
assert isinstance(value, Var)
assert isinstance(loc, Loc)
op = 'pair_second'
return cls(op=op, loc=loc, value=value)
@classmethod
def getiter(cls, value, loc):
assert isinstance(value, Var)
assert isinstance(loc, Loc)
op = 'getiter'
return cls(op=op, loc=loc, value=value)
@classmethod
def iternext(cls, value, loc):
assert isinstance(value, Var)
assert isinstance(loc, Loc)
op = 'iternext'
return cls(op=op, loc=loc, value=value)
@classmethod
def exhaust_iter(cls, value, count, loc):
assert isinstance(value, Var)
assert isinstance(count, int)
assert isinstance(loc, Loc)
op = 'exhaust_iter'
return cls(op=op, loc=loc, value=value, count=count)
@classmethod
def getattr(cls, value, attr, loc):
assert isinstance(value, Var)
assert isinstance(attr, str)
assert isinstance(loc, Loc)
op = 'getattr'
return cls(op=op, loc=loc, value=value, attr=attr)
@classmethod
def getitem(cls, value, index, loc):
assert isinstance(value, Var)
assert isinstance(index, Var)
assert isinstance(loc, Loc)
op = 'getitem'
fn = operator.getitem
return cls(op=op, loc=loc, value=value, index=index, fn=fn)
@classmethod
def typed_getitem(cls, value, dtype, index, loc):
assert isinstance(value, Var)
assert isinstance(loc, Loc)
op = 'typed_getitem'
return cls(op=op, loc=loc, value=value, dtype=dtype,
index=index)
@classmethod
def static_getitem(cls, value, index, index_var, loc):
assert isinstance(value, Var)
assert index_var is None or isinstance(index_var, Var)
assert isinstance(loc, Loc)
op = 'static_getitem'
fn = operator.getitem
return cls(op=op, loc=loc, value=value, index=index,
index_var=index_var, fn=fn)
@classmethod
def cast(cls, value, loc):
"""
A node for implicit casting at the return statement
"""
assert isinstance(value, Var)
assert isinstance(loc, Loc)
op = 'cast'
return cls(op=op, value=value, loc=loc)
@classmethod
def phi(cls, loc):
"""Phi node
"""
assert isinstance(loc, Loc)
return cls(op='phi', incoming_values=[], incoming_blocks=[], loc=loc)
@classmethod
def make_function(cls, name, code, closure, defaults, loc):
"""
A node for making a function object.
"""
assert isinstance(loc, Loc)
op = 'make_function'
return cls(op=op, name=name, code=code, closure=closure, defaults=defaults, loc=loc)
@classmethod
def null(cls, loc):
"""
A node for null value.
This node is not handled by type inference. It is only added by
post-typing passes.
"""
assert isinstance(loc, Loc)
op = 'null'
return cls(op=op, loc=loc)
@classmethod
def undef(cls, loc):
"""
A node for undefined value specifically from LOAD_FAST_AND_CLEAR opcode.
"""
assert isinstance(loc, Loc)
op = 'undef'
return cls(op=op, loc=loc)
@classmethod
def dummy(cls, op, info, loc):
"""
A node for a dummy value.
This node is a place holder for carrying information through to a point
where it is rewritten into something valid. This node is not handled
by type inference or lowering. It's presence outside of the interpreter
renders IR as illegal.
"""
assert isinstance(loc, Loc)
assert isinstance(op, str)
return cls(op=op, info=info, loc=loc)
def __repr__(self):
if self.op == 'call':
args = ', '.join(str(a) for a in self.args)
pres_order = self._kws.items() if config.DIFF_IR == 0 else sorted(self._kws.items())
kws = ', '.join('%s=%s' % (k, v) for k, v in pres_order)
vararg = '*%s' % (self.vararg,) if self.vararg is not None else ''
arglist = ', '.join(filter(None, [args, vararg, kws]))
return 'call %s(%s)' % (self.func, arglist)
elif self.op == 'binop':
lhs, rhs = self.lhs, self.rhs
if self.fn == operator.contains:
lhs, rhs = rhs, lhs
fn = OPERATORS_TO_BUILTINS.get(self.fn, self.fn)
return '%s %s %s' % (lhs, fn, rhs)
else:
pres_order = self._kws.items() if config.DIFF_IR == 0 else sorted(self._kws.items())
args = ('%s=%s' % (k, v) for k, v in pres_order)
return '%s(%s)' % (self.op, ', '.join(args))
def list_vars(self):
return self._rec_list_vars(self._kws)
def infer_constant(self):
raise ConstantInferenceError('%s' % self, loc=self.loc)
|
Expr
|
python
|
doocs__leetcode
|
solution/2800-2899/2863.Maximum Length of Semi-Decreasing Subarrays/Solution.py
|
{
"start": 0,
"end": 329
}
|
class ____:
def maxSubarrayLength(self, nums: List[int]) -> int:
d = defaultdict(list)
for i, x in enumerate(nums):
d[x].append(i)
ans, k = 0, inf
for x in sorted(d, reverse=True):
ans = max(ans, d[x][-1] - k + 1)
k = min(k, d[x][0])
return ans
|
Solution
|
python
|
jazzband__django-redis
|
django_redis/compressors/zlib.py
|
{
"start": 124,
"end": 538
}
|
class ____(BaseCompressor):
min_length = 15
preset = 6
def compress(self, value: bytes) -> bytes:
if len(value) > self.min_length:
return zlib.compress(value, self.preset)
return value
def decompress(self, value: bytes) -> bytes:
try:
return zlib.decompress(value)
except zlib.error as e:
raise CompressorError from e
|
ZlibCompressor
|
python
|
mwaskom__seaborn
|
seaborn/external/kde.py
|
{
"start": 2870,
"end": 13726
}
|
class ____:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = f"points have dimension {d}, dataset has dimension {self.d}"
raise ValueError(msg)
output_dtype = np.common_type(self.covariance, points)
result = zeros((m,), dtype=output_dtype)
whitening = linalg.cholesky(self.inv_cov)
scaled_dataset = dot(whitening, self.dataset)
scaled_points = dot(whitening, points)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = scaled_dataset[:, i, newaxis] - scaled_points
energy = sum(diff * diff, axis=0) / 2.0
result += self.weights[i]*exp(-energy)
else:
# loop over points
for i in range(m):
diff = scaled_dataset - scaled_points[:, i, newaxis]
energy = sum(diff * diff, axis=0) / 2.0
result[i] = sum(exp(-energy)*self.weights, axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
|
gaussian_kde
|
python
|
getsentry__sentry
|
tests/sentry/api/serializers/test_project_template.py
|
{
"start": 311,
"end": 2956
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.organization = self.create_organization()
self.project_template = self.create_project_template(organization=self.organization)
self.option = ProjectTemplateOption.objects.create(
project_template=self.project_template, key="key1", value="value1"
)
def test_serialize(self) -> None:
serializer = ProjectTemplateSerializer()
result = serializer.serialize(self.project_template, {}, self.user)
assert result == {
"id": self.project_template.id,
"name": self.project_template.name,
"createdAt": self.project_template.date_added,
"updatedAt": self.project_template.date_updated,
}
def test_serialize__expand_options(self) -> None:
serializer = ProjectTemplateSerializer(expand=[ProjectTemplateAttributes.OPTIONS])
attrs: Mapping[str, Any] = {
ProjectTemplateAttributes.OPTIONS: {
"key1": "value1",
}
}
result = serializer.serialize(self.project_template, attrs, self.user)
assert result == {
"id": self.project_template.id,
"name": self.project_template.name,
"createdAt": self.project_template.date_added,
"updatedAt": self.project_template.date_updated,
"options": {"key1": "value1"},
}
def test_get_attrs(self) -> None:
serializer = ProjectTemplateSerializer(expand=[ProjectTemplateAttributes.OPTIONS])
result = serializer.get_attrs([self.project_template], self.user)
assert result == {
self.project_template: {
ProjectTemplateAttributes.OPTIONS: {
"key1": "value1",
},
},
}
def test_get_attrs__without_options(self) -> None:
serializer = ProjectTemplateSerializer()
result = serializer.get_attrs([self.project_template], self.user)
assert result == {
self.project_template: {},
}
# other checks like looking at more attributes are validated with types
def test_expand(self) -> None:
serializer = ProjectTemplateSerializer(expand=[ProjectTemplateAttributes.OPTIONS])
assert serializer._expand(ProjectTemplateAttributes.OPTIONS) is True
def test_expand__without_being_set(self) -> None:
serializer = ProjectTemplateSerializer(expand=[])
assert serializer._expand(ProjectTemplateAttributes.OPTIONS) is False
|
ProjectTemplateSerializerTest
|
python
|
apache__airflow
|
airflow-core/src/airflow/dag_processing/collection.py
|
{
"start": 4864,
"end": 17165
}
|
class ____(NamedTuple):
latest_runs: dict[str, DagRun]
num_active_runs: dict[str, int]
@classmethod
def calculate(cls, dags: dict[str, LazyDeserializedDAG], *, session: Session) -> Self:
"""
Query the run counts from the db.
:param dags: dict of dags to query
"""
# Skip these queries entirely if no DAGs can be scheduled to save time.
if not any(dag.timetable.can_be_scheduled for dag in dags.values()):
return cls({}, {})
latest_runs = {run.dag_id: run for run in session.scalars(_get_latest_runs_stmt(dag_ids=dags.keys()))}
active_run_counts = DagRun.active_runs_of_dags(
dag_ids=dags.keys(),
exclude_backfill=True,
session=session,
)
return cls(latest_runs, active_run_counts)
def _update_dag_tags(tag_names: set[str], dm: DagModel, *, session: Session) -> None:
orm_tags = {t.name: t for t in dm.tags}
tags_to_delete = []
for name, orm_tag in orm_tags.items():
if name not in tag_names:
session.delete(orm_tag)
tags_to_delete.append(orm_tag)
tags_to_add = tag_names.difference(orm_tags)
if tags_to_delete:
# Remove deleted tags from the collection to keep it in sync
for tag in tags_to_delete:
dm.tags.remove(tag)
# Check if there's a potential case-only rename on MySQL (e.g., 'tag' -> 'TAG').
# MySQL uses case-insensitive collation for the (name, dag_id) primary key by default,
# which can cause duplicate key errors when renaming tags with only case changes.
if get_dialect_name(session) == "mysql":
orm_tags_lower = {name.lower(): name for name in orm_tags}
has_case_only_change = any(tag.lower() in orm_tags_lower for tag in tags_to_add)
if has_case_only_change:
# Force DELETE operations to execute before INSERT operations.
session.flush()
# Refresh the tags relationship from the database to reflect the deletions.
session.expire(dm, ["tags"])
dm.tags.extend(DagTag(name=name, dag_id=dm.dag_id) for name in tags_to_add)
def _update_dag_owner_links(dag_owner_links: dict[str, str], dm: DagModel, *, session: Session) -> None:
orm_dag_owner_attributes = {obj.owner: obj for obj in dm.dag_owner_links}
for owner, obj in orm_dag_owner_attributes.items():
try:
link = dag_owner_links[owner]
except KeyError:
session.delete(obj)
else:
if obj.link != link:
obj.link = link
dm.dag_owner_links.extend(
DagOwnerAttributes(dag_id=dm.dag_id, owner=owner, link=link)
for owner, link in dag_owner_links.items()
if owner not in orm_dag_owner_attributes
)
def _serialize_dag_capturing_errors(
dag: LazyDeserializedDAG, bundle_name, session: Session, bundle_version: str | None
):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
from airflow import settings
from airflow.models.dagcode import DagCode
from airflow.models.serialized_dag import SerializedDagModel
try:
# We can't use bulk_write_to_db as we want to capture each error individually
dag_was_updated = SerializedDagModel.write_dag(
dag,
bundle_name=bundle_name,
bundle_version=bundle_version,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
if not dag_was_updated:
# Check and update DagCode
DagCode.update_source_code(dag.dag_id, dag.fileloc)
if "FabAuthManager" in conf.get("core", "auth_manager"):
_sync_dag_perms(dag, session=session)
return []
except OperationalError:
raise
except Exception:
log.exception("Failed to write serialized DAG dag_id=%s fileloc=%s", dag.dag_id, dag.fileloc)
dagbag_import_error_traceback_depth = conf.getint("core", "dagbag_import_error_traceback_depth")
return [
(
(bundle_name, dag.relative_fileloc),
traceback.format_exc(limit=-dagbag_import_error_traceback_depth),
)
]
def _sync_dag_perms(dag: LazyDeserializedDAG, session: Session):
"""Sync DAG specific permissions."""
dag_id = dag.dag_id
log.debug("Syncing DAG permissions: %s to the DB", dag_id)
from airflow.providers.fab.www.security_appless import ApplessAirflowSecurityManager
security_manager = ApplessAirflowSecurityManager(session=session)
security_manager.sync_perm_for_dag(dag_id, dag.access_control)
def _update_dag_warnings(
dag_ids: list[str], warnings: set[DagWarning], warning_types: tuple[DagWarningType], session: Session
):
from airflow.models.dagwarning import DagWarning
stored_warnings = set(
session.scalars(
select(DagWarning).where(
DagWarning.dag_id.in_(dag_ids),
DagWarning.warning_type.in_(warning_types),
)
)
)
for warning_to_delete in stored_warnings - warnings:
session.delete(warning_to_delete)
for warning_to_add in warnings:
session.merge(warning_to_add)
def _update_import_errors(
files_parsed: set[tuple[str, str]],
bundle_name: str,
import_errors: dict[tuple[str, str], str],
session: Session,
):
from airflow.listeners.listener import get_listener_manager
# Check existing import errors BEFORE deleting, so we can determine if we should update or create
existing_import_error_files = set(
session.execute(select(ParseImportError.bundle_name, ParseImportError.filename))
)
# Delete errors for files that were parsed but don't have errors in import_errors
# (i.e., files that were successfully parsed without errors)
files_to_clear = files_parsed.difference(import_errors)
if files_to_clear:
session.execute(
delete(ParseImportError).where(
tuple_(ParseImportError.bundle_name, ParseImportError.filename).in_(files_to_clear)
)
)
# Add or update the errors of the processed files
for key, stacktrace in import_errors.items():
bundle_name_, relative_fileloc = key
if key in existing_import_error_files:
session.execute(
update(ParseImportError)
.where(
ParseImportError.filename == relative_fileloc,
ParseImportError.bundle_name == bundle_name_,
)
.values(
filename=relative_fileloc,
bundle_name=bundle_name_,
timestamp=utcnow(),
stacktrace=stacktrace,
),
)
# sending notification when an existing dag import error occurs
try:
# todo: make listener accept bundle_name and relative_filename
import_error = session.scalar(
select(ParseImportError).where(
ParseImportError.bundle_name == bundle_name_,
ParseImportError.filename == relative_fileloc,
)
)
if import_error is not None:
get_listener_manager().hook.on_existing_dag_import_error(
filename=import_error.full_file_path(), stacktrace=stacktrace
)
except Exception:
log.exception("error calling listener")
else:
import_error = ParseImportError(
filename=relative_fileloc,
bundle_name=bundle_name,
timestamp=utcnow(),
stacktrace=stacktrace,
)
session.add(import_error)
# sending notification when a new dag import error occurs
try:
get_listener_manager().hook.on_new_dag_import_error(
filename=import_error.full_file_path(), stacktrace=stacktrace
)
except Exception:
log.exception("error calling listener")
session.execute(
update(DagModel)
.where(
DagModel.relative_fileloc == relative_fileloc,
)
.values(
has_import_errors=True,
bundle_name=bundle_name,
is_stale=True,
)
.execution_options(synchronize_session="fetch")
)
def update_dag_parsing_results_in_db(
bundle_name: str,
bundle_version: str | None,
dags: Collection[LazyDeserializedDAG],
import_errors: dict[tuple[str, str], str],
parse_duration: float | None,
warnings: set[DagWarning],
session: Session,
*,
warning_types: tuple[DagWarningType] = (DagWarningType.NONEXISTENT_POOL,),
files_parsed: set[tuple[str, str]] | None = None,
):
"""
Update everything to do with DAG parsing in the DB.
This function will create or update rows in the following tables:
- DagModel (`dag` table), DagTag, DagCode and DagVersion
- SerializedDagModel (`serialized_dag` table)
- ParseImportError (including with any errors as a result of serialization, not just parsing)
- DagWarning
- DAG Permissions
This function will not remove any rows for dags not passed in. It will remove parse errors and warnings
from dags/dag files that are passed in. In order words, if a DAG is passed in with a fileloc of `a.py`
then all warnings and errors related to this file will be removed.
``import_errors`` will be updated in place with an new errors
:param files_parsed: Set of (bundle_name, relative_fileloc) tuples for all files that were parsed.
If None, will be inferred from dags and import_errors. Passing this explicitly ensures that
import errors are cleared for files that were parsed but no longer contain DAGs.
"""
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in run_with_db_retries(logger=log):
with attempt:
serialize_errors = []
log.debug(
"Running dagbag.bulk_write_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
log.debug("Calling the DAG.bulk_sync_to_db method")
try:
SerializedDAG.bulk_write_to_db(
bundle_name, bundle_version, dags, parse_duration, session=session
)
# Write Serialized DAGs to DB, capturing errors
for dag in dags:
serialize_errors.extend(
_serialize_dag_capturing_errors(
dag=dag,
bundle_name=bundle_name,
bundle_version=bundle_version,
session=session,
)
)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
import_errors.update(serialize_errors)
# Record import errors into the ORM - we don't retry on this one as it's not as critical that it works
try:
_update_import_errors(
files_parsed=files_parsed if files_parsed is not None else set(),
bundle_name=bundle_name,
import_errors=import_errors,
session=session,
)
except Exception:
log.exception("Error logging import errors!")
# Record DAG warnings in the metadatabase.
try:
_update_dag_warnings([dag.dag_id for dag in dags], warnings, warning_types, session)
except Exception:
log.exception("Error logging DAG warnings.")
session.flush()
|
_RunInfo
|
python
|
huggingface__transformers
|
src/transformers/generation/logits_process.py
|
{
"start": 81101,
"end": 82841
}
|
class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder
models.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token.
Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
>>> tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
>>> inputs = tokenizer("Translate from English to German: I love cats.", return_tensors="pt")
>>> # By default, it continues generating according to the model's logits
>>> outputs = model.generate(**inputs, max_new_tokens=10)
>>> print(tokenizer.batch_decode(outputs)[0])
<pad> Ich liebe Kitty.</s>
>>> # We can use `forced_bos_token_id` to force the start of generation with an encoder-decoder model
>>> # (including forcing it to end straight away with an EOS token)
>>> outputs = model.generate(**inputs, max_new_tokens=10, forced_bos_token_id=tokenizer.eos_token_id)
>>> print(tokenizer.batch_decode(outputs)[0])
<pad></s>
```
"""
def __init__(self, bos_token_id: int):
self.bos_token_id = bos_token_id
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
scores_processed = scores
if cur_len == 1:
scores_processed = torch.full_like(scores, -math.inf)
scores_processed[:, self.bos_token_id] = 0
return scores_processed
|
ForcedBOSTokenLogitsProcessor
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink01.py
|
{
"start": 346,
"end": 2265
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url("A1", "http://www.perl.org/")
workbook.close()
self.assertExcelEqual()
def test_create_file_write(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks with write()"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write("A1", "http://www.perl.org/")
workbook.close()
def test_create_file_with_url_type(self):
"""Test the creation of a simple XlsxWriter using Url class"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
url = Url("http://www.perl.org/")
worksheet.write_url("A1", url)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_url_type_and_write(self):
"""Test the creation of a simple XlsxWriter using Url class"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
url = Url("http://www.perl.org/")
worksheet.write("A1", url)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/internal/proto_test.py
|
{
"start": 3409,
"end": 4152
}
|
class ____(unittest.TestCase):
def test_pytype_allows_unset_self_field(self):
self.assertEqual(
test_proto2_pb2.MessageWithSelfField(something=123).something, 123
)
def test_pytype_allows_unset_self_and_self_underscore_field(self):
self.assertEqual(
test_proto2_pb2.MessageWithSelfAndSelfUnderscoreField(
something=123
).something,
123,
)
_EXPECTED_PROTO3 = b'\x04r\x02hi\x06\x08\x01r\x02hi\x06\x08\x02r\x02hi'
_EXPECTED_PROTO2 = b'\x06\x08\x00r\x02hi\x06\x08\x01r\x02hi\x06\x08\x02r\x02hi'
@parameterized.named_parameters(
('_proto2', unittest_pb2, _EXPECTED_PROTO2),
('_proto3', unittest_proto3_arena_pb2, _EXPECTED_PROTO3),
)
@testing_refleaks.TestCase
|
SelfFieldTest
|
python
|
TheAlgorithms__Python
|
searches/hill_climbing.py
|
{
"start": 60,
"end": 6750
}
|
class ____:
"""
An interface to define search problems.
The interface will be illustrated using the example of mathematical function.
"""
def __init__(self, x: int, y: int, step_size: int, function_to_optimize):
"""
The constructor of the search problem.
x: the x coordinate of the current search state.
y: the y coordinate of the current search state.
step_size: size of the step to take when looking for neighbors.
function_to_optimize: a function to optimize having the signature f(x, y).
"""
self.x = x
self.y = y
self.step_size = step_size
self.function = function_to_optimize
def score(self) -> int:
"""
Returns the output of the function called with current x and y coordinates.
>>> def test_function(x, y):
... return x + y
>>> SearchProblem(0, 0, 1, test_function).score() # 0 + 0 = 0
0
>>> SearchProblem(5, 7, 1, test_function).score() # 5 + 7 = 12
12
"""
return self.function(self.x, self.y)
def get_neighbors(self):
"""
Returns a list of coordinates of neighbors adjacent to the current coordinates.
Neighbors:
| 0 | 1 | 2 |
| 3 | _ | 4 |
| 5 | 6 | 7 |
"""
step_size = self.step_size
return [
SearchProblem(x, y, step_size, self.function)
for x, y in (
(self.x - step_size, self.y - step_size),
(self.x - step_size, self.y),
(self.x - step_size, self.y + step_size),
(self.x, self.y - step_size),
(self.x, self.y + step_size),
(self.x + step_size, self.y - step_size),
(self.x + step_size, self.y),
(self.x + step_size, self.y + step_size),
)
]
def __hash__(self):
"""
hash the string representation of the current search state.
"""
return hash(str(self))
def __eq__(self, obj):
"""
Check if the 2 objects are equal.
"""
if isinstance(obj, SearchProblem):
return hash(str(self)) == hash(str(obj))
return False
def __str__(self):
"""
string representation of the current search state.
>>> str(SearchProblem(0, 0, 1, None))
'x: 0 y: 0'
>>> str(SearchProblem(2, 5, 1, None))
'x: 2 y: 5'
"""
return f"x: {self.x} y: {self.y}"
def hill_climbing(
search_prob,
find_max: bool = True,
max_x: float = math.inf,
min_x: float = -math.inf,
max_y: float = math.inf,
min_y: float = -math.inf,
visualization: bool = False,
max_iter: int = 10000,
) -> SearchProblem:
"""
Implementation of the hill climbling algorithm.
We start with a given state, find all its neighbors,
move towards the neighbor which provides the maximum (or minimum) change.
We keep doing this until we are at a state where we do not have any
neighbors which can improve the solution.
Args:
search_prob: The search state at the start.
find_max: If True, the algorithm should find the maximum else the minimum.
max_x, min_x, max_y, min_y: the maximum and minimum bounds of x and y.
visualization: If True, a matplotlib graph is displayed.
max_iter: number of times to run the iteration.
Returns a search state having the maximum (or minimum) score.
"""
current_state = search_prob
scores = [] # list to store the current score at each iteration
iterations = 0
solution_found = False
visited = set()
while not solution_found and iterations < max_iter:
visited.add(current_state)
iterations += 1
current_score = current_state.score()
scores.append(current_score)
neighbors = current_state.get_neighbors()
max_change = -math.inf
min_change = math.inf
next_state = None # to hold the next best neighbor
for neighbor in neighbors:
if neighbor in visited:
continue # do not want to visit the same state again
if (
neighbor.x > max_x
or neighbor.x < min_x
or neighbor.y > max_y
or neighbor.y < min_y
):
continue # neighbor outside our bounds
change = neighbor.score() - current_score
if find_max: # finding max
# going to direction with greatest ascent
if change > max_change and change > 0:
max_change = change
next_state = neighbor
elif change < min_change and change < 0: # finding min
# to direction with greatest descent
min_change = change
next_state = neighbor
if next_state is not None:
# we found at least one neighbor which improved the current state
current_state = next_state
else:
# since we have no neighbor that improves the solution we stop the search
solution_found = True
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(iterations), scores)
plt.xlabel("Iterations")
plt.ylabel("Function values")
plt.show()
return current_state
if __name__ == "__main__":
import doctest
doctest.testmod()
def test_f1(x, y):
return (x**2) + (y**2)
# starting the problem with initial coordinates (3, 4)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=False)
print(
"The minimum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def test_f2(x, y):
return (3 * x**2) - (6 * y)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=True)
print(
"The maximum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
|
SearchProblem
|
python
|
keras-team__keras
|
keras/src/utils/file_utils_test.py
|
{
"start": 7835,
"end": 11934
}
|
class ____(test_case.TestCase):
def setUp(self):
"""Create temporary directories and files for testing."""
self.temp_dir = tempfile.mkdtemp()
self.file_content = "Hello, world!"
# Create sample files to be archived
with open(os.path.join(self.temp_dir, "sample.txt"), "w") as f:
f.write(self.file_content)
def tearDown(self):
"""Clean up temporary directories."""
shutil.rmtree(self.temp_dir)
def create_tar(self):
archive_path = os.path.join(self.temp_dir, "sample.tar")
with tarfile.open(archive_path, "w") as archive:
archive.add(
os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt"
)
return archive_path
def create_zip(self):
archive_path = os.path.join(self.temp_dir, "sample.zip")
with zipfile.ZipFile(archive_path, "w") as archive:
archive.write(
os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt"
)
return archive_path
def test_extract_tar(self):
archive_path = self.create_tar()
extract_path = os.path.join(self.temp_dir, "extract_tar")
result = file_utils.extract_archive(archive_path, extract_path, "tar")
self.assertTrue(result)
with open(os.path.join(extract_path, "sample.txt"), "r") as f:
self.assertEqual(f.read(), self.file_content)
def test_extract_zip(self):
archive_path = self.create_zip()
extract_path = os.path.join(self.temp_dir, "extract_zip")
result = file_utils.extract_archive(archive_path, extract_path, "zip")
self.assertTrue(result)
with open(os.path.join(extract_path, "sample.txt"), "r") as f:
self.assertEqual(f.read(), self.file_content)
def test_extract_auto(self):
# This will test the 'auto' functionality
tar_archive_path = self.create_tar()
zip_archive_path = self.create_zip()
extract_tar_path = os.path.join(self.temp_dir, "extract_auto_tar")
extract_zip_path = os.path.join(self.temp_dir, "extract_auto_zip")
self.assertTrue(
file_utils.extract_archive(tar_archive_path, extract_tar_path)
)
self.assertTrue(
file_utils.extract_archive(zip_archive_path, extract_zip_path)
)
with open(os.path.join(extract_tar_path, "sample.txt"), "r") as f:
self.assertEqual(f.read(), self.file_content)
with open(os.path.join(extract_zip_path, "sample.txt"), "r") as f:
self.assertEqual(f.read(), self.file_content)
def test_non_existent_file(self):
extract_path = os.path.join(self.temp_dir, "non_existent")
with self.assertRaises(FileNotFoundError):
file_utils.extract_archive("non_existent.tar", extract_path)
def test_archive_format_none(self):
archive_path = self.create_tar()
extract_path = os.path.join(self.temp_dir, "none_format")
result = file_utils.extract_archive(archive_path, extract_path, None)
self.assertFalse(result)
def test_runtime_error_during_extraction(self):
tar_path = self.create_tar()
extract_path = os.path.join(self.temp_dir, "runtime_error_extraction")
with patch.object(
tarfile.TarFile, "extractall", side_effect=RuntimeError
):
with self.assertRaises(RuntimeError):
file_utils.extract_archive(tar_path, extract_path, "tar")
self.assertFalse(os.path.exists(extract_path))
def test_keyboard_interrupt_during_extraction(self):
tar_path = self.create_tar()
extract_path = os.path.join(
self.temp_dir, "keyboard_interrupt_extraction"
)
with patch.object(
tarfile.TarFile, "extractall", side_effect=KeyboardInterrupt
):
with self.assertRaises(KeyboardInterrupt):
file_utils.extract_archive(tar_path, extract_path, "tar")
self.assertFalse(os.path.exists(extract_path))
|
ExtractArchiveTest
|
python
|
huggingface__transformers
|
tests/models/sam/test_modeling_sam.py
|
{
"start": 5448,
"end": 9598
}
|
class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (SamVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = SamVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
expected_attention_shape = (
self.model_tester.batch_size * self.model_tester.num_attention_heads,
196,
196,
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-4:]),
list(expected_attention_shape),
)
@unittest.skip(reason="SamVisionModel does not support training")
def test_training(self):
pass
@unittest.skip(reason="SamVisionModel does not support training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="SamVisionModel does not support training")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Hidden_states is tested in create_and_check_model tests")
def test_hidden_states_output(self):
pass
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
self.skipTest(reason="SAM model can't be compiled dynamic yet")
|
SamVisionModelTest
|
python
|
mlflow__mlflow
|
mlflow/utils/async_logging/async_logging_queue.py
|
{
"start": 1312,
"end": 14279
}
|
class ____:
"""
This is a queue based run data processor that queues incoming batches and processes them using
single worker thread.
"""
def __init__(
self, logging_func: Callable[[str, list[Metric], list[Param], list[RunTag]], None]
) -> None:
"""Initializes an AsyncLoggingQueue object.
Args:
logging_func: A callable function that takes in four arguments: a string
representing the run_id, a list of Metric objects,
a list of Param objects, and a list of RunTag objects.
"""
self._queue = Queue()
self._lock = threading.RLock()
self._logging_func = logging_func
self._stop_data_logging_thread_event = threading.Event()
self._status = QueueStatus.IDLE
def _at_exit_callback(self) -> None:
"""Callback function to be executed when the program is exiting.
Stops the data processing thread and waits for the queue to be drained. Finally, shuts down
the thread pools used for data logging and batch processing status check.
"""
try:
# Stop the data processing thread
self._stop_data_logging_thread_event.set()
# Waits till logging queue is drained.
self._batch_logging_thread.join()
self._batch_logging_worker_threadpool.shutdown(wait=True)
self._batch_status_check_threadpool.shutdown(wait=True)
except Exception as e:
_logger.error(f"Encountered error while trying to finish logging: {e}")
def end_async_logging(self) -> None:
with self._lock:
# Stop the data processing thread.
self._stop_data_logging_thread_event.set()
# Waits till logging queue is drained.
self._batch_logging_thread.join()
# Set the status to tear down. The worker threads will still process
# the remaining data.
self._status = QueueStatus.TEAR_DOWN
# Clear the status to avoid blocking next logging.
self._stop_data_logging_thread_event.clear()
def shut_down_async_logging(self) -> None:
"""
Shut down the async logging queue and wait for the queue to be drained.
Use this method if the async logging should be terminated.
"""
self.end_async_logging()
self._batch_logging_worker_threadpool.shutdown(wait=True)
self._batch_status_check_threadpool.shutdown(wait=True)
self._status = QueueStatus.IDLE
def flush(self) -> None:
"""
Flush the async logging queue and restart thread to listen
to incoming data after flushing.
Calling this method will flush the queue to ensure all the data are logged.
"""
self.shut_down_async_logging()
# Reinitialize the logging thread and set the status to active.
self.activate()
def _logging_loop(self) -> None:
"""
Continuously logs run data until `self._continue_to_process_data` is set to False.
If an exception occurs during logging, a `MlflowException` is raised.
"""
try:
while not self._stop_data_logging_thread_event.is_set():
self._log_run_data()
# Drain the queue after the stop event is set.
while not self._queue.empty():
self._log_run_data()
except Exception as e:
from mlflow.exceptions import MlflowException
raise MlflowException(f"Exception inside the run data logging thread: {e}")
def _fetch_batch_from_queue(self) -> list[RunBatch]:
"""Fetches a batch of run data from the queue.
Returns:
RunBatch: A batch of run data.
"""
batches = []
if self._queue.empty():
return batches
queue_size = self._queue.qsize() # Estimate the queue's size.
merged_batch = self._queue.get()
for i in range(queue_size - 1):
if self._queue.empty():
# `queue_size` is an estimate, so we need to check if the queue is empty.
break
batch = self._queue.get()
if (
merged_batch.run_id != batch.run_id
or (
len(merged_batch.metrics + merged_batch.params + merged_batch.tags)
+ len(batch.metrics + batch.params + batch.tags)
)
>= _MAX_ITEMS_PER_BATCH
or len(merged_batch.params) + len(batch.params) >= _MAX_PARAMS_PER_BATCH
or len(merged_batch.tags) + len(batch.tags) >= _MAX_TAGS_PER_BATCH
):
# Make a new batch if the run_id is different or the batch is full.
batches.append(merged_batch)
merged_batch = batch
else:
merged_batch.add_child_batch(batch)
merged_batch.params.extend(batch.params)
merged_batch.tags.extend(batch.tags)
merged_batch.metrics.extend(batch.metrics)
batches.append(merged_batch)
return batches
def _log_run_data(self) -> None:
"""Process the run data in the running runs queues.
For each run in the running runs queues, this method retrieves the next batch of run data
from the queue and processes it by calling the `_processing_func` method with the run ID,
metrics, parameters, and tags in the batch. If the batch is empty, it is skipped. After
processing the batch, the processed watermark is updated and the batch event is set.
If an exception occurs during processing, the exception is logged and the batch event is set
with the exception. If the queue is empty, it is ignored.
Returns: None
"""
async_logging_buffer_seconds = MLFLOW_ASYNC_LOGGING_BUFFERING_SECONDS.get()
try:
if async_logging_buffer_seconds:
self._stop_data_logging_thread_event.wait(async_logging_buffer_seconds)
run_batches = self._fetch_batch_from_queue()
else:
run_batches = [self._queue.get(timeout=1)]
except Empty:
# Ignore empty queue exception
return
def logging_func(run_batch):
try:
self._logging_func(
run_id=run_batch.run_id,
metrics=run_batch.metrics,
params=run_batch.params,
tags=run_batch.tags,
)
except Exception as e:
_logger.error(f"Run Id {run_batch.run_id}: Failed to log run data: Exception: {e}")
run_batch.exception = e
finally:
run_batch.complete()
for run_batch in run_batches:
try:
self._batch_logging_worker_threadpool.submit(logging_func, run_batch)
except Exception as e:
_logger.error(
f"Failed to submit batch for logging: {e}. Usually this means you are not "
"shutting down MLflow properly before exiting. Please make sure you are using "
"context manager, e.g., `with mlflow.start_run():` or call `mlflow.end_run()`"
"explicitly to terminate MLflow logging before exiting."
)
run_batch.exception = e
run_batch.complete()
def _wait_for_batch(self, batch: RunBatch) -> None:
"""Wait for the given batch to be processed by the logging thread.
Args:
batch: The batch to wait for.
Raises:
Exception: If an exception occurred while processing the batch.
"""
batch.completion_event.wait()
if batch.exception:
raise batch.exception
def __getstate__(self):
"""Return the state of the object for pickling.
This method is called by the `pickle` module when the object is being pickled. It returns a
dictionary containing the object's state, with non-picklable attributes removed.
Returns:
dict: A dictionary containing the object's state.
"""
state = self.__dict__.copy()
del state["_queue"]
del state["_lock"]
del state["_status"]
if "_run_data_logging_thread" in state:
del state["_run_data_logging_thread"]
if "_stop_data_logging_thread_event" in state:
del state["_stop_data_logging_thread_event"]
if "_batch_logging_thread" in state:
del state["_batch_logging_thread"]
if "_batch_logging_worker_threadpool" in state:
del state["_batch_logging_worker_threadpool"]
if "_batch_status_check_threadpool" in state:
del state["_batch_status_check_threadpool"]
return state
def __setstate__(self, state):
"""Set the state of the object from a given state dictionary.
It pops back the removed non-picklable attributes from `self.__getstate__()`.
Args:
state (dict): A dictionary containing the state of the object.
Returns:
None
"""
self.__dict__.update(state)
self._queue = Queue()
self._lock = threading.RLock()
self._status = QueueStatus.IDLE
self._batch_logging_thread = None
self._batch_logging_worker_threadpool = None
self._batch_status_check_threadpool = None
self._stop_data_logging_thread_event = threading.Event()
def log_batch_async(
self, run_id: str, params: list[Param], tags: list[RunTag], metrics: list[Metric]
) -> RunOperations:
"""Asynchronously logs a batch of run data (parameters, tags, and metrics).
Args:
run_id (str): The ID of the run to log data for.
params (list[mlflow.entities.Param]): A list of parameters to log for the run.
tags (list[mlflow.entities.RunTag]): A list of tags to log for the run.
metrics (list[mlflow.entities.Metric]): A list of metrics to log for the run.
Returns:
mlflow.utils.async_utils.RunOperations: An object that encapsulates the
asynchronous operation of logging the batch of run data.
The object contains a list of `concurrent.futures.Future` objects that can be used
to check the status of the operation and retrieve any exceptions
that occurred during the operation.
"""
from mlflow import MlflowException
if not self.is_active():
raise MlflowException("AsyncLoggingQueue is not activated.")
batch = RunBatch(
run_id=run_id,
params=params,
tags=tags,
metrics=metrics,
completion_event=threading.Event(),
)
self._queue.put(batch)
operation_future = self._batch_status_check_threadpool.submit(self._wait_for_batch, batch)
return RunOperations(operation_futures=[operation_future])
def is_active(self) -> bool:
return self._status == QueueStatus.ACTIVE
def is_idle(self) -> bool:
return self._status == QueueStatus.IDLE
def _set_up_logging_thread(self) -> None:
"""
Sets up the logging thread.
This method shouldn't be called directly without shutting down the async
logging first if an existing async logging exists, otherwise it might
hang the program.
"""
with self._lock:
self._batch_logging_thread = threading.Thread(
target=self._logging_loop,
name="MLflowAsyncLoggingLoop",
daemon=True,
)
self._batch_logging_worker_threadpool = ThreadPoolExecutor(
max_workers=MLFLOW_ASYNC_LOGGING_THREADPOOL_SIZE.get() or 10,
thread_name_prefix=ASYNC_LOGGING_WORKER_THREAD_PREFIX,
)
self._batch_status_check_threadpool = ThreadPoolExecutor(
max_workers=MLFLOW_ASYNC_LOGGING_THREADPOOL_SIZE.get() or 10,
thread_name_prefix=ASYNC_LOGGING_STATUS_CHECK_THREAD_PREFIX,
)
self._batch_logging_thread.start()
def activate(self) -> None:
"""Activates the async logging queue
1. Initializes queue draining thread.
2. Initializes threads for checking the status of logged batch.
3. Registering an atexit callback to ensure that any remaining log data
is flushed before the program exits.
If the queue is already activated, this method does nothing.
"""
with self._lock:
if self.is_active():
return
self._set_up_logging_thread()
atexit.register(self._at_exit_callback)
self._status = QueueStatus.ACTIVE
|
AsyncLoggingQueue
|
python
|
pytorch__pytorch
|
test/distributed/elastic/utils/data/cycling_iterator_test.py
|
{
"start": 342,
"end": 1473
}
|
class ____(unittest.TestCase):
def generator(self, epoch, stride, max_epochs):
# generate an continuously incrementing list each epoch
# e.g. [0,1,2] [3,4,5] [6,7,8] ...
return iter([stride * epoch + i for i in range(stride)])
def test_cycling_iterator(self):
stride = 3
max_epochs = 90
def generator_fn(epoch):
return self.generator(epoch, stride, max_epochs)
it = CyclingIterator(n=max_epochs, generator_fn=generator_fn)
for i in range(stride * max_epochs):
self.assertEqual(i, next(it))
with self.assertRaises(StopIteration):
next(it)
def test_cycling_iterator_start_epoch(self):
stride = 3
max_epochs = 2
start_epoch = 1
def generator_fn(epoch):
return self.generator(epoch, stride, max_epochs)
it = CyclingIterator(max_epochs, generator_fn, start_epoch)
for i in range(stride * start_epoch, stride * max_epochs):
self.assertEqual(i, next(it))
with self.assertRaises(StopIteration):
next(it)
|
CyclingIteratorTest
|
python
|
django-extensions__django-extensions
|
tests/management/commands/test_delete_squashed_migrations.py
|
{
"start": 1162,
"end": 4067
}
|
class ____(BaseDeleteSquashedMigrationsTestCase):
    """Tests for delete_squashed_migrations command exceptions.

    Each test drives the management command into one specific error path and
    asserts the exact CommandError message via a regex match.

    NOTE(review): several method names spell "CommandEror" — a typo, but the
    names are what the test runner discovers, so they are left untouched here.
    """

    def test_should_raise_CommandError_if_app_does_not_have_migrations(self):
        # An app with no migrations package cannot be processed at all.
        with self.assertRaisesRegex(
            CommandError,
            r"App 'testapp_with_no_models_file' does not have migrations \(so delete_squashed_migrations on it makes no sense\)",
        ):
            call_command("delete_squashed_migrations", "testapp_with_no_models_file")

    def test_should_raise_CommandEror_if_migration_is_not_squashed(self):
        # Naming an ordinary (non-squashed) migration explicitly is an error.
        with self.assertRaisesRegex(
            CommandError,
            "The migration testapp_with_appconfig 0001_initial is not a squashed migration.",
        ):
            call_command("delete_squashed_migrations", "testapp_with_appconfig", "0001")

    def test_should_raise_CommandEror_if_more_than_one_migration_matches_to_given_arg(
        self,
    ):
        # Create a second model so makemigrations produces a 0002, then squash;
        # after squashing, the prefix "0001" matches both the original and the
        # squashed replacement, which must be rejected as ambiguous.
        class NameModel(models.Model):
            created_at = models.DateTimeField(auto_now_add=True)
            name = models.CharField(max_length=50)

            class Meta:
                app_label = "testapp_with_appconfig"

        call_command("makemigrations", "testapp_with_appconfig")
        call_command("squashmigrations", "testapp_with_appconfig", "0002", "--noinput")
        with self.assertRaisesRegex(
            CommandError,
            "More than one migration matches '0001' in app 'testapp_with_appconfig'. Please be more specific.",
        ):
            call_command("delete_squashed_migrations", "testapp_with_appconfig", "0001")

    def test_should_raise_CommandEror_if_squashed_migration_not_found(self):
        # New migrations exist but nothing has been squashed yet, so there is
        # no squashed migration to resolve implicitly.
        class NameModel(models.Model):
            created_at = models.DateTimeField(auto_now_add=True)
            name = models.CharField(max_length=50)

            class Meta:
                app_label = "testapp_with_appconfig"

        call_command("makemigrations", "testapp_with_appconfig")
        with self.assertRaisesRegex(
            CommandError,
            "Cannot find a squashed migration in app 'testapp_with_appconfig'.",
        ):
            call_command("delete_squashed_migrations", "testapp_with_appconfig")

    def test_should_raise_CommandEror_if_squashed_migration_not_foundee(self):
        # An explicit prefix that matches no migration at all.
        with self.assertRaisesRegex(
            CommandError,
            "Cannot find a migration matching '0002' from app 'testapp_with_appconfig'.",
        ):
            call_command("delete_squashed_migrations", "testapp_with_appconfig", "0002")

    def test_should_raise_CommandError_when_database_does_not_exist(self):
        # --database must name a configured alias.
        with self.assertRaisesRegex(
            CommandError, "Unknown database non-existing_database"
        ):
            call_command(
                "delete_squashed_migrations", "--database=non-existing_database"
            )
@pytest.mark.xfail
|
DeleteSquashedMigrationsExceptionsTests
|
python
|
PyCQA__pycodestyle
|
testing/data/python313.py
|
{
"start": 116,
"end": 146
}
|
# PEP 696 (Python 3.13): class-scoped type parameter with a default type.
# NOTE(review): this is a pycodestyle test fixture — the spacing around "="
# in the type-parameter default is the thing under test; tokens must not change.
class ____[T = str]:
    pass
|
C2
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/core/expect_column_values_to_be_decreasing.py
|
{
"start": 1038,
"end": 7435
}
|
class ____(ColumnMapExpectation):
    """Expect the column values to be decreasing.

    By default, this expectation only works for numeric or datetime data.
    If 'strictly=True', then this expectation is only satisfied if each consecutive value \
    is strictly decreasing--equal values are treated as failures.

    ExpectColumnValuesToBeDecreasing is a \
    Column Map Expectation

    Args:
        column (str): \
            The column name.

    Keyword Args:
        strictly (Boolean or None): \
            If True, values must be strictly greater than previous values
        mostly (None or a float between 0 and 1): \
            Successful if at least mostly fraction of values match the expectation. \
            For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
            For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
        severity (str or None): \
            {FAILURE_SEVERITY_DESCRIPTION} \
            For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).

    Returns:
        An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)

        Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.

    See Also:
        [ExpectColumnValuesToBeIncreasing](https://greatexpectations.io/expectations/expect_column_values_to_be_increasing)
    """  # noqa: E501 # FIXME CoP

    # When True, equal consecutive values count as failures; may also be a
    # suite-parameter reference resolved at validation time.
    strictly: Union[bool, SuiteParameterDict, None] = None

    # This dictionary contains metadata for display in the public gallery
    library_metadata = {
        "maturity": "production",
        "tags": ["core expectation", "column map expectation"],
        "contributors": ["@great_expectations"],
        "requirements": [],
        "has_full_test_suite": True,
        "manually_reviewed_code": True,
    }

    # Metric this column-map expectation evaluates per row.
    map_metric = "column_values.decreasing"
    success_keys = (
        "strictly",
        "mostly",
    )
    args_keys = ("column",)

    @classmethod
    def _prescriptive_template(
        cls,
        renderer_configuration: RendererConfiguration,
    ) -> RendererConfiguration:
        """Build the prescriptive template string on *renderer_configuration*."""
        # Register each kwarg the template may reference, with its render type.
        add_param_args: AddParamArgs = (
            ("column", RendererValueType.STRING),
            ("strictly", RendererValueType.BOOLEAN),
            ("mostly", RendererValueType.NUMBER),
        )
        for name, param_type in add_param_args:
            renderer_configuration.add_param(name=name, param_type=param_type)

        params = renderer_configuration.params

        # Wording depends on whether equal consecutive values are allowed.
        if params.strictly:
            template_str = "values must be strictly less than previous values"
        else:
            template_str = "values must be less than or equal to previous values"

        # Only mention "mostly" when it is a real partial threshold (< 100%).
        if params.mostly and params.mostly.value < 1.0:
            renderer_configuration = cls._add_mostly_pct_param(
                renderer_configuration=renderer_configuration
            )
            template_str += ", at least $mostly_pct % of the time."
        else:
            template_str += "."

        if renderer_configuration.include_column_name:
            template_str = f"$column {template_str}"

        renderer_configuration.template_str = template_str

        return renderer_configuration

    @classmethod
    @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
    @render_suite_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration: Optional[ExpectationConfiguration] = None,
        result: Optional[ExpectationValidationResult] = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        """Legacy prescriptive renderer: returns a one-element list of rendered content."""
        runtime_configuration = runtime_configuration or {}
        # Default is to include the column name unless explicitly disabled.
        include_column_name = runtime_configuration.get("include_column_name") is not False
        styling = runtime_configuration.get("styling")
        # Normalize missing kwargs to None so the template logic below is uniform.
        params = substitute_none_for_missing(
            configuration.kwargs,
            [
                "column",
                "strictly",
                "mostly",
                "row_condition",
                "condition_parser",
            ],
        )

        if params.get("strictly"):
            template_str = "values must be strictly less than previous values"
        else:
            template_str = "values must be less than or equal to previous values"

        if params["mostly"] is not None and params["mostly"] < 1.0:
            # Render the fraction as a percentage without scientific notation.
            params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
            # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
            template_str += ", at least $mostly_pct % of the time."
        else:
            template_str += "."

        if include_column_name:
            template_str = f"$column {template_str}"

        if params["row_condition"] is not None:
            # Prepend the row-condition clause and merge any condition styling.
            conditional_template_str = parse_row_condition_string(params["row_condition"])
            template_str, styling = _style_row_condition(
                conditional_template_str,
                template_str,
                params,
                styling,
            )

        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]
|
ExpectColumnValuesToBeDecreasing
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclassHash1.py
|
{
"start": 712,
"end": 829
}
|
class ____:
    # Single data attribute; __eq__ below compares only this field.
    a: int

    def __eq__(self, other) -> bool:
        return self.a == other.a


# NOTE(review): defining __eq__ without __hash__ makes a class unhashable
# (__hash__ becomes None), so this pyright fixture presumably expects the
# Hashable assignment to be flagged — confirm against the sample's expected
# diagnostics before changing anything here.
v7: Hashable = DC7(0)
|
DC7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.