language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-genesys/source_genesys/source.py | {
"start": 4306,
"end": 4609
} | class ____(GenesysStream):
"""
API Docs: https://developer.genesys.cloud/telephony/telephony-apis
"""
primary_key = "id"
cursor_field = "dateModified"
def path(self, **kwargs) -> str:
return "telephony/providers/edges/outboundroutes"
| TelephonyProvidersEdgesOutboundroutes |
python | spyder-ide__spyder | spyder/widgets/comboboxes.py | {
"start": 824,
"end": 4816
} | class ____(SpyderComboBox):
"""Editable combo box base class"""
valid = Signal(bool, bool)
sig_tab_pressed = Signal(bool)
sig_resized = Signal(QSize, QSize)
"""
This signal is emitted to inform the widget has been resized.
Parameters
----------
size: QSize
The new size of the widget.
old_size: QSize
The previous size of the widget.
"""
def __init__(self, parent, items_elide_mode=None):
super().__init__(parent, items_elide_mode)
self.setEditable(True)
self.setCompleter(QCompleter(self))
self.selected_text = self.currentText()
# --- Qt overrides
def event(self, event):
"""Qt Override.
Filter tab keys and process double tab keys.
"""
# Type check: Prevent error in PySide where 'event' may be of type
# QtGui.QPainter (for whatever reason).
if not isinstance(event, QEvent):
return True
if (event.type() == QEvent.KeyPress) and (event.key() == Qt.Key_Tab):
self.sig_tab_pressed.emit(True)
return True
return super().event(event)
def focusOutEvent(self, event):
"""
Qt Override.
Handle focus out event to prevent changing current text with some other
entry in the history that could match the current text in a case
insensitive manner.
See spyder-ide/spyder#23597
"""
self.add_current_text_if_valid()
super().focusOutEvent(event)
def keyPressEvent(self, event):
"""Qt Override.
Handle key press events.
"""
if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
if self.add_current_text_if_valid():
self.selected()
self.hide_completer()
elif event.key() == Qt.Key_Escape:
self.set_current_text(self.selected_text)
self.hide_completer()
else:
super().keyPressEvent(event)
def resizeEvent(self, event):
"""
Emit a resize signal for widgets that need to adapt its size.
"""
super().resizeEvent(event)
self.sig_resized.emit(event.size(), event.oldSize())
# --- Own methods
def is_valid(self, qstr):
"""
Return True if string is valid
Return None if validation can't be done
"""
pass
def selected(self):
"""Action to be executed when a valid item has been selected"""
self.valid.emit(True, True)
def add_text(self, text):
"""Add text to combo box: add a new item if text is not found in
combo box items."""
index = self.findText(text, Qt.MatchCaseSensitive)
while index != -1:
self.removeItem(index)
index = self.findText(text, Qt.MatchCaseSensitive)
self.insertItem(0, text)
index = self.findText('')
if index != -1:
self.removeItem(index)
self.insertItem(0, '')
if text != '':
self.setCurrentIndex(1)
else:
self.setCurrentIndex(0)
else:
self.setCurrentIndex(0)
self.selected_text = text
def set_current_text(self, text):
"""Sets the text of the QLineEdit of the QComboBox."""
self.lineEdit().setText(str(text))
def add_current_text(self):
"""Add current text to combo box history (convenient method)"""
text = self.currentText()
self.add_text(text)
def add_current_text_if_valid(self):
"""Add current text to combo box history if valid"""
valid = self.is_valid(self.currentText())
if valid or valid is None:
self.add_current_text()
return True
else:
self.set_current_text(self.selected_text)
def hide_completer(self):
"""Hides the completion widget."""
self.setCompleter(QCompleter([], self))
| BaseComboBox |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 7898,
"end": 8150
} | class ____(PydanticValueError):
code = 'list.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
| ListMinLengthError |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/main_widget.py | {
"start": 2013,
"end": 2517
} | class ____:
PasteAction = 'paste_action'
CopyAction = 'copy'
EditAction = 'edit_action'
PlotAction = 'plot_action'
HistogramAction = 'histogram_action'
ImshowAction = 'imshow_action'
SaveArrayAction = 'save_array_action'
InsertAction = 'insert_action'
RemoveAction = 'remove_action'
RenameAction = 'rename_action'
DuplicateAction = 'duplicate_action'
ViewAction = 'view_action'
EditFiltersAction = 'edit_filters_action'
| VariableExplorerContextMenuActions |
python | neetcode-gh__leetcode | python/1822-sign-of-the-product-of-an-array.py | {
"start": 0,
"end": 252
} | class ____:
def arraySign(self, nums: List[int]) -> int:
flag = True
for i in nums:
if i == 0:
return 0
if i < 0:
flag = not flag
return 1 if flag else -1
| Solution |
python | walkccc__LeetCode | solutions/2200. Find All K-Distant Indices in an Array/2200.py | {
"start": 0,
"end": 386
} | class ____:
def findKDistantIndices(self, nums: list[int], key: int, k: int) -> list[int]:
n = len(nums)
ans = []
j = 0
for i in range(n):
# the first index j s.t. nums[j] == key and j >= i - k
while j < n and (nums[j] != key or j < i - k):
j += 1
if j == n:
break
if abs(i - j) <= k:
ans.append(i)
return ans
| Solution |
python | getsentry__sentry | src/sentry/incidents/models/incident.py | {
"start": 8501,
"end": 9684
} | class ____(Model):
"""
An IncidentActivity is a record of a change that occurred in an Incident. This could be a status change,
"""
__relocation_scope__ = RelocationScope.Global
incident = FlexibleForeignKey("sentry.Incident")
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
type: models.Field = models.IntegerField()
value = models.TextField(null=True)
previous_value = models.TextField(null=True)
comment = models.TextField(null=True)
date_added = models.DateTimeField(default=timezone.now)
notification_uuid = models.UUIDField("notification_uuid", null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_incidentactivity"
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
if old_pk is None:
return None
# Generate a new UUID, if one exists.
if self.notification_uuid:
self.notification_uuid = uuid4()
return old_pk
| IncidentActivity |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 34113,
"end": 40394
} | class ____(Field):
default_error_messages = {
'invalid': _('A valid number is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),
'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),
'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),
'max_string_length': _('String value too large.')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None,
localize=False, rounding=None, normalize_output=False, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.localize = localize
self.normalize_output = normalize_output
if coerce_to_string is not None:
self.coerce_to_string = coerce_to_string
if self.localize:
self.coerce_to_string = True
self.max_value = max_value
self.min_value = min_value
if self.max_value is not None and not isinstance(self.max_value, (int, decimal.Decimal)):
warnings.warn("max_value should be an integer or Decimal instance.")
if self.min_value is not None and not isinstance(self.min_value, (int, decimal.Decimal)):
warnings.warn("min_value should be an integer or Decimal instance.")
if self.max_digits is not None and self.decimal_places is not None:
self.max_whole_digits = self.max_digits - self.decimal_places
else:
self.max_whole_digits = None
super().__init__(**kwargs)
if self.max_value is not None:
message = lazy_format(self.error_messages['max_value'], max_value=self.max_value)
self.validators.append(
MaxValueValidator(self.max_value, message=message))
if self.min_value is not None:
message = lazy_format(self.error_messages['min_value'], min_value=self.min_value)
self.validators.append(
MinValueValidator(self.min_value, message=message))
if rounding is not None:
valid_roundings = [v for k, v in vars(decimal).items() if k.startswith('ROUND_')]
assert rounding in valid_roundings, (
'Invalid rounding option %s. Valid values for rounding are: %s' % (rounding, valid_roundings))
self.rounding = rounding
def validate_empty_values(self, data):
if smart_str(data).strip() == '' and self.allow_null:
return (True, None)
return super().validate_empty_values(data)
def to_internal_value(self, data):
"""
Validate that the input is a decimal number and return a Decimal
instance.
"""
data = smart_str(data).strip()
if self.localize:
data = sanitize_separators(data)
if len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
value = decimal.Decimal(data)
except decimal.DecimalException:
self.fail('invalid')
if value.is_nan():
self.fail('invalid')
# Check for infinity and negative infinity.
if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):
self.fail('invalid')
return self.quantize(self.validate_precision(value))
def validate_precision(self, value):
"""
Ensure that there are no more than max_digits in the number, and no
more than decimal_places digits after the decimal point.
Override this method to disable the precision validation for input
values or to enhance it in any way you need to.
"""
sign, digittuple, exponent = value.as_tuple()
if exponent >= 0:
# 1234500.0
total_digits = len(digittuple) + exponent
whole_digits = total_digits
decimal_places = 0
elif len(digittuple) > abs(exponent):
# 123.45
total_digits = len(digittuple)
whole_digits = total_digits - abs(exponent)
decimal_places = abs(exponent)
else:
# 0.001234
total_digits = abs(exponent)
whole_digits = 0
decimal_places = total_digits
if self.max_digits is not None and total_digits > self.max_digits:
self.fail('max_digits', max_digits=self.max_digits)
if self.decimal_places is not None and decimal_places > self.decimal_places:
self.fail('max_decimal_places', max_decimal_places=self.decimal_places)
if self.max_whole_digits is not None and whole_digits > self.max_whole_digits:
self.fail('max_whole_digits', max_whole_digits=self.max_whole_digits)
return value
def to_representation(self, value):
coerce_to_string = getattr(self, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING)
if value is None:
if coerce_to_string:
return ''
else:
return None
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value).strip())
quantized = self.quantize(value)
if self.normalize_output:
quantized = quantized.normalize()
if not coerce_to_string:
return quantized
if self.localize:
return localize_input(quantized)
return f'{quantized:f}'
def quantize(self, value):
"""
Quantize the decimal value to the configured precision.
"""
if self.decimal_places is None:
return value
context = decimal.getcontext().copy()
if self.max_digits is not None:
context.prec = self.max_digits
return value.quantize(
decimal.Decimal('.1') ** self.decimal_places,
rounding=self.rounding,
context=context
)
# Date & time fields...
| DecimalField |
python | TheAlgorithms__Python | ciphers/xor_cipher.py | {
"start": 406,
"end": 6928
} | class ____:
def __init__(self, key: int = 0):
"""
simple constructor that receives a key or uses
default key = 0
"""
# private field
self.__key = key
def encrypt(self, content: str, key: int) -> list[str]:
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content' as a list of chars
if key not passed the method uses the key by the constructor.
otherwise key = 1
Empty list
>>> XORCipher().encrypt("", 5)
[]
One key
>>> XORCipher().encrypt("hallo welt", 1)
['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u']
Normal key
>>> XORCipher().encrypt("HALLO WELT", 32)
['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't']
Key greater than 255
>>> XORCipher().encrypt("hallo welt", 256)
['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't']
"""
# precondition
assert isinstance(key, int)
assert isinstance(content, str)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 256
return [chr(ord(ch) ^ key) for ch in content]
def decrypt(self, content: str, key: int) -> list[str]:
"""
input: 'content' of type list and 'key' of type int
output: decrypted string 'content' as a list of chars
if key not passed the method uses the key by the constructor.
otherwise key = 1
Empty list
>>> XORCipher().decrypt("", 5)
[]
One key
>>> XORCipher().decrypt("hallo welt", 1)
['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u']
Normal key
>>> XORCipher().decrypt("HALLO WELT", 32)
['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't']
Key greater than 255
>>> XORCipher().decrypt("hallo welt", 256)
['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't']
"""
# precondition
assert isinstance(key, int)
assert isinstance(content, str)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 256
return [chr(ord(ch) ^ key) for ch in content]
def encrypt_string(self, content: str, key: int = 0) -> str:
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content'
if key not passed the method uses the key by the constructor.
otherwise key = 1
Empty list
>>> XORCipher().encrypt_string("", 5)
''
One key
>>> XORCipher().encrypt_string("hallo welt", 1)
'i`mmn!vdmu'
Normal key
>>> XORCipher().encrypt_string("HALLO WELT", 32)
'hallo\\x00welt'
Key greater than 255
>>> XORCipher().encrypt_string("hallo welt", 256)
'hallo welt'
"""
# precondition
assert isinstance(key, int)
assert isinstance(content, str)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 256
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def decrypt_string(self, content: str, key: int = 0) -> str:
"""
input: 'content' of type string and 'key' of type int
output: decrypted string 'content'
if key not passed the method uses the key by the constructor.
otherwise key = 1
Empty list
>>> XORCipher().decrypt_string("", 5)
''
One key
>>> XORCipher().decrypt_string("hallo welt", 1)
'i`mmn!vdmu'
Normal key
>>> XORCipher().decrypt_string("HALLO WELT", 32)
'hallo\\x00welt'
Key greater than 255
>>> XORCipher().decrypt_string("hallo welt", 256)
'hallo welt'
"""
# precondition
assert isinstance(key, int)
assert isinstance(content, str)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 256
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def encrypt_file(self, file: str, key: int = 0) -> bool:
"""
input: filename (str) and a key (int)
output: returns true if encrypt process was
successful otherwise false
if key not passed the method uses the key by the constructor.
otherwise key = 1
"""
# precondition
assert isinstance(file, str)
assert isinstance(key, int)
# make sure key is an appropriate size
key %= 256
try:
with open(file) as fin, open("encrypt.out", "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(line, key))
except OSError:
return False
return True
def decrypt_file(self, file: str, key: int) -> bool:
"""
input: filename (str) and a key (int)
output: returns true if decrypt process was
successful otherwise false
if key not passed the method uses the key by the constructor.
otherwise key = 1
"""
# precondition
assert isinstance(file, str)
assert isinstance(key, int)
# make sure key is an appropriate size
key %= 256
try:
with open(file) as fin, open("decrypt.out", "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(line, key))
except OSError:
return False
return True
if __name__ == "__main__":
from doctest import testmod
testmod()
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| XORCipher |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/grant_types/base.py | {
"start": 398,
"end": 2540
} | class ____:
"""
Container object for holding custom validator callables to be invoked
as part of the grant type `validate_authorization_request()` or
`validate_authorization_request()` methods on the various grant types.
Authorization validators must be callables that take a request object and
return a dict, which may contain items to be added to the `request_info`
returned from the grant_type after validation.
Token validators must be callables that take a request object and
return None.
Both authorization validators and token validators may raise OAuth2
exceptions if validation conditions fail.
Authorization validators added to `pre_auth` will be run BEFORE
the standard validations (but after the critical ones that raise
fatal errors) as part of `validate_authorization_request()`
Authorization validators added to `post_auth` will be run AFTER
the standard validations as part of `validate_authorization_request()`
Token validators added to `pre_token` will be run BEFORE
the standard validations as part of `validate_token_request()`
Token validators added to `post_token` will be run AFTER
the standard validations as part of `validate_token_request()`
For example:
>>> def my_auth_validator(request):
... return {'myval': True}
>>> auth_code_grant = AuthorizationCodeGrant(request_validator)
>>> auth_code_grant.custom_validators.pre_auth.append(my_auth_validator)
>>> def my_token_validator(request):
... if not request.everything_okay:
... raise errors.OAuth2Error("uh-oh")
>>> auth_code_grant.custom_validators.post_token.append(my_token_validator)
"""
def __init__(self, post_auth, post_token,
pre_auth, pre_token):
self.pre_auth = pre_auth
self.post_auth = post_auth
self.pre_token = pre_token
self.post_token = post_token
@property
def all_pre(self):
return chain(self.pre_auth, self.pre_token)
@property
def all_post(self):
return chain(self.post_auth, self.post_token)
| ValidatorsContainer |
python | pypa__warehouse | warehouse/admin/views/organizations.py | {
"start": 3569,
"end": 17474
} | class ____(wtforms.Form):
seat_limit = wtforms.IntegerField(
validators=[
wtforms.validators.InputRequired(message="Specify seat limit"),
wtforms.validators.NumberRange(
min=1, message="Seat limit must be at least 1"
),
]
)
expires = wtforms.DateField(
validators=[
wtforms.validators.InputRequired(message="Specify expiration date"),
]
)
def validate_expires(self, field):
if field.data and field.data <= datetime.date.today():
raise wtforms.ValidationError("Expiration date must be in the future")
def _turbo_mode(request):
next_organization_application = (
request.db.query(OrganizationApplication)
.filter(OrganizationApplication.status == "submitted")
.order_by(OrganizationApplication.submitted)
.first()
)
if next_organization_application:
return HTTPSeeOther(
request.route_path(
"admin.organization_application.detail",
organization_application_id=next_organization_application.id,
)
)
else:
request.session.flash(
"No more Organization Applications to review!", queue="success"
)
return HTTPSeeOther(request.route_path("admin.dashboard"))
@view_config(
route_name="admin.organization.list",
renderer="warehouse.admin:templates/admin/organizations/list.html",
permission=Permissions.AdminOrganizationsRead,
uses_session=True,
)
def organization_list(request):
q = request.params.get("q", "")
terms = shlex.split(q)
try:
page_num = int(request.params.get("page", 1))
except ValueError:
raise HTTPBadRequest("'page' must be an integer.") from None
organizations_query = (
request.db.query(Organization)
.options(joinedload(Organization.subscriptions))
.order_by(Organization.normalized_name)
)
if q:
filters: list = []
for term in terms:
# Examples:
# - search individual words or "whole phrase" in any field
# - name:psf
# - org:python
# - organization:python
# - url:.org
# - desc:word
# - description:word
# - description:"whole phrase"
# - is:active
# - is:inactive
# - type:company
# - type:community
try:
field, value = term.lower().split(":", 1)
except ValueError:
field, value = "", term
if field == "name":
# Add filter for `name` or `normalized_name` fields.
filters.append(
[
Organization.name.ilike(f"%{value}%"),
Organization.normalized_name.ilike(f"%{value}%"),
]
)
elif field == "org" or field == "organization":
# Add filter for `display_name` field.
filters.append(Organization.display_name.ilike(f"%{value}%"))
elif field == "url" or field == "link_url":
# Add filter for `link_url` field.
filters.append(Organization.link_url.ilike(f"%{value}%"))
elif field == "desc" or field == "description":
# Add filter for `description` field.
filters.append(Organization.description.ilike(f"%{value}%"))
elif field == "is":
# Add filter for `is_active` field.
if "active".startswith(value):
filters.append(Organization.is_active == True) # noqa: E712
elif "inactive".startswith(value):
filters.append(Organization.is_active == False) # noqa: E712
elif field == "type":
if "company".startswith(value):
filters.append(Organization.orgtype == OrganizationType.Company)
elif "community".startswith(value):
filters.append(Organization.orgtype == OrganizationType.Community)
else:
# Add filter for any field.
filters.append(
[
Organization.name.ilike(f"%{term}%"),
Organization.normalized_name.ilike(f"%{term}%"),
Organization.display_name.ilike(f"%{term}%"),
Organization.link_url.ilike(f"%{term}%"),
Organization.description.ilike(f"%{term}%"),
]
)
# Use AND to add each filter. Use OR to combine subfilters.
for filter_or_subfilters in filters:
if isinstance(filter_or_subfilters, list):
# Add list of subfilters combined with OR.
filter_or_subfilters = filter_or_subfilters or [True]
organizations_query = organizations_query.filter(
or_(False, *filter_or_subfilters)
)
else:
# Add single filter.
organizations_query = organizations_query.filter(filter_or_subfilters)
organizations = SQLAlchemyORMPage(
organizations_query,
page=page_num,
items_per_page=25,
url_maker=paginate_url_factory(request),
)
return {"organizations": organizations, "query": q, "terms": terms}
@view_config(
route_name="admin.organization.detail",
renderer="warehouse.admin:templates/admin/organizations/detail.html",
permission=Permissions.AdminOrganizationsRead,
request_method="GET",
has_translations=True,
uses_session=True,
require_csrf=True,
require_methods=False,
)
@view_config(
route_name="admin.organization.detail",
renderer="warehouse.admin:templates/admin/organizations/detail.html",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
has_translations=True,
uses_session=True,
require_csrf=True,
require_methods=False,
)
def organization_detail(request):
organization_service = request.find_service(IOrganizationService, context=None)
billing_service = request.find_service(IBillingService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
form = OrganizationForm(
request.POST if request.method == "POST" else None,
organization,
)
if request.method == "POST" and form.validate():
form.populate_obj(organization)
# Update Stripe customer if organization has one
if organization.customer is not None:
billing_service.update_customer(
organization.customer.customer_id,
organization.customer_name(request.registry.settings["site.name"]),
organization.description,
)
request.session.flash(
f"Organization {organization.name!r} updated successfully",
queue="success",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Sort roles by username
roles = sorted(organization.roles, key=lambda r: r.user.username)
# Create role forms for each existing role
role_forms = {role.id: OrganizationRoleForm(obj=role) for role in roles}
# Create form for adding new roles
add_role_form = AddOrganizationRoleForm()
# Create form for manual activation
manual_activation_form = ManualActivationForm()
# Create form for OIDC issuer management
oidc_issuer_form = OrganizationOIDCIssuerForm()
return {
"organization": organization,
"form": form,
"roles": roles,
"role_forms": role_forms,
"add_role_form": add_role_form,
"manual_activation_form": manual_activation_form,
"oidc_issuer_form": oidc_issuer_form,
"ONE_MIB": ONE_MIB,
"MAX_FILESIZE": MAX_FILESIZE,
"ONE_GIB": ONE_GIB,
"MAX_PROJECT_SIZE": MAX_PROJECT_SIZE,
"UPLOAD_LIMIT_CAP": UPLOAD_LIMIT_CAP,
}
@view_config(
route_name="admin.organization.rename",
require_methods=["POST"],
permission=Permissions.AdminOrganizationsNameWrite,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_rename(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
old_organization_name = organization.name
new_organization_name = request.params.get("new_organization_name").strip()
try:
organization_service.rename_organization(organization_id, new_organization_name)
except ValueError as exc:
request.session.flash(exc.args[0], queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
request.session.flash(
f'"{old_organization_name}" organization renamed "{new_organization_name}"',
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization_application.list",
renderer="warehouse.admin:templates/admin/organization_applications/list.html",
permission=Permissions.AdminOrganizationsRead,
uses_session=True,
)
def organization_applications_list(request):
q = request.params.get("q", "")
terms = shlex.split(q)
organization_applications_query = request.db.query(
OrganizationApplication
).order_by(OrganizationApplication.submitted)
if q:
filters: list = []
for term in terms:
# Examples:
# - search individual words or "whole phrase" in any field
# - name:psf
# - org:python
# - organization:python
# - url:.org
# - desc:word
# - description:word
# - description:"whole phrase"
# - is:submitted
# - is:declined
# - is:deferred
# - is:moreinformationneeded
# - is:approved
# - type:company
# - type:community
try:
field, value = term.lower().split(":", 1)
except ValueError:
field, value = "", term
if field == "name":
# Add filter for `name` or `normalized_name` fields.
filters.append(
[
OrganizationApplication.name.ilike(f"%{value}%"),
OrganizationApplication.normalized_name.ilike(f"%{value}%"),
]
)
elif field == "org" or field == "organization":
# Add filter for `display_name` field.
filters.append(OrganizationApplication.display_name.ilike(f"%{value}%"))
elif field == "url" or field == "link_url":
# Add filter for `link_url` field.
filters.append(OrganizationApplication.link_url.ilike(f"%{value}%"))
elif field == "desc" or field == "description":
# Add filter for `description` field.
filters.append(OrganizationApplication.description.ilike(f"%{value}%"))
elif field == "type":
if "company".startswith(value):
filters.append(
OrganizationApplication.orgtype == OrganizationType.Company
)
elif "community".startswith(value):
filters.append(
OrganizationApplication.orgtype == OrganizationType.Community
)
elif field == "is":
if value in OrganizationApplicationStatus:
filters.append(OrganizationApplication.status == value)
else:
# Add filter for any field.
filters.append(
[
OrganizationApplication.name.ilike(f"%{term}%"),
OrganizationApplication.normalized_name.ilike(f"%{term}%"),
OrganizationApplication.display_name.ilike(f"%{term}%"),
OrganizationApplication.link_url.ilike(f"%{term}%"),
OrganizationApplication.description.ilike(f"%{term}%"),
]
)
# Use AND to add each filter. Use OR to combine subfilters.
for filter_or_subfilters in filters:
if isinstance(filter_or_subfilters, list):
# Add list of subfilters combined with OR.
filter_or_subfilters = filter_or_subfilters or [True]
organization_applications_query = (
organization_applications_query.filter(
or_(False, *filter_or_subfilters)
)
)
else:
# Add single filter.
organization_applications_query = (
organization_applications_query.filter(filter_or_subfilters)
)
organization_applications_query = organization_applications_query.options(
joinedload(OrganizationApplication.observations)
)
return {
"organization_applications": organization_applications_query.all(),
"query": q,
"terms": terms,
}
| ManualActivationForm |
python | google__jax | jax/_src/errors.py | {
"start": 4724,
"end": 8360
} | class ____(JAXIndexError):
"""
This error occurs when a program attempts to use non-concrete boolean indices
in a traced indexing operation. Under JIT compilation, JAX arrays must have
static shapes (i.e. shapes that are known at compile-time) and so boolean
masks must be used carefully. Some logic implemented via boolean masking is
simply not possible in a :func:`jax.jit` function; in other cases, the logic
can be re-expressed in a JIT-compatible way, often using the three-argument
version of :func:`~jax.numpy.where`.
Following are a few examples of when this error might arise.
Constructing arrays via boolean masking
This most commonly arises when attempting to create an array via a boolean
mask within a JIT context. For example::
>>> import jax
>>> import jax.numpy as jnp
>>> @jax.jit
... def positive_values(x):
... return x[x > 0]
>>> positive_values(jnp.arange(-5, 5)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[10])
This function is attempting to return only the positive values in the input
array; the size of this returned array cannot be determined at compile-time
unless `x` is marked as static, and so operations like this cannot be
performed under JIT compilation.
Reexpressible Boolean Logic
Although creating dynamically sized arrays is not supported directly, in
many cases it is possible to re-express the logic of the computation in
terms of a JIT-compatible operation. For example, here is another function
that fails under JIT for the same reason::
>>> @jax.jit
... def sum_of_positive(x):
... return x[x > 0].sum()
>>> sum_of_positive(jnp.arange(-5, 5)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[10])
In this case, however, the problematic array is only an intermediate value,
and we can instead express the same logic in terms of the JIT-compatible
three-argument version of :func:`jax.numpy.where`::
>>> @jax.jit
... def sum_of_positive(x):
... return jnp.where(x > 0, x, 0).sum()
>>> sum_of_positive(jnp.arange(-5, 5))
Array(10, dtype=int32)
This pattern of replacing boolean masking with three-argument
:func:`~jax.numpy.where` is a common solution to this sort of problem.
Boolean indexing into JAX arrays
The other situation where this error often arises is when using boolean
indices, such as with :code:`.at[...].set(...)`. Here is a simple example::
>>> @jax.jit
... def manual_clip(x):
... return x.at[x < 0].set(0)
>>> manual_clip(jnp.arange(-2, 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NonConcreteBooleanIndexError: Array boolean indices must be concrete: ShapedArray(bool[4])
This function is attempting to set values smaller than zero to a scalar fill
value. As above, this can be addressed by re-expressing the logic in terms
of :func:`~jax.numpy.where`::
>>> @jax.jit
... def manual_clip(x):
... return jnp.where(x < 0, 0, x)
>>> manual_clip(jnp.arange(-2, 2))
Array([0, 0, 0, 1], dtype=int32)
"""
def __init__(self, tracer: core.Tracer):
super().__init__(
f"Array boolean indices must be concrete; got {tracer}\n")
@export
| NonConcreteBooleanIndexError |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 38484,
"end": 40787
} | class ____(base_classes.Shape):
def __init__(self, parent, key):
self._parent = parent
self.xl = parent.xl.shapes[key]
@property
def parent(self):
return self._parent
@property
def api(self):
return self.xl
@property
def name(self):
return self.xl.name.get()
@name.setter
def name(self, value):
self.xl.name.set(value)
@property
def type(self):
return shape_types_k2s[self.xl.shape_type.get()]
@property
def left(self):
return self.xl.left_position.get()
@left.setter
def left(self, value):
self.xl.left_position.set(value)
@property
def top(self):
return self.xl.top.get()
@top.setter
def top(self, value):
self.xl.top.set(value)
@property
def width(self):
return self.xl.width.get()
@width.setter
def width(self, value):
self.xl.width.set(value)
@property
def height(self):
return self.xl.height.get()
@height.setter
def height(self, value):
self.xl.height.set(value)
def delete(self):
self.xl.delete()
@property
def index(self):
return self.xl.entry_index.get()
def activate(self):
# self.xl.activate_object() # doesn't work?
self.xl.select()
def scale_height(self, factor, relative_to_original_size, scale):
self.xl.scale_height(
scale=scaling[scale],
relative_to_original_size=relative_to_original_size,
factor=factor,
)
def scale_width(self, factor, relative_to_original_size, scale):
self.xl.scale_width(
scale=scaling[scale],
relative_to_original_size=relative_to_original_size,
factor=factor,
)
@property
def text(self):
if self.xl.shape_text_frame.has_text.get():
return self.xl.shape_text_frame.text_range.content.get()
@text.setter
def text(self, value):
self.xl.shape_text_frame.text_range.content.set(value)
@property
def font(self):
return Font(self, self.xl.shape_text_frame.text_range.font)
@property
def characters(self):
raise AttributeError("Characters isn't supported on macOS with shapes.")
| Shape |
python | streamlit__streamlit | lib/streamlit/type_util.py | {
"start": 1708,
"end": 13019
} | class ____(Protocol):
"""Protocol for Streamlit native custom dictionaries (e.g. session state, secrets, query params).
that can be converted to a dict.
All these implementations should provide a to_dict method.
"""
def to_dict(self) -> dict[str, Any]: ...
@overload
def is_type(
obj: object, fqn_type_pattern: Literal["pydeck.bindings.deck.Deck"]
) -> TypeGuard[Deck]: ...
@overload
def is_type(
obj: object, fqn_type_pattern: Literal["plotly.graph_objs._figure.Figure"]
) -> TypeGuard[Figure]: ...
@overload
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool: ...
def is_type(obj: object, fqn_type_pattern: str | re.Pattern[str]) -> bool:
"""Check type without importing expensive modules.
Parameters
----------
obj : object
The object to type-check.
fqn_type_pattern : str or regex
The fully-qualified type string or a regular expression.
Regexes should start with `^` and end with `$`.
Example
-------
To check whether something is a Matplotlib Figure without importing
matplotlib, use:
>>> is_type(foo, "matplotlib.figure.Figure")
"""
fqn_type = get_fqn_type(obj)
if isinstance(fqn_type_pattern, str):
return fqn_type_pattern == fqn_type
return fqn_type_pattern.match(fqn_type) is not None
def _is_type_instance(obj: object, type_to_check: str) -> bool:
"""Check if instance of type without importing expensive modules."""
return type_to_check in [get_fqn(t) for t in type(obj).__mro__]
def get_object_name(obj: object) -> str:
"""Get a simplified name of the given object."""
if hasattr(obj, "__qualname__") and isinstance(obj.__qualname__, str):
return obj.__qualname__
if hasattr(obj, "__name__") and isinstance(obj.__name__, str):
return obj.__name__
return type(obj).__qualname__
def get_fqn(the_type: type) -> str:
"""Get module.type_name for a given type."""
return f"{the_type.__module__}.{the_type.__qualname__}"
def get_fqn_type(obj: object) -> str:
"""Get module.type_name for a given object."""
return get_fqn(type(obj))
_BYTES_LIKE_TYPES: Final[tuple[type, ...]] = (
bytes,
bytearray,
)
BytesLike: TypeAlias = bytes | bytearray
def is_bytes_like(obj: object) -> TypeGuard[BytesLike]:
"""True if the type is considered bytes-like for the purposes of
protobuf data marshalling.
"""
return isinstance(obj, _BYTES_LIKE_TYPES)
def to_bytes(obj: BytesLike) -> bytes:
"""Converts the given object to bytes.
Only types for which `is_bytes_like` is true can be converted; anything
else will result in an exception.
"""
if isinstance(obj, bytearray):
return bytes(obj)
if isinstance(obj, bytes):
return obj
raise RuntimeError(f"{obj} is not convertible to bytes")
_SYMPY_RE: Final = re.compile(r"^sympy.*$")
def is_sympy_expression(obj: object) -> TypeGuard[sympy.Expr]:
"""True if input is a SymPy expression."""
if not is_type(obj, _SYMPY_RE):
return False
try:
import sympy
return isinstance(obj, sympy.Expr)
except ImportError:
return False
_ALTAIR_RE: Final = re.compile(r"^altair\.vegalite\.v\d+\.api\.\w*Chart$")
def is_altair_chart(obj: object) -> bool:
"""True if input looks like an Altair chart."""
return is_type(obj, _ALTAIR_RE)
_PILLOW_RE: Final = re.compile(r"^PIL\..*")
def is_pillow_image(obj: object) -> bool:
"""True if input looks like a pillow image."""
return is_type(obj, _PILLOW_RE)
def is_keras_model(obj: object) -> bool:
"""True if input looks like a Keras model."""
return (
is_type(obj, "keras.engine.sequential.Sequential")
or is_type(obj, "keras.engine.training.Model")
or is_type(obj, "tensorflow.python.keras.engine.sequential.Sequential")
or is_type(obj, "tensorflow.python.keras.engine.training.Model")
)
# We use a regex here to allow potential changes in the module path in the future.
_OPENAI_CHUNK_RE: Final = re.compile(r"^openai\..+\.ChatCompletionChunk$")
def is_openai_chunk(obj: object) -> bool:
"""True if input looks like an OpenAI chat completion chunk."""
return is_type(obj, _OPENAI_CHUNK_RE)
def is_plotly_chart(obj: object) -> TypeGuard[Figure | list[Any] | dict[str, Any]]:
"""True if input looks like a Plotly chart."""
return (
is_type(obj, "plotly.graph_objs._figure.Figure")
or _is_list_of_plotly_objs(obj)
or _is_probably_plotly_dict(obj)
)
def is_graphviz_chart(
obj: object,
) -> TypeGuard[graphviz.Graph | graphviz.Digraph]:
"""True if input looks like a GraphViz chart."""
return (
# In GraphViz < 0.18
is_type(obj, "graphviz.dot.Graph")
or is_type(obj, "graphviz.dot.Digraph")
# In GraphViz >= 0.18
or is_type(obj, "graphviz.graphs.Graph")
or is_type(obj, "graphviz.graphs.Digraph")
or is_type(obj, "graphviz.sources.Source")
)
def _is_plotly_obj(obj: object) -> bool:
"""True if input if from a type that lives in plotly.plotly_objs."""
the_type = type(obj)
return the_type.__module__.startswith("plotly.graph_objs")
def _is_list_of_plotly_objs(obj: object) -> TypeGuard[list[Any]]:
if not isinstance(obj, list):
return False
if len(obj) == 0:
return False
return all(_is_plotly_obj(item) for item in obj)
def _is_probably_plotly_dict(obj: object) -> TypeGuard[dict[str, Any]]:
if not isinstance(obj, dict):
return False
if len(obj.keys()) == 0:
return False
if any(k not in ["config", "data", "frames", "layout"] for k in obj):
return False
if any(_is_plotly_obj(v) for v in obj.values()):
return True
return bool(any(_is_list_of_plotly_objs(v) for v in obj.values()))
def is_delta_generator(obj: object) -> TypeGuard[DeltaGenerator]:
"""True if input looks like a DeltaGenerator."""
# We are using a string here to avoid circular import warnings
# when importing DeltaGenerator.
return is_type(obj, "streamlit.delta_generator.DeltaGenerator")
def is_function(x: object) -> TypeGuard[types.FunctionType]:
"""Return True if x is a function."""
return isinstance(x, types.FunctionType)
def has_callable_attr(obj: object, name: str) -> bool:
"""True if obj has the specified attribute that is callable."""
return (
hasattr(obj, name)
and callable(getattr(obj, name))
# DeltaGenerator will return a callable wrapper for any method name,
# even if it doesn't exist.
and not is_delta_generator(obj)
)
def is_namedtuple(x: object) -> TypeGuard[NamedTuple]:
"""True if obj is an instance of a namedtuple."""
return isinstance(x, tuple) and has_callable_attr(x, "_asdict")
def is_dataclass_instance(obj: object) -> bool:
"""True if obj is an instance of a dataclass."""
# The not isinstance(obj, type) check is needed to make sure that this
# is an instance of a dataclass and not the class itself.
# dataclasses.is_dataclass returns True for either instance or class.
return dataclasses.is_dataclass(obj) and not isinstance(obj, type)
def is_pydeck(obj: object) -> TypeGuard[Deck]:
"""True if input looks like a pydeck chart."""
return is_type(obj, "pydeck.bindings.deck.Deck")
def is_pydantic_model(obj: object) -> bool:
"""True if input looks like a Pydantic model instance."""
if isinstance(obj, type):
# The obj is a class, but we
# only want to check for instances
# of Pydantic models, so we return False.
return False
return _is_type_instance(obj, "pydantic.main.BaseModel")
def _is_from_streamlit(obj: object) -> bool:
"""True if the object is from the streamlit package."""
return obj.__class__.__module__.startswith("streamlit")
def is_custom_dict(obj: object) -> TypeGuard[CustomDict]:
"""True if input looks like one of the Streamlit custom dictionaries."""
return (
isinstance(obj, Mapping)
and _is_from_streamlit(obj)
and has_callable_attr(obj, "to_dict")
)
def is_iterable(obj: object) -> TypeGuard[Iterable[Any]]:
try:
# The ignore statement here is intentional, as this is a
# perfectly fine way of checking for iterables.
iter(obj) # type: ignore[call-overload]
except TypeError:
return False
return True
def is_list_like(obj: object) -> TypeGuard[Sequence[Any]]:
"""True if input looks like a list."""
import array
if isinstance(obj, str):
return False
if isinstance(obj, (list, set, tuple)):
# Optimization to check the most common types first
return True
return isinstance(
obj,
(
array.ArrayType,
deque,
EnumMeta,
enumerate,
frozenset,
ItemsView,
KeysView,
map,
range,
UserList,
ValuesView,
),
)
def check_python_comparable(seq: Sequence[Any]) -> None:
"""Check if the sequence elements support "python comparison".
That means that the equality operator (==) returns a boolean value.
Which is not True for e.g. numpy arrays and pandas series.
"""
try:
bool(seq[0] == seq[0])
except LookupError:
# In case of empty sequences, the check not raise an exception.
pass
except ValueError:
raise StreamlitAPIException(
"Invalid option type provided. Options must be comparable, returning a "
f"boolean when used with *==*. \n\nGot **{type(seq[0]).__name__}**, "
"which cannot be compared. Refactor your code to use elements of "
"comparable types as options, e.g. use indices instead."
)
def is_altair_version_less_than(v: str) -> bool:
"""Return True if the current Altair version is less than the input version.
Parameters
----------
v : str
Version string, e.g. "0.25.0"
Returns
-------
bool
Raises
------
InvalidVersion
If the version strings are not valid.
"""
import altair as alt
return is_version_less_than(alt.__version__, v)
def is_version_less_than(v1: str, v2: str) -> bool:
"""Return True if the v1 version string is less than the v2 version string
based on semantic versioning.
Raises
------
InvalidVersion
If the version strings are not valid.
"""
from packaging import version
return version.parse(v1) < version.parse(v2)
def async_generator_to_sync(
async_gen: AsyncGenerator[Any, Any],
) -> Generator[Any, Any, Any]:
"""Convert an async generator to a synchronous generator."""
import asyncio
# Create a new event loop.
# It is expected that there is no existing event loop in the user thread.
loop = asyncio.new_event_loop()
try:
# Iterate over the async generator until it raises StopAsyncIteration
while True:
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
# The async generator has finished
pass
finally:
loop.close()
| CustomDict |
python | google__jax | tests/dtypes_test.py | {
"start": 34786,
"end": 52172
} | class ____(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": f"_{jaxtype=}", "jaxtype": jaxtype}
for jaxtype in dtypes._jax_types + dtypes._weak_types)
def testJaxTypeFromType(self, jaxtype):
if isinstance(jaxtype, np.dtype):
jaxtype = dtypes.canonicalize_dtype(jaxtype)
self.assertIs(dtypes._jax_type(*dtypes._dtype_and_weaktype(jaxtype)), jaxtype)
@parameterized.named_parameters(
{"testcase_name": f"_{jaxtype=}", "jaxtype": jaxtype}
for jaxtype in dtypes._jax_types + dtypes._weak_types)
def testJaxTypeFromVal(self, jaxtype):
if isinstance(jaxtype, np.dtype):
jaxtype = dtypes.canonicalize_dtype(jaxtype)
try:
val = jaxtype(0)
except TypeError:
val = jaxtype.type(0)
self.assertIs(dtypes._jax_type(*dtypes._dtype_and_weaktype(val)), jaxtype)
@parameterized.named_parameters(
{"testcase_name": f"_{dtype=}", "dtype": dtype}
for dtype in dtypes._jax_types)
def testJaxTypeWeak(self, dtype):
jax_type = dtypes._jax_type(dtype, weak_type=True)
if dtypes.issubdtype(jax_type, np.complexfloating):
self.assertIs(jax_type, complex)
elif dtypes.issubdtype(jax_type, np.floating):
self.assertIs(jax_type, float)
elif dtypes.issubdtype(jax_type, np.integer):
self.assertIs(jax_type, int)
else:
self.assertIs(jax_type, np.dtype(bool))
@parameterized.named_parameters(
{"testcase_name": f"_{typ}", "typ": typ}
for typ in [bool, int, float, complex])
def testScalarWeakTypes(self, typ):
# Regression test for https://github.com/jax-ml/jax/issues/11377
val = typ(0)
result1 = jnp.array(val)
result2 = jax.jit(jnp.array)(val)
self.assertEqual(result1.aval, result2.aval)
with jax.numpy_dtype_promotion('standard'):
f = lambda x: x / 2
result1 = jnp.array(f(val))
result2 = jax.jit(f)(val)
self.assertEqual(result1.aval, result2.aval)
def testResultTypeNone(self):
# This matches the behavior of np.result_type(None) => np.float64
self.assertEqual(dtypes.result_type(None), dtypes.default_float_dtype())
def testResultTypeWeakFlag(self):
float_ = dtypes.default_float_dtype()
x_weak = jnp.array(1.)
x_strong = x_weak.astype(float_)
self.assertEqual(dtypes.result_type(x_weak), float_)
self.assertEqual(dtypes.result_type(x_weak, return_weak_type_flag=True), (float_, True))
self.assertEqual(dtypes.result_type(x_strong), float_)
self.assertEqual(dtypes.result_type(x_strong, return_weak_type_flag=True), (float_, False))
@jtu.ignore_warning(category=UserWarning,
message="Explicitly requested dtype.*")
@jax.numpy_dtype_promotion('standard')
def testObservedPromotionTable(self):
"""Test that the weak & strong dtype promotion table does not change over time."""
# Note: * here refers to weakly-typed values
typecodes = \
['b1','u1','u2','u4','u8','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i*','f*','c*']
if config.enable_x64.value:
expected = [
['b1','u1','u2','u4','u8','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i*','f*','c*'],
['u1','u1','u2','u4','u8','i2','i2','i4','i8','bf','f2','f4','f8','c4','c8','u1','f*','c*'],
['u2','u2','u2','u4','u8','i4','i4','i4','i8','bf','f2','f4','f8','c4','c8','u2','f*','c*'],
['u4','u4','u4','u4','u8','i8','i8','i8','i8','bf','f2','f4','f8','c4','c8','u4','f*','c*'],
['u8','u8','u8','u8','u8','f*','f*','f*','f*','bf','f2','f4','f8','c4','c8','u8','f*','c*'],
['i1','i2','i4','i8','f*','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i1','f*','c*'],
['i2','i2','i4','i8','f*','i2','i2','i4','i8','bf','f2','f4','f8','c4','c8','i2','f*','c*'],
['i4','i4','i4','i8','f*','i4','i4','i4','i8','bf','f2','f4','f8','c4','c8','i4','f*','c*'],
['i8','i8','i8','i8','f*','i8','i8','i8','i8','bf','f2','f4','f8','c4','c8','i8','f*','c*'],
['bf','bf','bf','bf','bf','bf','bf','bf','bf','bf','f4','f4','f8','c4','c8','bf','bf','c4'],
['f2','f2','f2','f2','f2','f2','f2','f2','f2','f4','f2','f4','f8','c4','c8','f2','f2','c4'],
['f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f8','c4','c8','f4','f4','c4'],
['f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','c8','c8','f8','f8','c8'],
['c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c8','c4','c8','c4','c4','c4'],
['c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8'],
['i*','u1','u2','u4','u8','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i*','f*','c*'],
['f*','f*','f*','f*','f*','f*','f*','f*','f*','bf','f2','f4','f8','c4','c8','f*','f*','c*'],
['c*','c*','c*','c*','c*','c*','c*','c*','c*','c4','c4','c4','c8','c4','c8','c*','c*','c*'],
]
elif config.explicit_x64_dtypes.value == config.ExplicitX64Mode.ALLOW:
# This differs from enable_x64=True only because i4xu4 -> i4 instead of s8.
expected = [
['b1','u1','u2','u4','u8','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i*','f*','c*'],
['u1','u1','u2','u4','u8','i2','i2','i4','i8','bf','f2','f4','f8','c4','c8','u1','f*','c*'],
['u2','u2','u2','u4','u8','i4','i4','i4','i8','bf','f2','f4','f8','c4','c8','u2','f*','c*'],
['u4','u4','u4','u4','u8','i4','i4','i4','i8','bf','f2','f4','f8','c4','c8','u4','f*','c*'],
['u8','u8','u8','u8','u8','f*','f*','f*','f*','bf','f2','f4','f8','c4','c8','u8','f*','c*'],
['i1','i2','i4','i4','f*','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i1','f*','c*'],
['i2','i2','i4','i4','f*','i2','i2','i4','i8','bf','f2','f4','f8','c4','c8','i2','f*','c*'],
['i4','i4','i4','i4','f*','i4','i4','i4','i8','bf','f2','f4','f8','c4','c8','i4','f*','c*'],
['i8','i8','i8','i8','f*','i8','i8','i8','i8','bf','f2','f4','f8','c4','c8','i8','f*','c*'],
['bf','bf','bf','bf','bf','bf','bf','bf','bf','bf','f4','f4','f8','c4','c8','bf','bf','c4'],
['f2','f2','f2','f2','f2','f2','f2','f2','f2','f4','f2','f4','f8','c4','c8','f2','f2','c4'],
['f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f8','c4','c8','f4','f4','c4'],
['f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','f8','c8','c8','f8','f8','c8'],
['c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c8','c4','c8','c4','c4','c4'],
['c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8','c8'],
['i*','u1','u2','u4','u8','i1','i2','i4','i8','bf','f2','f4','f8','c4','c8','i*','f*','c*'],
['f*','f*','f*','f*','f*','f*','f*','f*','f*','bf','f2','f4','f8','c4','c8','f*','f*','c*'],
['c*','c*','c*','c*','c*','c*','c*','c*','c*','c4','c4','c4','c8','c4','c8','c*','c*','c*'],
]
else:
expected = [
['b1','u1','u2','u4','u4','i1','i2','i4','i4','bf','f2','f4','f4','c4','c4','i*','f*','c*'],
['u1','u1','u2','u4','u4','i2','i2','i4','i4','bf','f2','f4','f4','c4','c4','u1','f*','c*'],
['u2','u2','u2','u4','u4','i4','i4','i4','i4','bf','f2','f4','f4','c4','c4','u2','f*','c*'],
['u4','u4','u4','u4','u4','i4','i4','i4','i4','bf','f2','f4','f4','c4','c4','u4','f*','c*'],
['u4','u4','u4','u4','u4','i4','i4','i4','i4','bf','f2','f4','f4','c4','c4','u4','f*','c*'],
['i1','i2','i4','i4','i4','i1','i2','i4','i4','bf','f2','f4','f4','c4','c4','i1','f*','c*'],
['i2','i2','i4','i4','i4','i2','i2','i4','i4','bf','f2','f4','f4','c4','c4','i2','f*','c*'],
['i4','i4','i4','i4','i4','i4','i4','i4','i4','bf','f2','f4','f4','c4','c4','i4','f*','c*'],
['i4','i4','i4','i4','i4','i4','i4','i4','i4','bf','f2','f4','f4','c4','c4','i4','f*','c*'],
['bf','bf','bf','bf','bf','bf','bf','bf','bf','bf','f4','f4','f4','c4','c4','bf','bf','c4'],
['f2','f2','f2','f2','f2','f2','f2','f2','f2','f4','f2','f4','f4','c4','c4','f2','f2','c4'],
['f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','c4','c4','f4','f4','c4'],
['f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','c4','c4','f4','f4','c4'],
['c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4'],
['c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4','c4'],
['i*','u1','u2','u4','u4','i1','i2','i4','i4','bf','f2','f4','f4','c4','c4','i*','f*','c*'],
['f*','f*','f*','f*','f*','f*','f*','f*','f*','bf','f2','f4','f4','c4','c4','f*','f*','c*'],
['c*','c*','c*','c*','c*','c*','c*','c*','c*','c4','c4','c4','c4','c4','c4','c*','c*','c*'],
]
typecode_to_dtype = {
'b1': jnp.bool_,
'u1': jnp.uint8, 'u2': jnp.uint16, 'u4': jnp.uint32, 'u8': jnp.uint64,
'i1': jnp.int8, 'i2': jnp.int16, 'i4': jnp.int32, 'i8': jnp.int64,
'bf': jnp.bfloat16, 'f2': jnp.float16, 'f4': jnp.float32, 'f8': jnp.float64,
'c4': jnp.complex64, 'c8': jnp.complex128,
'i*': jnp.int64, 'f*': jnp.float64, 'c*': jnp.complex128,
}
dtype_to_typecode = {jnp.dtype(v): k for k, v in typecode_to_dtype.items()
if not k.endswith('*')}
def typecode_to_val(typecode):
weak_type = typecode.endswith('*')
dtype = typecode_to_dtype[typecode]
val = dtype(0)
if weak_type:
val = val.item()
return val
def val_to_typecode(val):
dtype = dtypes.result_type(val)
weak_type = dtypes.is_weakly_typed(val)
typecode = dtype_to_typecode[dtype]
if weak_type:
typecode = typecode[:-1] + '*'
return typecode
if config.explicit_x64_dtypes.value == config.ExplicitX64Mode.ERROR:
self.skipTest("Test uses x64 types")
vals = [typecode_to_val(t) for t in typecodes]
table = [[val_to_typecode(v1 + v2) for v1 in vals] for v2 in vals]
def show_differences(expected, actual):
diffs = ""
for i, t1 in enumerate(typecodes):
for j, t2 in enumerate(typecodes):
if expected[i][j] != actual[i][j]:
diffs += f"\n{t1}, {t2} -> want {expected[i][j]}, got {actual[i][j]}"
return diffs
self.assertEqual(table, expected, show_differences(expected, table))
@parameterized.named_parameters(
{"testcase_name": "_xtype={}_ytype={}_xfun={}_yfun={}".format(
xtype.__name__, ytype.__name__, xfun.__name__, yfun.__name__),
"xtype": xtype, "ytype": ytype, "xfun": xfun, "yfun": yfun}
for xtype, ytype in itertools.product(
[int, float, jnp.int16, jnp.int32, jnp.float16, jnp.float32], repeat=2)
for xfun, yfun in itertools.product(
[identity, abs, jnp.array], repeat=2)
)
@jax.numpy_dtype_promotion('standard')
def testBinaryPromotionJitInvariance(self, xtype, ytype, xfun, yfun):
"""Test jit invariance of simple binary promotion rules with and without weak types."""
f = lambda x, y: xfun(x) + yfun(y)
args_maker = lambda: [xtype(1), ytype(1)]
self._CompileAndCheck(f, args_maker, check_dtypes=True)
@parameterized.named_parameters(
{"testcase_name": f"_{dtype=}_{weak_type=}",
"dtype": dtype, "weak_type": weak_type}
for dtype in all_dtypes
for weak_type in [True, False]
)
def testUnaryPromotion(self, dtype, weak_type):
# Regression test for https://github.com/jax-ml/jax/issues/6051
if dtype in intn_dtypes:
self.skipTest("XLA support for int2 and int4 is incomplete.")
if dtype == dtypes.float8_e8m0fnu and jtu.test_device_matches(['tpu']):
self.skipTest("TPU does not support float8_e8m0fnu.")
if dtype == dtypes.float4_e2m1fn and jtu.test_device_matches(['tpu']):
self.skipTest("TPU does not support float4_e2m1fn.")
dtype = dtypes.canonicalize_dtype(dtype)
x = lax_internal._convert_element_type(0, dtype, weak_type=weak_type)
if weak_type:
expected = dtypes.default_types[
'f'
if x.dtype in ['bfloat16', *fp8_dtypes, *fp4_dtypes]
else x.dtype.kind
]()
else:
expected = x.dtype
self.assertEqual(dtypes.result_type(x), expected)
@jax.numpy_dtype_promotion('standard')
def testFloat8PromotionError(self):
for dtype in fp8_dtypes:
if dtype == dtypes.float8_e8m0fnu and jtu.test_device_matches(['tpu']):
# TPU does not support float8_e8m0fnu.
continue
x = jnp.array(1, dtype=dtype)
y = jnp.array(1, dtype='float32')
with self.assertRaisesRegex(dtypes.TypePromotionError,
".*8-bit floats do not support implicit promotion"):
x + y
@jax.numpy_dtype_promotion('standard')
def testFloat4PromotionError(self):
for dtype in fp4_dtypes:
if dtype == dtypes.float4_e2m1fn and jtu.test_device_matches(['tpu']):
# TPU does not support float4_e2m1fn.
continue
x = jnp.array(1, dtype=dtype)
y = jnp.array(1, dtype='float32')
with self.assertRaisesRegex(dtypes.TypePromotionError,
".*4-bit floats do not support implicit promotion"):
x + y
@jax.numpy_dtype_promotion('standard')
@jtu.run_on_devices('tpu')
def testInt2PromotionError(self):
for dtype in intn_dtypes:
if dtype.name == 'int2' or dtype.name == 'uint2':
# TODO(b/343490729): Remove continue once the bug is fixed.
continue
x = jnp.array(1, dtype=dtype)
y = jnp.array(1, dtype='int32')
with self.assertRaisesRegex(
dtypes.TypePromotionError,
'.*[24]-bit integers do not support implicit promotion',
):
x + y
@jtu.sample_product(
dtype=all_dtypes,
weak_type=[True, False],
promotion=['standard', 'strict'],
)
def testBinaryNonPromotion(self, dtype, weak_type, promotion):
if dtype in fp8_dtypes:
self.skipTest("XLA support for float8 is incomplete.")
if dtype in fp4_dtypes:
self.skipTest("XLA support for float4 is incomplete.")
if dtype in intn_dtypes:
self.skipTest("XLA support for int2 and int4 is incomplete.")
# Regression test for https://github.com/jax-ml/jax/issues/6051
dtype = dtypes.canonicalize_dtype(dtype)
x = lax_internal._convert_element_type(0, dtype, weak_type=weak_type)
with jax.numpy_dtype_promotion(promotion):
y = (x + x)
if promotion == 'standard' or not weak_type or dtype == dtypes.bool_:
expected_dtype = dtype
elif dtypes.issubdtype(dtype, np.complexfloating):
expected_dtype = np.complex128
elif dtypes.issubdtype(dtype, np.floating):
expected_dtype = np.float64
else:
expected_dtype = np.int64
# No boolean weak types.
expected_weak_type = weak_type and dtype != bool
expected_dtype = dtypes.canonicalize_dtype(expected_dtype)
self.assertEqual(y.dtype, expected_dtype)
self.assertEqual(dtypes.is_weakly_typed(y), expected_weak_type)
@parameterized.product(dtype=all_dtypes, weak_type=[True, False])
def testArrayRepr(self, dtype, weak_type):
if dtype in intn_dtypes:
if not jtu.test_device_matches(['tpu']):
self.skipTest('XLA support for int4 is incomplete.')
if dtypes.iinfo(dtype).bits == 2:
self.skipTest('XLA support for int2 is incomplete.')
if dtype == dtypes.float8_e8m0fnu and jtu.test_device_matches(['tpu']):
self.skipTest('TPU does not support float8_e8m0fnu.')
if dtype == dtypes.float4_e2m1fn and jtu.test_device_matches(['tpu']):
self.skipTest('TPU does not support float4_e2m1fn.')
dtype = dtypes.canonicalize_dtype(dtype)
val = lax_internal._convert_element_type(0, dtype, weak_type=weak_type)
rep = repr(val)
self.assertStartsWith(rep, 'Array(')
if weak_type:
self.assertEndsWith(rep, f"dtype={val.dtype.name}, weak_type=True)")
else:
self.assertEndsWith(rep, f"dtype={val.dtype.name})")
@jtu.sample_product(
input_dtype=jtu.dtypes.all + [bool, int, float, complex],
output_dtype=jtu.dtypes.all + [bool, int, float, complex],
numpy_dtype_promotion=['strict', 'standard']
)
def testSafeToCast(self, input_dtype, output_dtype, numpy_dtype_promotion):
with jax.numpy_dtype_promotion(numpy_dtype_promotion):
# First the special cases which are always safe:
always_safe = (
(input_dtype == output_dtype) or
(dtypes.issubdtype(output_dtype, np.integer) and input_dtype in {int}) or
(dtypes.issubdtype(output_dtype, np.floating) and input_dtype in {int, float}) or
(dtypes.issubdtype(output_dtype, np.complexfloating) and input_dtype in {int, float, complex})
)
if always_safe:
self.assertTrue(dtypes.safe_to_cast(input_dtype, output_dtype))
try:
result_dtype = dtypes.result_type(input_dtype, dtypes.canonicalize_dtype(output_dtype))
except dtypes.TypePromotionError:
result_dtype = None
if result_dtype is None and input_dtype != output_dtype:
with self.assertRaises(dtypes.TypePromotionError):
dtypes.safe_to_cast(input_dtype, output_dtype)
else:
self.assertEqual(dtypes.result_type(output_dtype) == result_dtype,
dtypes.safe_to_cast(input_dtype, output_dtype))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| TestPromotionTables |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/annotations1.py | {
"start": 379,
"end": 830
} | class ____(ClassA):
def func1(self) -> ClassA:
return ClassA()
# This should generate an error because ClassC
# is a forward reference, which is not allowed
# in a python source file.
def func2(self) -> ClassC | None:
return None
def func3(self) -> "ClassC | None":
return None
def func4(self) -> "ClassC | None":
return None
def func5(self) -> "int | None":
return None
| ClassB |
python | spack__spack | lib/spack/spack/extensions.py | {
"start": 6784,
"end": 7084
} | class ____(spack.error.SpackError):
"""Exception class thrown when a configured extension does not follow
the expected naming convention.
"""
def __init__(self, path):
super().__init__("{0} does not match the format for a Spack extension path.".format(path))
| ExtensionNamingError |
python | falconry__falcon | tests/test_headers.py | {
"start": 4504,
"end": 4676
} | class ____:
def __init__(self, vary):
self.vary = vary
def on_get(self, req, resp):
resp.text = '{}'
resp.vary = self.vary
| VaryHeaderResource |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py | {
"start": 2616,
"end": 3021
} | class ____:
def __exit__(self, __typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], tb: typing.Optional[TracebackType], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
| GoodTen |
python | davidhalter__parso | parso/python/tree.py | {
"start": 23756,
"end": 24513
} | class ____(PythonBaseNode):
__slots__ = ()
def get_path_for_name(self, name):
"""
The path is the list of names that leads to the searched name.
:return list of Name:
"""
try:
# The name may be an alias. If it is, just map it back to the name.
name = self._aliases()[name]
except KeyError:
pass
for path in self.get_paths():
if name in path:
return path[:path.index(name) + 1]
raise ValueError('Name should be defined in the import itself')
def is_nested(self):
return False # By default, sub classes may overwrite this behavior
def is_star_import(self):
return self.children[-1] == '*'
| Import |
python | pennersr__django-allauth | allauth/socialaccount/providers/notion/provider.py | {
"start": 850,
"end": 1747
} | class ____(OAuth2Provider):
id = "notion"
name = "Notion"
account_class = NotionAccount
oauth2_adapter_class = NotionOAuth2Adapter
def extract_uid(self, data):
"""
The unique identifier for Notion is a combination of the User ID
and the Workspace ID they have authorized the application with.
"""
user_id = data["owner"]["user"]["id"]
workspace_id = data["workspace_id"]
return "user-%s_workspace-%s" % (user_id, workspace_id)
def extract_common_fields(self, data):
user = data["owner"]["user"]
user["email"] = user["person"]["email"]
return user
def extract_email_addresses(self, data):
user = data["owner"]["user"]
email = user["person"]["email"]
return [EmailAddress(email=email, verified=False, primary=True)]
provider_classes = [NotionProvider]
| NotionProvider |
python | pytorch__pytorch | test/distributed/elastic/agent/server/test/api_test.py | {
"start": 10194,
"end": 15185
} | class ____(unittest.TestCase):
def setUp(self):
# Create minimal spec and agent for testing
self.spec = MagicMock()
self.spec.role = "test_role"
self.spec.get_entrypoint_name.return_value = "test_entrypoint"
self.spec.rdzv_handler.get_run_id.return_value = "test_run_id"
self.spec.rdzv_handler.get_backend.return_value = "test_backend"
self.spec.max_restarts = 3
self.agent = TestAgent(self.spec)
self.agent._remaining_restarts = 2
self.agent._total_execution_time = 42
# Setup worker group
self.worker_group = WorkerGroup(self.spec)
self.worker_group.group_world_size = 2
self.worker_group.group_rank = 1
self.agent._worker_group = self.worker_group
# Create a test worker
self.worker = Worker(
local_rank=0, global_rank=5, role_rank=3, world_size=8, role_world_size=4
)
self.worker.id = 12345
def test_construct_event_agent_success(self):
# Test constructing an agent success event
event = self.agent._construct_event(state="SUCCEEDED", source=EventSource.AGENT)
# Verify basic event properties
self.assertEqual(event.name, "torchelastic.worker.status.SUCCEEDED")
self.assertEqual(event.source, EventSource.AGENT)
# Verify metadata
metadata = event.metadata
self.assertEqual(metadata["run_id"], "test_run_id")
self.assertIsNone(metadata["global_rank"])
self.assertEqual(metadata["group_rank"], 1)
self.assertIsNone(metadata["worker_id"])
self.assertEqual(metadata["role"], "test_role")
self.assertEqual(metadata["state"], "SUCCEEDED")
self.assertEqual(metadata["total_run_time"], 42)
self.assertEqual(metadata["rdzv_backend"], "test_backend")
self.assertIsNone(metadata["raw_error"])
self.assertEqual(
metadata["agent_restarts"], 1
) # max_restarts - remaining_restarts
self.assertIsNone(metadata["duration_ms"])
# Verify JSON metadata
md_dict = json.loads(metadata["metadata"])
self.assertEqual(md_dict["group_world_size"], 2)
self.assertEqual(md_dict["entry_point"], "test_entrypoint")
def test_construct_event_worker_failure(self):
# Test constructing a worker failure event with raw error
raw_error = json.dumps(
{"error_message": "Test error", "traceback": "stack trace"}
)
event = self.agent._construct_event(
state="FAILED",
source=EventSource.WORKER,
worker=self.worker,
raw_error=raw_error,
exit_code=1,
)
# Verify basic event properties
self.assertEqual(event.name, "torchelastic.worker.status.FAILED")
self.assertEqual(event.source, EventSource.WORKER)
# Verify metadata
metadata = event.metadata
self.assertEqual(metadata["run_id"], "test_run_id")
self.assertEqual(metadata["global_rank"], 5)
self.assertEqual(metadata["group_rank"], 1)
self.assertEqual(metadata["worker_id"], "12345")
self.assertEqual(metadata["role"], "test_role")
self.assertEqual(metadata["state"], "FAILED")
self.assertEqual(metadata["total_run_time"], 42)
self.assertEqual(metadata["rdzv_backend"], "test_backend")
self.assertEqual(metadata["raw_error"], raw_error)
self.assertEqual(metadata["agent_restarts"], 1)
# Verify worker-specific metadata
md_dict = json.loads(metadata["metadata"])
self.assertEqual(md_dict["local_rank"], [0])
self.assertEqual(md_dict["role_rank"], [3])
self.assertEqual(md_dict["role_world_size"], [4])
self.assertEqual(md_dict["exit_code"], [1])
def test_construct_event_with_duration(self):
# Test constructing an event with duration_ms
event = self.agent._construct_event(
state="RENDEZVOUS", source=EventSource.AGENT, duration_ms=123.45
)
# Verify duration is set correctly
self.assertEqual(event.metadata["duration_ms"], 123.45)
def test_construct_event_worker_no_error(self):
# Test constructing a worker event without error info
event = self.agent._construct_event(
state="HEALTHY", source=EventSource.WORKER, worker=self.worker
)
# Verify error fields are None
metadata = event.metadata
self.assertIsNone(metadata["raw_error"])
# Check worker info is set
self.assertEqual(metadata["global_rank"], 5)
self.assertEqual(metadata["worker_id"], "12345")
# Check metadata JSON
md_dict = json.loads(metadata["metadata"])
self.assertEqual(md_dict["local_rank"], [0])
self.assertEqual(md_dict["role_rank"], [3])
self.assertEqual(md_dict["role_world_size"], [4])
self.assertNotIn("exit_code", [None])
| ConstructEventTest |
python | numpy__numpy | numpy/lib/tests/test_recfunctions.py | {
"start": 25452,
"end": 32751
} | class ____:
# Test stack_arrays
def _create_arrays(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
return w, x, y, z
def test_solo(self):
# Test stack_arrays on single arrays
x = self._create_arrays()[1]
test = stack_arrays((x,))
assert_equal(test, x)
assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
_, x, y, _ = self._create_arrays()
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
def test_unnamed_and_named_fields(self):
# Test combination of arrays w/ & w/o named fields
_, x, _, z = self._create_arrays()
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
def test_matching_named_fields(self):
# Test combination of arrays w/ matching field names
_, x, _, z = self._create_arrays()
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
# Test defaults: no exception raised if keys of defaults are not fields.
z = self._create_arrays()[-1]
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
# Tests autoconversion
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
with assert_raises(TypeError):
stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_subdtype(self):
z = np.array([
('A', 1), ('B', 2)
], dtype=[('A', '|S3'), ('B', float, (1,))])
zz = np.array([
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
res = stack_arrays((z, zz))
expected = ma.array(
data=[
(b'A', [1.0], 0),
(b'B', [2.0], 0),
(b'a', [10.0], 100.0),
(b'b', [20.0], 200.0),
(b'c', [30.0], 300.0)],
mask=[
(False, [False], True),
(False, [False], True),
(False, [False], False),
(False, [False], False),
(False, [False], False)
],
dtype=zz.dtype
)
assert_equal(res.dtype, expected.dtype)
assert_equal(res, expected)
assert_equal(res.mask, expected.mask)
| TestStackArrays |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qarithmetic_test.py | {
"start": 1323,
"end": 1991
} | class ____(_QFunctionalBinaryArithmeticBenchmarkBase):
def init(self, N, dtype, contig, op_func):
super().setup(N, dtype, contig)
self.inputs = {
"q_input_a": self.q_input_a,
"q_input_b": self.q_input_a,
"scale": self.scale,
"zero_point": self.zero_point,
}
self.op_func = op_func
def forward(self, q_input_a, q_input_b, scale: float, zero_point: int):
return self.op_func(q_input_a, q_input_b, scale=scale, zero_point=zero_point)
op_bench.generate_pt_tests_from_op_list(
qarithmetic_binary_ops, qarithmetic_binary_configs, QFunctionalBenchmark
)
| QFunctionalBenchmark |
python | huggingface__transformers | src/transformers/models/pix2struct/processing_pix2struct.py | {
"start": 912,
"end": 1495
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": False,
"return_length": False,
"verbose": True,
},
"images_kwargs": {
"max_patches": 2048,
},
}
logger = logging.get_logger(__name__)
| Pix2StructProcessorKwargs |
python | tensorflow__tensorflow | tensorflow/python/distribute/parallel_device/parallel_device_test.py | {
"start": 19610,
"end": 23919
} | class ____(_VirtualDeviceTestCase):
def test_layer_forward(self):
with self.device:
layer = _Dense(5)
x = constant_op.constant([[2.]])
y = layer(x)
outputs = self.device.unpack(y)
self.assertAllClose([[3.] * 5], outputs[0])
self.assertAllClose([[3.] * 5], outputs[1])
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
# With different Layer inputs we get different outputs
x = self.device.pack(
[constant_op.constant([[-0.5]]),
constant_op.constant([[0.5]])])
with self.device:
y = layer(x)
outputs = self.device.unpack(y)
self.assertGreater(
math_ops.reduce_max(math_ops.abs(outputs[0] - outputs[1])), 1e-5)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_layer_sync_training(self):
x = self.device.pack(
[constant_op.constant([[-0.5]]),
constant_op.constant([[0.5]])])
with self.device:
layer = _Dense(5)
with backprop.GradientTape() as tape:
y = layer(x)
loss = (y - math_ops.range(5.))**2.
parameters = layer.trainable_variables
unreduced_gradients = tape.gradient(loss, parameters)
reduced_gradients = _collective_sum(unreduced_gradients, num_replicas=2)
for grad, param in zip(reduced_gradients, parameters):
param.assign_sub(0.01 * grad)
final_kernels = self.device.unpack(layer.kernel)
self.assertAllClose(final_kernels[0], final_kernels[1])
final_bias = self.device.unpack(layer.bias)
expected_bias = (1. - 0.01 * 2. * (1. + .5 - math_ops.range(5.)) -
0.01 * 2. * (1. - .5 - math_ops.range(5.)))
self.assertAllClose(expected_bias, final_bias[0], rtol=1e-4, atol=1e-4)
self.assertAllClose(expected_bias, final_bias[1], rtol=1e-4, atol=1e-4)
self.assertIn(self.device.components[0], final_kernels[0].backing_device)
self.assertIn(self.device.components[1], final_kernels[1].backing_device)
def test_layer_divergent_buffer_training(self):
x = self.device.pack(
[constant_op.constant([[-0.5]]),
constant_op.constant([[0.5]])])
with self.device:
layer = _Dense(5)
with backprop.GradientTape() as tape:
y = layer(x)
loss = (y - math_ops.range(5.))**2.
parameters = layer.trainable_variables
unreduced_gradients = tape.gradient(loss, parameters)
for grad, param in zip(unreduced_gradients, parameters):
param.assign_sub(0.01 * grad)
final_kernels = self.device.unpack(layer.kernel)
self.assertNotAllClose(final_kernels[0], final_kernels[1])
final_bias = self.device.unpack(layer.bias)
self.assertAllClose(1. - 0.01 * 2. * (1. - .5 - math_ops.range(5.)),
final_bias[0])
self.assertAllClose(1. - 0.01 * 2. * (1. + .5 - math_ops.range(5.)),
final_bias[1])
self.assertIn(self.device.components[0], final_kernels[0].backing_device)
self.assertIn(self.device.components[1], final_kernels[1].backing_device)
def test_training_loop(self):
self.skipTest("b/216201668: revisit parallel device and checkpointing")
for _ in range(5):
layer = _Dense(5)
checkpoint = tracking.Checkpoint(layer=layer)
manager = checkpoint_management.CheckpointManager(
checkpoint, directory=self.get_temp_dir(), max_to_keep=5)
manager.restore_or_initialize()
for _ in range(10):
x = self.device.pack(
[constant_op.constant([[-0.5]]),
constant_op.constant([[0.5]])])
with self.device:
with backprop.GradientTape() as tape:
y = layer(x)
loss = (y - math_ops.range(5.))**2.
parameters = layer.trainable_variables
unreduced_gradients = tape.gradient(loss, parameters)
reduced_gradients = _collective_sum(
unreduced_gradients, num_replicas=len(self.device.components))
for grad, param in zip(reduced_gradients, parameters):
param.assign_sub(0.01 * grad)
manager.save()
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| LayerTests |
python | astropy__astropy | astropy/utils/masked/tests/test_functions.py | {
"start": 20358,
"end": 22085
} | class ____(MaskedArraySetup):
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort1(self, axis):
ma_lexsort = np.lexsort((self.ma,), axis=axis)
filled = self.a.copy()
filled[self.mask_a] = 9e9
expected_data = filled.argsort(axis)
assert_array_equal(ma_lexsort, expected_data)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort2(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.ma, mb), axis=axis)
filled_a = self.ma.filled(9e9)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((filled_a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.ma), axis=axis)
expected_ba = np.lexsort((filled_b, filled_a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.ma], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort_mix(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.a, mb), axis=axis)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((self.a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.a), axis=axis)
expected_ba = np.lexsort((filled_b, self.a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.a], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
| TestMaskedArraySorting |
python | openai__openai-python | src/openai/types/responses/response_apply_patch_tool_call.py | {
"start": 1152,
"end": 1885
} | class ____(BaseModel):
id: str
"""The unique ID of the apply patch tool call.
Populated when this item is returned via API.
"""
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
operation: Operation
"""
One of the create_file, delete_file, or update_file operations applied via
apply_patch.
"""
status: Literal["in_progress", "completed"]
"""The status of the apply patch tool call. One of `in_progress` or `completed`."""
type: Literal["apply_patch_call"]
"""The type of the item. Always `apply_patch_call`."""
created_by: Optional[str] = None
"""The ID of the entity that created this tool call."""
| ResponseApplyPatchToolCall |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 3258,
"end": 3640
} | class ____(str):
@typing_extensions.overload
def __new__(cls, foo: int) -> StrangeButAcceptableSubclass:
...
@typing_extensions.overload
def __new__(cls, *args: Any, **kwargs: Any) -> StrangeButAcceptable:
...
def __str__(self) -> StrangeButAcceptable:
...
def __repr__(self) -> StrangeButAcceptable:
...
| StrangeButAcceptable |
python | sqlalchemy__sqlalchemy | test/orm/test_options.py | {
"start": 45051,
"end": 50329
} | class ____(PathTest, QueryTest):
run_create_tables = False
run_inserts = None
run_deletes = None
def _assert_opts(self, q, sub_opt, non_sub_opts):
attr_a = {}
q1 = q.options(sub_opt)._compile_context()
q2 = q.options(*non_sub_opts)._compile_context()
attr_a = {
k: v
for k, v in q1.attributes.items()
if isinstance(k, tuple) and k[0] == "loader"
}
attr_b = {
k: v
for k, v in q2.attributes.items()
if isinstance(k, tuple) and k[0] == "loader"
}
def strat_as_tuple(strat):
return (
strat.strategy,
strat.local_opts,
getattr(strat, "_of_type", None),
strat.is_class_strategy,
strat.is_opts_only,
)
eq_(
{path: strat_as_tuple(load) for path, load in attr_a.items()},
{path: strat_as_tuple(load) for path, load in attr_b.items()},
)
def test_one(self):
User, Address, Order, Item, SubItem = self.classes(
"User", "Address", "Order", "Item", "SubItem"
)
sub_opt = joinedload(User.orders).options(
joinedload(Order.items).options(defer(Item.description)),
defer(Order.description),
)
non_sub_opts = [
joinedload(User.orders),
defaultload(User.orders)
.joinedload(Order.items)
.defer(Item.description),
defaultload(User.orders).defer(Order.description),
]
sess = fixture_session()
self._assert_opts(sess.query(User), sub_opt, non_sub_opts)
def test_two(self):
User, Address, Order, Item, SubItem = self.classes(
"User", "Address", "Order", "Item", "SubItem"
)
sub_opt = defaultload(User.orders).options(
joinedload(Order.items),
defaultload(Order.items).options(subqueryload(Item.keywords)),
defer(Order.description),
)
non_sub_opts = [
defaultload(User.orders)
.joinedload(Order.items)
.subqueryload(Item.keywords),
defaultload(User.orders).defer(Order.description),
]
sess = fixture_session()
self._assert_opts(sess.query(User), sub_opt, non_sub_opts)
def test_three(self):
User, Address, Order, Item, SubItem = self.classes(
"User", "Address", "Order", "Item", "SubItem"
)
sub_opt = defaultload(User.orders).options(defer("*"))
non_sub_opts = [defaultload(User.orders).defer("*")]
sess = fixture_session()
self._assert_opts(sess.query(User), sub_opt, non_sub_opts)
def test_four(self):
User, Address, Order, Item, SubItem, Keyword = self.classes(
"User", "Address", "Order", "Item", "SubItem", "Keyword"
)
sub_opt = joinedload(User.orders).options(
defer(Order.description),
joinedload(Order.items).options(
joinedload(Item.keywords).options(defer(Keyword.name)),
defer(Item.description),
),
)
non_sub_opts = [
joinedload(User.orders),
defaultload(User.orders).defer(Order.description),
defaultload(User.orders).joinedload(Order.items),
defaultload(User.orders)
.defaultload(Order.items)
.joinedload(Item.keywords),
defaultload(User.orders)
.defaultload(Order.items)
.defer(Item.description),
defaultload(User.orders)
.defaultload(Order.items)
.defaultload(Item.keywords)
.defer(Keyword.name),
]
sess = fixture_session()
self._assert_opts(sess.query(User), sub_opt, non_sub_opts)
def test_five(self):
User, Address, Order, Item, SubItem, Keyword = self.classes(
"User", "Address", "Order", "Item", "SubItem", "Keyword"
)
sub_opt = joinedload(User.orders).options(load_only(Order.description))
non_sub_opts = [
joinedload(User.orders),
defaultload(User.orders).load_only(Order.description),
]
sess = fixture_session()
self._assert_opts(sess.query(User), sub_opt, non_sub_opts)
def test_invalid_one(self):
User, Address, Order, Item, SubItem = self.classes(
"User", "Address", "Order", "Item", "SubItem"
)
with expect_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Item.keywords" does '
r"not link from "
r'relationship "User.orders"',
):
[
joinedload(User.orders).joinedload(Item.keywords),
defaultload(User.orders).joinedload(Order.items),
]
with expect_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Item.keywords" does '
r"not link from "
r'relationship "User.orders"',
):
joinedload(User.orders).options(
joinedload(Item.keywords), joinedload(Order.items)
)
| SubOptionsTest |
python | walkccc__LeetCode | solutions/785. Is Graph Bipartite?/785-2.py | {
"start": 79,
"end": 702
} | class ____:
def isBipartite(self, graph: list[list[int]]) -> bool:
colors = [Color.WHITE] * len(graph)
def isValidColor(u: int, color: Color) -> bool:
# The painted color should be same as `color`.
if colors[u] != Color.WHITE:
return colors[u] == color
colors[u] = color
# All the children should have valid colors.
childrenColor = Color.RED if colors[u] == Color.GREEN else Color.GREEN
return all(isValidColor(v, childrenColor) for v in graph[u])
return all(colors[i] != Color.WHITE or isValidColor(i, Color.RED)
for i in range(len(graph)))
| Solution |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/custom_io_manager.py | {
"start": 1291,
"end": 1556
} | class ____(dg.ConfigurableIOManagerFactory):
api_token: str
def create_io_manager(self, context) -> ExternalIOManager:
return ExternalIOManager(self.api_token)
# end_io_manager_factory_marker
# start_partitioned_marker
| ConfigurableExternalIOManager |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis16.py | {
"start": 315,
"end": 2159
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis16.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [43572608, 43812736]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
chart.set_x_axis(
{"minor_unit": 14, "major_unit": 1, "major_unit_type": "months"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 44867,
"end": 46666
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("lv_LV")
Faker.seed(0)
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.first_names
assert name in LvProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.first_names
assert name in LvProvider.first_names_male
def test_last_name(self):
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.last_names_female + LvProvider.last_names_nonbinary
# Females only last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.last_names_female
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.last_names_male + LvProvider.last_names_nonbinary
# Male only last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in LvProvider.last_names_male
| TestLvLV |
python | numba__numba | numba/tests/npyufunc/test_vectorize_decor.py | {
"start": 4067,
"end": 4504
} | class ____(BaseVectorizeUnrecognizedArg):
def test_target_cpu_unrecognized_arg(self):
self._test_target_unrecognized_arg('cpu')
def test_target_cpu_unrecognized_arg_no_sig(self):
self._test_target_unrecognized_arg('cpu', False)
def test_target_parallel_unrecognized_arg(self):
self._test_target_unrecognized_arg('parallel')
if __name__ == '__main__':
unittest.main()
| TestVectorizeUnrecognizedArg |
python | great-expectations__great_expectations | great_expectations/render/renderer/slack_renderer.py | {
"start": 537,
"end": 8128
} | class ____(Renderer):
def render(
self,
validation_result: ExpectationSuiteValidationResult,
data_docs_pages: dict[ValidationResultIdentifier, dict[str, str]] | None = None,
notify_with: list[str] | None = None,
validation_result_urls: list[str] | None = None,
) -> list[dict]:
data_docs_pages = data_docs_pages or {}
notify_with = notify_with or []
validation_result_urls = validation_result_urls or []
blocks: list[dict] = []
description_block = self._build_description_block(
validation_result=validation_result,
validation_result_urls=validation_result_urls,
)
blocks.append(description_block)
for data_docs_page in data_docs_pages.values():
report_element_block = self._build_report_element_block(
data_docs_page=data_docs_page, notify_with=notify_with
)
if report_element_block:
blocks.append(report_element_block)
return blocks
def _build_description_block(
self,
validation_result: ExpectationSuiteValidationResult,
validation_result_urls: list[str],
) -> dict:
validation_link = None
summary_text = ""
if validation_result_urls:
if len(validation_result_urls) == 1:
validation_link = validation_result_urls[0]
n_checks_succeeded = validation_result.statistics["successful_expectations"]
n_checks = validation_result.statistics["evaluated_expectations"]
check_details_text = f"*{n_checks_succeeded}* of *{n_checks}* Expectations were met"
expectation_suite_name = validation_result.suite_name
data_asset_name = validation_result.asset_name or "__no_data_asset_name__"
summary_text += f"\n*Asset*: `{data_asset_name}` "
# Slack does not allow links to local files due to security risks
# DataDocs links will be added in a block after this summary text when applicable
if validation_link and "file://" not in validation_link:
summary_text += (
f"\n*Expectation Suite*: {expectation_suite_name} <{validation_link}|View Results>"
)
else:
summary_text += f"\n*Expectation Suite*: `{expectation_suite_name}`"
summary_text += f"\n*Summary*: {check_details_text}"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": summary_text,
},
}
def concatenate_text_blocks(
self,
action_name: str,
text_blocks: list[dict],
success: bool,
checkpoint_name: str,
run_id: RunIdentifier,
) -> dict:
all_blocks = [
self._build_header(name=action_name, success=success, checkpoint_name=checkpoint_name)
]
all_blocks.append(self._build_run_time_block(run_id=run_id))
for block in text_blocks:
all_blocks.append(block)
all_blocks.append(self._build_divider())
return {"blocks": all_blocks}
def _build_header(self, name: str, success: bool, checkpoint_name: str) -> dict:
status = "Success :white_check_mark:" if success else "Failure :no_entry:"
return {
"type": "header",
"text": {
"type": "plain_text",
"text": f"{name} - {checkpoint_name} - {status}",
},
}
def _build_run_time_block(self, run_id: RunIdentifier) -> dict:
if run_id is not None:
run_time = datetime.fromisoformat(str(run_id.run_time))
formatted_run_time = run_time.strftime("%Y/%m/%d %I:%M %p")
return {
"type": "section",
"text": {"type": "plain_text", "text": f"Runtime: {formatted_run_time}"},
}
def _build_divider(self) -> dict:
return {"type": "divider"}
def _build_footer(self) -> dict:
documentation_url = "https://docs.greatexpectations.io/docs/terms/data_docs"
return {
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": f"Learn how to review validation results in Data Docs: {documentation_url}", # noqa: E501 # FIXME CoP
}
],
}
def _get_report_element(self, docs_link: str) -> dict[str, Any] | None:
report_element = None
if docs_link:
try:
# Slack does not allow links to local files due to security risks
if "file://" in docs_link:
report_element = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*DataDocs* can be found here: `{docs_link}` \n (Please copy and paste link into " # noqa: E501 # FIXME CoP
f"a browser to view)\n",
},
}
else:
report_element = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*DataDocs* can be found here: <{docs_link}|{docs_link}>",
},
}
except Exception as e:
logger.warning(
f"""SlackRenderer had a problem with generating the docs link.
link used to generate the docs link is: {docs_link} and is of type: {type(docs_link)}.
Error: {e}""" # noqa: E501 # FIXME CoP
)
return
else:
logger.warning("No docs link found. Skipping data docs link in Slack message.")
return report_element
def _build_report_element_block(
self, data_docs_page: dict[str, str], notify_with: list[str]
) -> dict | None:
if not data_docs_page:
return None
if notify_with:
for docs_link_key in notify_with:
if docs_link_key in data_docs_page:
docs_link = data_docs_page[docs_link_key]
report_element = self._get_report_element(docs_link)
else:
logger.critical(
f"*ERROR*: Slack is trying to provide a link to the following DataDocs: `"
f"{docs_link_key!s}`, but it is not configured under `data_docs_sites` in the " # noqa: E501 # FIXME CoP
f"`great_expectations.yml`\n"
)
report_element = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*ERROR*: Slack is trying to provide a link to the following DataDocs: " # noqa: E501 # FIXME CoP
f"`{docs_link_key!s}`, but it is not configured under "
f"`data_docs_sites` in the `great_expectations.yml`\n",
},
}
if report_element:
return report_element
else:
for docs_link_key, docs_link in data_docs_page.items():
if docs_link_key == "class":
continue
report_element = self._get_report_element(docs_link)
return report_element
return None
| SlackRenderer |
python | getsentry__sentry | tests/sentry/releases/use_cases/test_release_serializer.py | {
"start": 346,
"end": 16531
} | class ____(TestCase):
"""
Tests for the releases.use_cases.release.serialize function.
This tests the NEW serializer that fixes the per-project newGroups calculation,
as opposed to the old model-based serializer in api.serializers.models.release.
"""
def test_new_groups_single_release_per_project(self):
"""
Test new groups counts for one release with multiple projects, each having different issue counts.
"""
project_a = self.create_project(name="Project A", slug="project-a")
project_b = self.create_project(
name="Project B", slug="project-b", organization=project_a.organization
)
# Create release in projects A and B
release_version = "1.0.0"
release = Release.objects.create(
organization_id=project_a.organization_id, version=release_version
)
release.add_project(project_a)
release.add_project(project_b)
# 3 new groups for project A, 2 new groups for project B
ReleaseProject.objects.filter(release=release, project=project_a).update(new_groups=3)
ReleaseProject.objects.filter(release=release, project=project_b).update(new_groups=2)
result = release_serializer(
releases=[release],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[], # No environment filtering
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
# total new groups count (5 == 3 + 2)
assert serialized_release["newGroups"] == 5
# new groups count for each project (3 for A, 2 for B)
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 3
assert projects[project_b.id]["newGroups"] == 2
assert projects[project_a.id]["name"] == "Project A"
assert projects[project_a.id]["slug"] == "project-a"
assert projects[project_b.id]["name"] == "Project B"
assert projects[project_b.id]["slug"] == "project-b"
def test_new_groups_multiple_releases_per_project(self):
"""
Test new groups count for multiple releases per project.
"""
project_a = self.create_project(name="Project A", slug="project-a")
project_b = self.create_project(
name="Project B", slug="project-b", organization=project_a.organization
)
# Create releases 1 and 2, both in projects A and B
release_1 = Release.objects.create(
organization_id=project_a.organization_id, version="1.0.0"
)
release_1.add_project(project_a)
release_1.add_project(project_b)
release_2 = Release.objects.create(
organization_id=project_a.organization_id, version="2.0.0"
)
release_2.add_project(project_a)
release_2.add_project(project_b)
# Release 1.0.0 has 3 new groups for project A, 2 new groups for project B
ReleaseProject.objects.filter(release=release_1, project=project_a).update(new_groups=3)
ReleaseProject.objects.filter(release=release_1, project=project_b).update(new_groups=2)
# Release 2.0.0 has 1 new groups for project A, 4 new groups for project B
ReleaseProject.objects.filter(release=release_2, project=project_a).update(new_groups=1)
ReleaseProject.objects.filter(release=release_2, project=project_b).update(new_groups=4)
# 1. Serialize Release 1.0.0
result = release_serializer(
releases=[release_1],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
assert serialized_release["version"] == "1.0.0"
assert serialized_release["newGroups"] == 5 # total new groups count (5 == 3 + 2)
projects = {p["id"]: p for p in serialized_release["projects"]}
# new groups count for each project (3 for A, 2 for B)
assert projects[project_a.id]["newGroups"] == 3
assert projects[project_b.id]["newGroups"] == 2
# 2. Serialize Release 2.0.0
result = release_serializer(
releases=[release_2],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
assert serialized_release["version"] == "2.0.0"
assert serialized_release["newGroups"] == 5 # total new groups count (5 == 1 + 4)
projects = {p["id"]: p for p in serialized_release["projects"]}
# new groups count for each project (1 for A, 4 for B)
assert projects[project_a.id]["newGroups"] == 1
assert projects[project_b.id]["newGroups"] == 4
# 3. Serialize both releases together
result = release_serializer(
releases=[release_1, release_2],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[],
projects=[project_a, project_b],
)
assert len(result) == 2
serialized_releases = {r["version"]: r for r in result}
serialized_release_1 = serialized_releases["1.0.0"]
serialized_release_2 = serialized_releases["2.0.0"]
# both new group counts should be 5
assert serialized_release_1["newGroups"] == 5
assert serialized_release_2["newGroups"] == 5
# new groups counts for each project
projects_1 = {p["id"]: p for p in serialized_release_1["projects"]}
projects_2 = {p["id"]: p for p in serialized_release_2["projects"]}
assert projects_1[project_a.id]["newGroups"] == 3
assert projects_1[project_b.id]["newGroups"] == 2
assert projects_2[project_a.id]["newGroups"] == 1
assert projects_2[project_b.id]["newGroups"] == 4
def test_new_groups_environment_filtering(self):
"""
Test new group counts for a single release with environment filtering.
"""
project_a = self.create_project(name="Project A", slug="project-a")
project_b = self.create_project(
name="Project B", slug="project-b", organization=project_a.organization
)
production = self.create_environment(name="production", organization=project_a.organization)
staging = self.create_environment(name="staging", organization=project_a.organization)
release = Release.objects.create(organization_id=project_a.organization_id, version="1.0.0")
release.add_project(project_a)
release.add_project(project_b)
# 4 new groups for project A, 2 new groups for project B
ReleaseProject.objects.filter(release=release, project=project_a).update(new_groups=4)
ReleaseProject.objects.filter(release=release, project=project_b).update(new_groups=2)
# Project A: 3 issues in production, 1 issue in staging (total = 4)
ReleaseProjectEnvironment.objects.create(
release=release, project=project_a, environment=production, new_issues_count=3
)
ReleaseProjectEnvironment.objects.create(
release=release, project=project_a, environment=staging, new_issues_count=1
)
# Project B: 2 issues in production, 0 issues in staging (total = 2)
ReleaseProjectEnvironment.objects.create(
release=release, project=project_b, environment=production, new_issues_count=2
)
ReleaseProjectEnvironment.objects.create(
release=release, project=project_b, environment=staging, new_issues_count=0
)
# 1. No environment filter
result = release_serializer(
releases=[release],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 4
assert projects[project_b.id]["newGroups"] == 2
assert serialized_release["newGroups"] == 6
# 2. Filter by production environment
result = release_serializer(
releases=[release],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[production.id],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 3
assert projects[project_b.id]["newGroups"] == 2
assert serialized_release["newGroups"] == 5
# 3. Filter by staging environment
result = release_serializer(
releases=[release],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[staging.id],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 1
assert projects[project_b.id]["newGroups"] == 0
assert serialized_release["newGroups"] == 1
# 4. Filter by both environments
result = release_serializer(
releases=[release],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[production.id, staging.id],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 4
assert projects[project_b.id]["newGroups"] == 2
assert serialized_release["newGroups"] == 6
def test_new_groups_cross_project_release_environment(self):
"""
Test new group counts for multiple releases with different environments.
"""
project_a = self.create_project(name="Project A", slug="project-a")
project_b = self.create_project(
name="Project B", slug="project-b", organization=project_a.organization
)
production = self.create_environment(name="production", organization=project_a.organization)
staging = self.create_environment(name="staging", organization=project_a.organization)
release_1 = Release.objects.create(
organization_id=project_a.organization_id, version="1.0.0"
)
release_1.add_project(project_a)
release_1.add_project(project_b)
release_2 = Release.objects.create(
organization_id=project_a.organization_id, version="2.0.0"
)
release_2.add_project(project_a)
release_2.add_project(project_b)
# Release 1.0.0: Project A = 4 (3+1), Project B = 2 (2+0)
ReleaseProject.objects.filter(release=release_1, project=project_a).update(new_groups=4)
ReleaseProject.objects.filter(release=release_1, project=project_b).update(new_groups=2)
# Release 2.0.0: Project A = 3 (1+2), Project B = 5 (4+1)
ReleaseProject.objects.filter(release=release_2, project=project_a).update(new_groups=3)
ReleaseProject.objects.filter(release=release_2, project=project_b).update(new_groups=5)
# Release 1.0.0 - Project A: 3 in production, 1 in staging
ReleaseProjectEnvironment.objects.create(
release=release_1, project=project_a, environment=production, new_issues_count=3
)
ReleaseProjectEnvironment.objects.create(
release=release_1, project=project_a, environment=staging, new_issues_count=1
)
# Release 1.0.0 - Project B: 2 in production, 0 in staging (no staging record)
ReleaseProjectEnvironment.objects.create(
release=release_1, project=project_b, environment=production, new_issues_count=2
)
# Release 2.0.0 - Project A: 1 in production, 2 in staging
ReleaseProjectEnvironment.objects.create(
release=release_2, project=project_a, environment=production, new_issues_count=1
)
ReleaseProjectEnvironment.objects.create(
release=release_2, project=project_a, environment=staging, new_issues_count=2
)
# Release 2.0.0 - Project B: 4 in production, 1 in staging
ReleaseProjectEnvironment.objects.create(
release=release_2, project=project_b, environment=production, new_issues_count=4
)
ReleaseProjectEnvironment.objects.create(
release=release_2, project=project_b, environment=staging, new_issues_count=1
)
# 1. Serialize Release 1.0.0 with production filter
result = release_serializer(
releases=[release_1],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[production.id],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
assert serialized_release["version"] == "1.0.0"
assert serialized_release["newGroups"] == 5
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 3
assert projects[project_b.id]["newGroups"] == 2
# 2. Serialize Release 2.0.0 with production filter
result = release_serializer(
releases=[release_2],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[production.id],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
assert serialized_release["version"] == "2.0.0"
assert serialized_release["newGroups"] == 5
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 1
assert projects[project_b.id]["newGroups"] == 4
# 3. Serialize both releases together with production filter
result = release_serializer(
releases=[release_1, release_2],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[production.id],
projects=[project_a, project_b],
)
assert len(result) == 2
serialized_releases = {r["version"]: r for r in result}
serialized_release_1 = serialized_releases["1.0.0"]
serialized_release_2 = serialized_releases["2.0.0"]
assert serialized_release_1["newGroups"] == 5
assert serialized_release_2["newGroups"] == 5
projects_1 = {p["id"]: p for p in serialized_release_1["projects"]}
projects_2 = {p["id"]: p for p in serialized_release_2["projects"]}
assert projects_1[project_a.id]["newGroups"] == 3
assert projects_1[project_b.id]["newGroups"] == 2
assert projects_2[project_a.id]["newGroups"] == 1
assert projects_2[project_b.id]["newGroups"] == 4
# 5. Serialize Release 1.0.0 with no environment filter
result = release_serializer(
releases=[release_1],
user=self.user,
organization_id=project_a.organization_id,
environment_ids=[],
projects=[project_a, project_b],
)
assert len(result) == 1
serialized_release = result[0]
assert serialized_release["newGroups"] == 6
projects = {p["id"]: p for p in serialized_release["projects"]}
assert projects[project_a.id]["newGroups"] == 4
assert projects[project_b.id]["newGroups"] == 2
| ReleaseSerializerUseCaseTest |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 133718,
"end": 135777
} | class ____(CythonTransform):
def visit_StatListNode(self, node):
if not self.current_directives['remove_unreachable']:
return node
self.visitchildren(node)
if len(node.stats) == 1 and isinstance(node.stats[0], Nodes.StatListNode) and not node.stats[0].stats:
del node.stats[:]
for idx, stat in enumerate(node.stats, 1):
if stat.is_terminator:
if idx < len(node.stats):
if self.current_directives['warn.unreachable']:
warning(node.stats[idx].pos, "Unreachable code", 2)
node.stats = node.stats[:idx]
node.is_terminator = True
break
return node
def visit_IfClauseNode(self, node):
self.visitchildren(node)
if node.body.is_terminator:
node.is_terminator = True
return node
def visit_IfStatNode(self, node):
self.visitchildren(node)
if node.else_clause and node.else_clause.is_terminator:
for clause in node.if_clauses:
if not clause.is_terminator:
break
else:
node.is_terminator = True
return node
def visit_TryExceptStatNode(self, node):
self.visitchildren(node)
if node.body.is_terminator and node.else_clause:
if self.current_directives['warn.unreachable']:
warning(node.else_clause.pos, "Unreachable code", 2)
node.else_clause = None
return node
def visit_TryFinallyStatNode(self, node):
self.visitchildren(node)
if node.finally_clause.is_terminator:
node.is_terminator = True
return node
def visit_PassStatNode(self, node):
"""Eliminate useless PassStatNode"""
# 'pass' statements often appear in a separate line and must be traced.
if not self.current_directives['linetrace']:
node = Nodes.StatListNode(pos=node.pos, stats=[])
return node
| RemoveUnreachableCode |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 971,
"end": 6166
} | class ____(scale_discrete):
"""
Base class for discrete position scales
"""
def __post_init__(self):
super().__post_init__()
# Keeps two ranges, range and range_c
self._range_c = RangeContinuous()
if isinstance(self.limits, tuple):
self.limits = list(self.limits)
# All positions have no guide
self.guide = None
def reset(self):
# Can't reset discrete scale because
# no way to recover values
self._range_c.reset()
def is_empty(self) -> bool:
return super().is_empty() and self._range_c.is_empty()
def train(self, x, drop=False):
# The discrete position scale is capable of doing
# training for continuous data.
# This complicates training and mapping, but makes it
# possible to place objects at non-integer positions,
# as is necessary for jittering etc.
if array_kind.continuous(x):
self._range_c.train(x)
else:
self._range.train(x, drop=self.drop)
def map(self, x, limits=None):
# Discrete values are converted into integers starting
# at 1
if limits is None:
limits = self.final_limits
if array_kind.discrete(x):
# TODO: Rewrite without using numpy
seq = np.arange(1, len(limits) + 1)
idx = np.asarray(match(x, limits, nomatch=len(x)))
if not len(idx):
return []
try:
seq = seq[idx]
except IndexError:
# Deal with missing data
# - Insert NaN where there is no match
seq = np.hstack((seq.astype(float), np.nan))
idx = np.clip(idx, 0, len(seq) - 1)
seq = seq[idx]
return list(seq)
return list(x)
@property
def final_limits(self):
if self.is_empty():
return (0, 1)
elif self.limits is not None and not callable(self.limits):
return self.limits
elif self.limits is None:
# discrete range
return self._range.range
elif callable(self.limits):
limits = self.limits(self._range.range)
# Functions that return iterators e.g. reversed
if iter(limits) is limits:
limits = list(limits)
return limits
else:
raise PlotnineError("Lost, do not know what the limits are.")
def dimension(self, expand=(0, 0, 0, 0), limits=None):
"""
Get the phyical size of the scale
Unlike limits, this always returns a numeric vector of length 2
"""
from mizani.bounds import expand_range_distinct
if limits is None:
limits = self.final_limits
if self.is_empty():
return (0, 1)
if self._range.is_empty(): # only continuous
return expand_range_distinct(self._range_c.range, expand)
elif self._range_c.is_empty(): # only discrete
# FIXME: I think this branch should not exist
return expand_range_distinct((1, len(self.final_limits)), expand)
else: # both
# e.g categorical bar plot have discrete items, but
# are plot on a continuous x scale
a = np.hstack(
[
self._range_c.range,
expand_range_distinct((1, len(self._range.range)), expand),
]
)
return a.min(), a.max()
def expand_limits(
self,
limits: Sequence[str],
expand: tuple[float, float] | tuple[float, float, float, float],
coord_limits: tuple[float, float],
trans: trans,
) -> range_view:
# Turn discrete limits into a tuple of continuous limits
if self.is_empty():
climits = (0, 1)
else:
climits = (1, len(limits))
self._range_c.range
if coord_limits is not None:
# - Override None in coord_limits
# - Expand limits in coordinate space
# - Remove any computed infinite values &
c0, c1 = coord_limits
climits = (
climits[0] if c0 is None else c0,
climits[1] if c1 is None else c1,
)
# Expand discrete range
rv_d = expand_range(climits, expand, trans)
if self._range_c.is_empty():
return rv_d
# Expand continuous range
no_expand = self.default_expansion(0, 0)
rv_c = expand_range(self._range_c.range, no_expand, trans)
# Merge the ranges
rv = range_view(
range=(
min(chain(rv_d.range, rv_c.range)),
max(chain(rv_d.range, rv_c.range)),
),
range_coord=(
min(chain(rv_d.range_coord, rv_c.range_coord)),
max(chain(rv_d.range_coord, rv_c.range_coord)),
),
)
rv.range = min(rv.range), max(rv.range)
rv.range_coord = min(rv.range_coord), max(rv.range_coord)
return rv
@dataclass(kw_only=True)
| scale_position_discrete |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops.py | {
"start": 38547,
"end": 40669
} | class ____(Initializer):
"""Initializer that generates orthogonal kernel for ConvNets.
Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution.
Args:
gain: multiplicative factor to apply to the orthogonal matrix. Default is 1.
The 2-norm of an input is multiplied by a factor of `gain` after applying
this convolution.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: Default data type, used if no `dtype` argument is provided when
calling the initializer. Only floating point types are supported.
References:
[Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
"""
def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
self.gain = gain
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
self.seed = seed
def __call__(self, shape, dtype=None, partition_info=None):
raise NotImplementedError
def get_config(self):
return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
# Helper functions.
def _orthogonal_matrix(self, n):
"""Construct an n x n orthogonal matrix.
Args:
n: Dimension.
Returns:
A n x n orthogonal matrix.
"""
a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
if self.seed:
self.seed += 1
q, r = gen_linalg_ops.qr(a)
d = array_ops.diag_part(r)
# make q uniform
q *= math_ops.sign(d)
return q
def _symmetric_projection(self, n):
"""Compute a n x n symmetric projection matrix.
Args:
n: Dimension.
Returns:
A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.
"""
q = self._orthogonal_matrix(n)
# randomly zeroing out some columns
mask = math_ops.cast(
random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)
if self.seed:
self.seed += 1
c = math_ops.multiply(q, mask)
return math_ops.matmul(c, array_ops.matrix_transpose(c))
| ConvolutionOrthogonal |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1556847,
"end": 1557053
} | class ____(MultiTimeUnit):
"""UtcMultiTimeUnit schema wrapper."""
_schema = {"$ref": "#/definitions/UtcMultiTimeUnit"}
def __init__(self, *args):
super().__init__(*args)
| UtcMultiTimeUnit |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 54328,
"end": 67755
} | class ____:
@make_xp_test_case(signal.resample, signal.resample_poly)
@xfail_xp_backends("cupy", reason="does not raise with non-int upsampling factor")
def test_basic(self, xp):
# Some basic tests
# Regression test for issue #3603.
# window.shape must equal to sig.shape[0]
sig = xp.arange(128, dtype=xp.float64)
num = 256
win = signal.get_window(('kaiser', 8.0), 160, xp=xp)
assert_raises(ValueError, signal.resample, sig, num, window=win)
assert_raises(ValueError, signal.resample, sig, num, domain='INVALID')
# Other degenerate conditions
assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
assert_raises(ValueError, signal.resample_poly, sig, 1.3, 2)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1.3)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='')
assert_raises(ValueError, signal.resample_poly, sig, 2, 1,
padtype='mean', cval=10)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1, window=xp.eye(2))
# test for issue #6505 - should not modify window.shape when axis ≠ 0
sig2 = xp.tile(xp.arange(160, dtype=xp.float64), (2, 1))
signal.resample(sig2, num, axis=-1, window=win)
assert win.shape == (160,)
# Ensure coverage for parameter cval=None and cval != None:
x_ref = signal.resample_poly(sig, 2, 1)
x0 = signal.resample_poly(sig, 2, 1, padtype='constant')
x1 = signal.resample_poly(sig, 2, 1, padtype='constant', cval=0)
xp_assert_equal(x1, x_ref)
xp_assert_equal(x0, x_ref)
@pytest.mark.parametrize('window', (None, 'hamming'))
@pytest.mark.parametrize('N', (20, 19))
@pytest.mark.parametrize('num', (100, 101, 10, 11))
@make_xp_test_case(signal.resample)
def test_rfft(self, N, num, window, xp):
# Make sure the speed up using rfft gives the same result as the normal
# way using fft
dt_r = xp_default_dtype(xp)
dt_c = xp.complex64 if dt_r == xp.float32 else xp.complex128
x = xp.linspace(0, 10, N, endpoint=False)
y = xp.cos(-x**2/6.0)
desired = signal.resample(xp.astype(y, dt_c), num, window=window)
xp_assert_close(signal.resample(y, num, window=window),
xp.real(desired))
y = xp.stack([xp.cos(-x**2/6.0), xp.sin(-x**2/6.0)])
y_complex = xp.astype(y, dt_c)
resampled = signal.resample(y_complex, num, axis=1, window=window)
atol = 1e-9 if dt_r == xp.float64 else 3e-7
xp_assert_close(
signal.resample(y, num, axis=1, window=window),
xp.real(resampled),
atol=atol)
@make_xp_test_case(signal.resample)
def test_input_domain(self, xp):
# Test if both input domain modes produce the same results.
tsig = xp.astype(xp.arange(256), xp.complex128)
fsig = sp_fft.fft(tsig)
num = 256
xp_assert_close(
signal.resample(fsig, num, domain='freq'),
signal.resample(tsig, num, domain='time'),
atol=1e-9)
@pytest.mark.parametrize('nx', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('ny', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('dtype', ('float64', 'complex128'))
@make_xp_test_case(signal.resample)
def test_dc(self, nx, ny, dtype, xp):
dtype = getattr(xp, dtype)
x = xp.asarray([1] * nx, dtype=dtype)
y = signal.resample(x, ny)
xp_assert_close(y, xp.asarray([1] * ny, dtype=y.dtype))
@skip_xp_backends("cupy", reason="padtype not supported by upfirdn")
@pytest.mark.parametrize('padtype', padtype_options)
@make_xp_test_case(signal.resample_poly)
def test_mutable_window(self, padtype, xp):
# Test that a mutable window is not modified
impulse = xp.zeros(3)
window = xp.asarray(np.random.RandomState(0).randn(2))
window_orig = xp.asarray(window, copy=True)
signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype)
xp_assert_equal(window, window_orig)
@skip_xp_backends("cupy", reason="padtype not supported by upfirdn")
@make_xp_test_case(signal.resample_poly)
@pytest.mark.parametrize('padtype', padtype_options)
def test_output_float32(self, padtype, xp):
# Test that float32 inputs yield a float32 output
x = xp.arange(10, dtype=xp.float32)
h = xp.asarray([1, 1, 1], dtype=xp.float32)
y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype)
assert y.dtype == xp.float32
@pytest.mark.parametrize('padtype', padtype_options)
@pytest.mark.parametrize('dtype', ['float32', 'float64'])
@skip_xp_backends("cupy", reason="padtype not supported by upfirdn")
@make_xp_test_case(signal.resample_poly)
def test_output_match_dtype(self, padtype, dtype, xp):
# Test that the dtype of x is preserved per issue #14733
dtype = getattr(xp, dtype)
x = xp.arange(10, dtype=dtype)
y = signal.resample_poly(x, 1, 2, padtype=padtype)
assert y.dtype == x.dtype
@skip_xp_backends("cupy", reason="padtype not supported by upfirdn")
@pytest.mark.parametrize(
"method, ext, padtype",
[("fft", False, None)]
+ list(
product(
["polyphase"], [False, True], padtype_options,
)
),
)
@make_xp_test_case(signal.resample, signal.resample_poly)
def test_resample_methods(self, method, ext, padtype, xp):
# Test resampling of sinusoids and random noise (1-sec)
rate = 100
rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
# Sinusoids, windowed to avoid edge artifacts
t = xp.arange(rate, dtype=xp.float64) / float(rate)
freqs = xp.asarray((1., 10., 40.))[:, xp.newaxis]
x = xp.sin(2 * xp.pi * freqs * t) * hann(rate, xp=xp)
for rate_to in rates_to:
t_to = xp.arange(rate_to, dtype=xp.float64) / float(rate_to)
y_tos = xp.sin(2 * xp.pi * freqs * t_to) * hann(rate_to, xp=xp)
if method == 'fft':
y_resamps = signal.resample(x, rate_to, axis=-1)
else:
if ext and rate_to != rate:
# Match default window design
g = gcd(rate_to, rate)
up = rate_to // g
down = rate // g
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
window = signal.firwin(2 * half_len + 1, f_c,
window=('kaiser', 5.0))
window = xp.asarray(window)
polyargs = {'window': window, 'padtype': padtype}
else:
polyargs = {'padtype': padtype}
y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
**polyargs)
for i in range(y_tos.shape[0]):
y_to = y_tos[i, :]
y_resamp = y_resamps[i, :]
freq = float(freqs[i, 0])
if freq >= 0.5 * rate_to:
#y_to.fill(0.) # mostly low-passed away
y_to = xp.zeros_like(y_to) # mostly low-passed away
if padtype in ['minimum', 'maximum']:
xp_assert_close(y_resamp, y_to, atol=3e-1)
else:
xp_assert_close(y_resamp, y_to, atol=1e-3)
else:
assert y_to.shape == y_resamp.shape
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert corr > 0.99, (corr, rate, rate_to)
# Random data
rng = np.random.RandomState(0)
x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind
x = xp.asarray(x)
for rate_to in rates_to:
# random data
t_to = xp.arange(rate_to, dtype=xp.float64) / float(rate_to)
y_to = np.interp(t_to, t, x)
if method == 'fft':
y_resamp = signal.resample(x, rate_to)
else:
y_resamp = signal.resample_poly(x, rate_to, rate,
padtype=padtype)
assert y_to.shape == y_resamp.shape
corr = xp.asarray(np.corrcoef(y_to, y_resamp)[0, 1])
assert corr > 0.99, corr
# More tests of fft method (Master 0.18.1 fails these)
if method == 'fft':
x1 = xp.asarray([1.+0.j, 0.+0.j])
y1_test = signal.resample(x1, 4)
# upsampling a complex array
y1_true = xp.asarray([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j])
xp_assert_close(y1_test, y1_true, atol=1e-12)
x2 = xp.asarray([1., 0.5, 0., 0.5])
y2_test = signal.resample(x2, 2) # downsampling a real array
y2_true = xp.asarray([1., 0.])
xp_assert_close(y2_test, y2_true, atol=1e-12)
@pytest.mark.parametrize("n_in", (8, 9))
@pytest.mark.parametrize("n_out", (3, 4))
@make_xp_test_case(signal.resample)
def test_resample_win_func(self, n_in, n_out):
"""Test callable window function. """
x_in = np.ones(n_in)
def win(freqs):
"""Scale input by 1/2"""
return 0.5 * np.ones_like(freqs)
y0 = signal.resample(x_in, n_out)
y1 = signal.resample(x_in, n_out, window=win)
xp_assert_close(2*y1, y0, atol=1e-12)
@pytest.mark.parametrize("n_in", (6, 12))
@pytest.mark.parametrize("n_out", (3, 4))
@make_xp_test_case(signal.resample)
def test__resample_param_t(self, n_in, n_out):
"""Verify behavior for parameter `t`.
Note that only `t[0]` and `t[1]` are utilized.
"""
t0, dt = 10, 2
x_in = np.ones(n_in)
y0 = signal.resample(x_in, n_out)
y1, t1 = signal.resample(x_in, n_out, t=[t0, t0+dt])
t_ref = 10 + np.arange(len(y0)) * dt * n_in / n_out
xp_assert_equal(y1, y0) # no influence of `t`
xp_assert_close(t1, t_ref, atol=1e-12)
@pytest.mark.parametrize("n1", (2, 3, 7, 8))
@pytest.mark.parametrize("n0", (2, 3, 7, 8))
@make_xp_test_case(signal.resample)
def test_resample_nyquist(self, n0, n1):
"""Test behavior at Nyquist frequency to ensure issue #14569 is fixed. """
f_ny = min(n0, n1) // 2
tt = (np.arange(n_) / n_ for n_ in (n0, n1))
x0, x1 = (np.cos(2 * np.pi * f_ny * t_) for t_ in tt)
y1_r = signal.resample(x0, n1)
y1_c = signal.resample(x0 + 0j, n1)
xp_assert_close(y1_r, x1, atol=1e-12)
xp_assert_close(y1_c.real, x1, atol=1e-12)
@pytest.mark.parametrize('down_factor', [2, 11, 79])
@pytest.mark.parametrize("dtype", [int, np.float32, np.complex64, float, complex])
@make_xp_test_case(signal.resample_poly)
def test_poly_vs_filtfilt(self, down_factor, dtype, xp):
# Check that up=1.0 gives same answer as filtfilt + slicing
random_state = np.random.RandomState(17)
size = 10000
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
# resample_poly assumes zeros outside of signl, whereas filtfilt
# can only constant-pad. Make them equivalent:
x[0] = 0
x[-1] = 0
h = signal.firwin(31, 1. / down_factor, window='hamming')
yf = filtfilt(h, 1.0, x, padtype='constant')[::down_factor]
# Need to pass convolved version of filter to resample_poly,
# since filtfilt does forward and backward, but resample_poly
# only goes forward
hc = convolve(h, np.flip(h))
# Use yf.copy() to avoid negative strides, which are unsupported
# in torch.
x, hc, yf = map(xp.asarray, (x, hc, yf.copy()))
y = signal.resample_poly(x, 1, down_factor, window=hc)
xp_assert_close(yf, y, atol=3e-7, rtol=6e-7)
@make_xp_test_case(signal.resample_poly)
def test_correlate1d(self, xp):
for down in [2, 4]:
for nx in range(1, 40, down):
for nweights in (32, 33):
x = np.random.random((nx,))
weights = np.random.random((nweights,))
y_g = correlate1d(x, np.flip(weights), mode='constant')
x, weights, y_g = map(xp.asarray, (x, weights, y_g))
y_s = signal.resample_poly(
x, up=1, down=down, window=weights)
xp_assert_close(y_g[::down], y_s)
@make_xp_test_case(signal.resample_poly)
@pytest.mark.parametrize('dtype', ['int32', 'float32'])
@skip_xp_backends("cupy", reason="padtype not supported by upfirdn")
def test_gh_15620(self, dtype, xp):
dtype = getattr(xp, dtype)
data = xp.asarray([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
actual = signal.resample_poly(data,
up=2,
down=1,
padtype='smooth')
assert np.count_nonzero(actual) > 0
@make_xp_test_case(signal.cspline1d_eval)
| TestResample |
python | sqlalchemy__sqlalchemy | test/sql/test_text.py | {
"start": 11166,
"end": 18084
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_positional(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), bindparam("whee", 7))
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bar=4, whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_positional_plus_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={"bar": 4, "whee": 7},
)
def test_literal_binds(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam("bar", 4), whee="whee")
self.assert_compile(
t,
"select * from foo where lala=4 and hoho='whee'",
checkparams={},
literal_binds=True,
)
def _assert_type_map(self, t, compare):
map_ = {b.key: b.type for b in t._bindparams.values()}
for k in compare:
assert compare[k]._type_affinity is map_[k]._type_affinity
def test_typing_construction(self):
t = text("select * from table :foo :bar :bat")
self._assert_type_map(
t, {"foo": NullType(), "bar": NullType(), "bat": NullType()}
)
t = t.bindparams(bindparam("foo", type_=String))
self._assert_type_map(
t, {"foo": String(), "bar": NullType(), "bat": NullType()}
)
t = t.bindparams(bindparam("bar", type_=Integer))
self._assert_type_map(
t, {"foo": String(), "bar": Integer(), "bat": NullType()}
)
t = t.bindparams(bat=45.564)
self._assert_type_map(
t, {"foo": String(), "bar": Integer(), "bat": Float()}
)
def test_binds_compiled_named(self):
self.assert_compile(
text(
"select * from foo where lala=:bar and hoho=:whee"
).bindparams(bar=4, whee=7),
"select * from foo where lala=%(bar)s and hoho=%(whee)s",
checkparams={"bar": 4, "whee": 7},
dialect="postgresql",
)
def test_unique_binds(self):
# unique binds can be used in text() however they uniquify across
# multiple text() constructs only, not within a single text
t1 = text("select :foo").bindparams(bindparam("foo", 5, unique=True))
t2 = text("select :foo").bindparams(bindparam("foo", 10, unique=True))
stmt = select(t1, t2)
self.assert_compile(
stmt,
"SELECT select :foo_1, select :foo_2",
checkparams={"foo_1": 5, "foo_2": 10},
)
def test_binds_compiled_positional(self):
self.assert_compile(
text(
"select * from foo where lala=:bar and hoho=:whee"
).bindparams(bar=4, whee=7),
"select * from foo where lala=? and hoho=?",
checkparams={"bar": 4, "whee": 7},
dialect="sqlite",
)
def test_missing_bind_kw(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
foo=5,
bar=7,
)
def test_missing_bind_posn(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
bindparam("foo", value=5),
bindparam("bar", value=7),
)
def test_escaping_colons(self):
# test escaping out text() params with a backslash
self.assert_compile(
text(
r"select * from foo where clock='05:06:07' "
r"and mork='\:mindy'"
),
"select * from foo where clock='05:06:07' and mork=':mindy'",
checkparams={},
params={},
dialect="postgresql",
)
def test_escaping_double_colons(self):
self.assert_compile(
text(
r"SELECT * FROM pg_attribute WHERE "
r"attrelid = :tab\:\:regclass"
),
"SELECT * FROM pg_attribute WHERE attrelid = %(tab)s::regclass",
params={"tab": None},
dialect="postgresql",
)
def test_double_colons_dont_actually_need_escaping(self):
# this is news to me. bound param won't work but you can put the
# double colons in
self.assert_compile(
text(
r"SELECT * FROM pg_attribute WHERE "
r"attrelid = foo::regclass"
),
"SELECT * FROM pg_attribute WHERE attrelid = foo::regclass",
params={},
dialect="postgresql",
)
def test_text_in_select_nonfrom(self):
generate_series = text(
"generate_series(:x, :y, :z) as s(a)"
).bindparams(x=None, y=None, z=None)
s = select(
(func.current_date() + literal_column("s.a")).label("dates")
).select_from(generate_series)
self.assert_compile(
s,
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={"y": None, "x": None, "z": None},
)
self.assert_compile(
s.params(x=5, y=6, z=7),
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={"y": 6, "x": 5, "z": 7},
)
def test_escaping_percent_signs(self):
stmt = text("select '%' where foo like '%bar%'")
self.assert_compile(
stmt, "select '%' where foo like '%bar%'", dialect="sqlite"
)
self.assert_compile(
stmt, "select '%%' where foo like '%%bar%%'", dialect="mysql"
)
def test_percent_signs_literal_binds(self):
stmt = select(literal("percent % signs %%"))
self.assert_compile(
stmt,
"SELECT 'percent % signs %%' AS anon_1",
dialect="sqlite",
literal_binds=True,
)
self.assert_compile(
stmt,
"SELECT 'percent %% signs %%%%' AS anon_1",
dialect="mysql",
literal_binds=True,
)
| BindParamTest |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_repo_path_parsing.py | {
"start": 14792,
"end": 16634
} | class ____(BaseStacktraceLinkTest):
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_provider_integration(
provider="gitlab",
name="getsentry",
external_id="1234",
metadata={"domain_name": "gitlab.com/getsentry"},
)
self.oi = self.integration.add_organization(self.org, self.user)
self.repo = self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:gitlab",
integration_id=self.integration.id,
url="https://gitlab.com/getsentry/sentry",
)
def test_basic(self) -> None:
source_url = "https://gitlab.com/getsentry/sentry/-/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "gitlab",
"stackRoot": "sentry/",
"sourceRoot": "src/sentry/",
"defaultBranch": "master",
}
def test_skips_null_repo_url(self) -> None:
self.repo.update(url=None)
source_url = "https://gitlab.com/getsentry/sentry/-/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {"sourceUrl": ["Could not find repo"]}
| ProjectStacktraceLinkGitlabTest |
python | kubernetes-client__python | kubernetes/client/models/version_info.py | {
"start": 383,
"end": 14679
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'build_date': 'str',
'compiler': 'str',
'emulation_major': 'str',
'emulation_minor': 'str',
'git_commit': 'str',
'git_tree_state': 'str',
'git_version': 'str',
'go_version': 'str',
'major': 'str',
'min_compatibility_major': 'str',
'min_compatibility_minor': 'str',
'minor': 'str',
'platform': 'str'
}
attribute_map = {
'build_date': 'buildDate',
'compiler': 'compiler',
'emulation_major': 'emulationMajor',
'emulation_minor': 'emulationMinor',
'git_commit': 'gitCommit',
'git_tree_state': 'gitTreeState',
'git_version': 'gitVersion',
'go_version': 'goVersion',
'major': 'major',
'min_compatibility_major': 'minCompatibilityMajor',
'min_compatibility_minor': 'minCompatibilityMinor',
'minor': 'minor',
'platform': 'platform'
}
def __init__(self, build_date=None, compiler=None, emulation_major=None, emulation_minor=None, git_commit=None, git_tree_state=None, git_version=None, go_version=None, major=None, min_compatibility_major=None, min_compatibility_minor=None, minor=None, platform=None, local_vars_configuration=None): # noqa: E501
"""VersionInfo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._build_date = None
self._compiler = None
self._emulation_major = None
self._emulation_minor = None
self._git_commit = None
self._git_tree_state = None
self._git_version = None
self._go_version = None
self._major = None
self._min_compatibility_major = None
self._min_compatibility_minor = None
self._minor = None
self._platform = None
self.discriminator = None
self.build_date = build_date
self.compiler = compiler
if emulation_major is not None:
self.emulation_major = emulation_major
if emulation_minor is not None:
self.emulation_minor = emulation_minor
self.git_commit = git_commit
self.git_tree_state = git_tree_state
self.git_version = git_version
self.go_version = go_version
self.major = major
if min_compatibility_major is not None:
self.min_compatibility_major = min_compatibility_major
if min_compatibility_minor is not None:
self.min_compatibility_minor = min_compatibility_minor
self.minor = minor
self.platform = platform
@property
def build_date(self):
"""Gets the build_date of this VersionInfo. # noqa: E501
:return: The build_date of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._build_date
@build_date.setter
def build_date(self, build_date):
"""Sets the build_date of this VersionInfo.
:param build_date: The build_date of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and build_date is None: # noqa: E501
raise ValueError("Invalid value for `build_date`, must not be `None`") # noqa: E501
self._build_date = build_date
@property
def compiler(self):
"""Gets the compiler of this VersionInfo. # noqa: E501
:return: The compiler of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._compiler
@compiler.setter
def compiler(self, compiler):
"""Sets the compiler of this VersionInfo.
:param compiler: The compiler of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and compiler is None: # noqa: E501
raise ValueError("Invalid value for `compiler`, must not be `None`") # noqa: E501
self._compiler = compiler
@property
def emulation_major(self):
"""Gets the emulation_major of this VersionInfo. # noqa: E501
EmulationMajor is the major version of the emulation version # noqa: E501
:return: The emulation_major of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._emulation_major
@emulation_major.setter
def emulation_major(self, emulation_major):
"""Sets the emulation_major of this VersionInfo.
EmulationMajor is the major version of the emulation version # noqa: E501
:param emulation_major: The emulation_major of this VersionInfo. # noqa: E501
:type: str
"""
self._emulation_major = emulation_major
@property
def emulation_minor(self):
"""Gets the emulation_minor of this VersionInfo. # noqa: E501
EmulationMinor is the minor version of the emulation version # noqa: E501
:return: The emulation_minor of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._emulation_minor
@emulation_minor.setter
def emulation_minor(self, emulation_minor):
"""Sets the emulation_minor of this VersionInfo.
EmulationMinor is the minor version of the emulation version # noqa: E501
:param emulation_minor: The emulation_minor of this VersionInfo. # noqa: E501
:type: str
"""
self._emulation_minor = emulation_minor
@property
def git_commit(self):
"""Gets the git_commit of this VersionInfo. # noqa: E501
:return: The git_commit of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._git_commit
@git_commit.setter
def git_commit(self, git_commit):
"""Sets the git_commit of this VersionInfo.
:param git_commit: The git_commit of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and git_commit is None: # noqa: E501
raise ValueError("Invalid value for `git_commit`, must not be `None`") # noqa: E501
self._git_commit = git_commit
@property
def git_tree_state(self):
"""Gets the git_tree_state of this VersionInfo. # noqa: E501
:return: The git_tree_state of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._git_tree_state
@git_tree_state.setter
def git_tree_state(self, git_tree_state):
"""Sets the git_tree_state of this VersionInfo.
:param git_tree_state: The git_tree_state of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and git_tree_state is None: # noqa: E501
raise ValueError("Invalid value for `git_tree_state`, must not be `None`") # noqa: E501
self._git_tree_state = git_tree_state
@property
def git_version(self):
"""Gets the git_version of this VersionInfo. # noqa: E501
:return: The git_version of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._git_version
@git_version.setter
def git_version(self, git_version):
"""Sets the git_version of this VersionInfo.
:param git_version: The git_version of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and git_version is None: # noqa: E501
raise ValueError("Invalid value for `git_version`, must not be `None`") # noqa: E501
self._git_version = git_version
@property
def go_version(self):
"""Gets the go_version of this VersionInfo. # noqa: E501
:return: The go_version of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._go_version
@go_version.setter
def go_version(self, go_version):
"""Sets the go_version of this VersionInfo.
:param go_version: The go_version of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and go_version is None: # noqa: E501
raise ValueError("Invalid value for `go_version`, must not be `None`") # noqa: E501
self._go_version = go_version
@property
def major(self):
"""Gets the major of this VersionInfo. # noqa: E501
Major is the major version of the binary version # noqa: E501
:return: The major of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._major
@major.setter
def major(self, major):
"""Sets the major of this VersionInfo.
Major is the major version of the binary version # noqa: E501
:param major: The major of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and major is None: # noqa: E501
raise ValueError("Invalid value for `major`, must not be `None`") # noqa: E501
self._major = major
@property
def min_compatibility_major(self):
"""Gets the min_compatibility_major of this VersionInfo. # noqa: E501
MinCompatibilityMajor is the major version of the minimum compatibility version # noqa: E501
:return: The min_compatibility_major of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._min_compatibility_major
@min_compatibility_major.setter
def min_compatibility_major(self, min_compatibility_major):
"""Sets the min_compatibility_major of this VersionInfo.
MinCompatibilityMajor is the major version of the minimum compatibility version # noqa: E501
:param min_compatibility_major: The min_compatibility_major of this VersionInfo. # noqa: E501
:type: str
"""
self._min_compatibility_major = min_compatibility_major
@property
def min_compatibility_minor(self):
"""Gets the min_compatibility_minor of this VersionInfo. # noqa: E501
MinCompatibilityMinor is the minor version of the minimum compatibility version # noqa: E501
:return: The min_compatibility_minor of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._min_compatibility_minor
@min_compatibility_minor.setter
def min_compatibility_minor(self, min_compatibility_minor):
"""Sets the min_compatibility_minor of this VersionInfo.
MinCompatibilityMinor is the minor version of the minimum compatibility version # noqa: E501
:param min_compatibility_minor: The min_compatibility_minor of this VersionInfo. # noqa: E501
:type: str
"""
self._min_compatibility_minor = min_compatibility_minor
@property
def minor(self):
"""Gets the minor of this VersionInfo. # noqa: E501
Minor is the minor version of the binary version # noqa: E501
:return: The minor of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._minor
@minor.setter
def minor(self, minor):
"""Sets the minor of this VersionInfo.
Minor is the minor version of the binary version # noqa: E501
:param minor: The minor of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and minor is None: # noqa: E501
raise ValueError("Invalid value for `minor`, must not be `None`") # noqa: E501
self._minor = minor
@property
def platform(self):
"""Gets the platform of this VersionInfo. # noqa: E501
:return: The platform of this VersionInfo. # noqa: E501
:rtype: str
"""
return self._platform
@platform.setter
def platform(self, platform):
"""Sets the platform of this VersionInfo.
:param platform: The platform of this VersionInfo. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and platform is None: # noqa: E501
raise ValueError("Invalid value for `platform`, must not be `None`") # noqa: E501
self._platform = platform
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VersionInfo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VersionInfo):
return True
return self.to_dict() != other.to_dict()
| VersionInfo |
python | lepture__authlib | authlib/integrations/base_client/async_openid.py | {
"start": 235,
"end": 2982
} | class ____:
async def fetch_jwk_set(self, force=False):
metadata = await self.load_server_metadata()
jwk_set = metadata.get("jwks")
if jwk_set and not force:
return jwk_set
uri = metadata.get("jwks_uri")
if not uri:
raise RuntimeError('Missing "jwks_uri" in metadata')
async with self.client_cls(**self.client_kwargs) as client:
resp = await client.request("GET", uri, withhold_token=True)
resp.raise_for_status()
jwk_set = resp.json()
self.server_metadata["jwks"] = jwk_set
return jwk_set
async def userinfo(self, **kwargs):
"""Fetch user info from ``userinfo_endpoint``."""
metadata = await self.load_server_metadata()
resp = await self.get(metadata["userinfo_endpoint"], **kwargs)
resp.raise_for_status()
data = resp.json()
return UserInfo(data)
async def parse_id_token(
self, token, nonce, claims_options=None, claims_cls=None, leeway=120
):
"""Return an instance of UserInfo from token's ``id_token``."""
claims_params = dict(
nonce=nonce,
client_id=self.client_id,
)
if claims_cls is None:
if "access_token" in token:
claims_params["access_token"] = token["access_token"]
claims_cls = CodeIDToken
else:
claims_cls = ImplicitIDToken
metadata = await self.load_server_metadata()
if claims_options is None and "issuer" in metadata:
claims_options = {"iss": {"values": [metadata["issuer"]]}}
alg_values = metadata.get("id_token_signing_alg_values_supported")
if not alg_values:
alg_values = ["RS256"]
jwt = JsonWebToken(alg_values)
jwk_set = await self.fetch_jwk_set()
try:
claims = jwt.decode(
token["id_token"],
key=JsonWebKey.import_key_set(jwk_set),
claims_cls=claims_cls,
claims_options=claims_options,
claims_params=claims_params,
)
except ValueError:
jwk_set = await self.fetch_jwk_set(force=True)
claims = jwt.decode(
token["id_token"],
key=JsonWebKey.import_key_set(jwk_set),
claims_cls=claims_cls,
claims_options=claims_options,
claims_params=claims_params,
)
# https://github.com/authlib/authlib/issues/259
if claims.get("nonce_supported") is False:
claims.params["nonce"] = None
claims.validate(leeway=leeway)
return UserInfo(claims)
| AsyncOpenIDMixin |
python | Textualize__textual | src/textual/events.py | {
"start": 22301,
"end": 22565
} | class ____(Event, bubble=False):
"""Sent when the app loses focus.
- [ ] Bubbles
- [ ] Verbose
Note:
Only available when running within a terminal that supports
`FocusOut`, or when running via textual-web.
"""
@dataclass
| AppBlur |
python | huggingface__transformers | src/transformers/models/swin/modeling_swin.py | {
"start": 16855,
"end": 21391
} | class ____(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in SwinModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
| SwinSelfAttention |
python | tox-dev__tox | src/tox/execute/local_sub_process/__init__.py | {
"start": 1745,
"end": 6129
} | class ____(ExecuteStatus):
def __init__(self, options: ExecuteOptions, out: SyncWrite, err: SyncWrite, process: Popen[bytes]) -> None:
self._process: Popen[bytes] = process
super().__init__(options, out, err)
self._interrupted = False
@property
def exit_code(self) -> int | None:
# need to poll here, to make sure the returncode we get is current
self._process.poll()
return self._process.returncode
def interrupt(self) -> None:
self._interrupted = True
if self._process is not None: # pragma: no branch
# A three level stop mechanism for children - INT -> TERM -> KILL
# communicate will wait for the app to stop, and then drain the standard streams and close them
to_pid, host_pid = self._process.pid, os.getpid()
msg = "requested interrupt of %d from %d, activate in %.2f"
logging.warning(msg, to_pid, host_pid, self.options.suicide_timeout)
if self.wait(self.options.suicide_timeout) is None: # still alive -> INT
# on Windows everyone in the same process group, so they got the message
if sys.platform != "win32": # pragma: win32 cover
msg = "send signal %s to %d from %d with timeout %.2f"
logging.warning(msg, f"SIGINT({SIG_INTERRUPT})", to_pid, host_pid, self.options.interrupt_timeout)
self._process.send_signal(SIG_INTERRUPT)
if self.wait(self.options.interrupt_timeout) is None: # still alive -> TERM # pragma: no branch
terminate_output = self.options.terminate_timeout
msg = "send signal %s to %d from %d with timeout %.2f"
logging.warning(msg, f"SIGTERM({SIGTERM})", to_pid, host_pid, terminate_output)
self._process.terminate()
# Windows terminate is UNIX kill
if sys.platform != "win32" and self.wait(terminate_output) is None: # pragma: no branch
logging.warning(msg[:-18], f"SIGKILL({SIGKILL})", to_pid, host_pid)
self._process.kill() # still alive -> KILL
self.wait() # unconditional wait as kill should soon bring down the process
logging.warning("interrupt finished with success")
else: # pragma: no cover # difficult to test, process must die just as it's being interrupted
logging.warning("process already dead with %s within %s", self._process.returncode, host_pid)
def wait(self, timeout: float | None = None) -> int | None:
try: # note wait in general might deadlock if output large, but we drain in background threads so not an issue
return self._process.wait(timeout=timeout)
except TimeoutExpired:
return None
def write_stdin(self, content: str) -> None:
stdin = self._process.stdin
if stdin is None: # pragma: no branch
return # pragma: no cover
bytes_content = content.encode()
try:
if sys.platform == "win32": # explicit check for mypy # pragma: win32 cover
# on Windows we have a PipeHandle object here rather than a file stream
import _overlapped # type: ignore[import] # noqa: PLC0415,PLC2701
ov = _overlapped.Overlapped(0)
ov.WriteFile(stdin.handle, bytes_content) # type: ignore[attr-defined]
result = ov.getresult(10) # wait up to 10ms to perform the operation
if result != len(bytes_content):
msg = f"failed to write to {stdin!r}"
raise RuntimeError(msg)
else:
stdin.write(bytes_content)
stdin.flush()
except OSError: # pragma: no cover
if self._interrupted: # pragma: no cover
pass # pragma: no cover # if the process was asked to exit in the meantime ignore write errors
raise # pragma: no cover
def __repr__(self) -> str:
return f"{self.__class__.__name__}(pid={self._process.pid}, returncode={self._process.returncode!r})"
@property
def metadata(self) -> dict[str, Any]:
return {"pid": self._process.pid} if self._process.pid else {}
| LocalSubprocessExecuteStatus |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/components/workspace_component/scaffolder.py | {
"start": 431,
"end": 1046
} | class ____(Scaffolder[AirbyteScaffolderParams]):
@classmethod
def get_scaffold_params(cls) -> type[AirbyteScaffolderParams]:
return AirbyteScaffolderParams
def scaffold(self, request: ScaffoldRequest[AirbyteScaffolderParams]) -> None:
scaffold_component(
request,
{
"workspace": {
"workspace_id": request.params.workspace_id,
"client_id": request.params.client_id,
"client_secret": request.params.client_secret,
}
},
)
| AirbyteWorkspaceComponentScaffolder |
python | networkx__networkx | networkx/generators/tests/test_classic.py | {
"start": 269,
"end": 24400
} | class ____:
def test_balanced_tree(self):
# balanced_tree(r,h) is a tree with (r**(h+1)-1)/(r-1) edges
for r, h in [(2, 2), (3, 3), (6, 2)]:
t = nx.balanced_tree(r, h)
order = t.order()
assert order == (r ** (h + 1) - 1) / (r - 1)
assert nx.is_connected(t)
assert t.size() == order - 1
dh = nx.degree_histogram(t)
assert dh[0] == 0 # no nodes of 0
assert dh[1] == r**h # nodes of degree 1 are leaves
assert dh[r] == 1 # root is degree r
assert dh[r + 1] == order - r**h - 1 # everyone else is degree r+1
assert len(dh) == r + 2
def test_balanced_tree_star(self):
# balanced_tree(r,1) is the r-star
t = nx.balanced_tree(r=2, h=1)
assert nx.could_be_isomorphic(t, nx.star_graph(2))
t = nx.balanced_tree(r=5, h=1)
assert nx.could_be_isomorphic(t, nx.star_graph(5))
t = nx.balanced_tree(r=10, h=1)
assert nx.could_be_isomorphic(t, nx.star_graph(10))
def test_balanced_tree_path(self):
"""Tests that the balanced tree with branching factor one is the
path graph.
"""
# A tree of height four has five levels.
T = nx.balanced_tree(1, 4)
P = nx.path_graph(5)
assert nx.could_be_isomorphic(T, P)
def test_full_rary_tree(self):
r = 2
n = 9
t = nx.full_rary_tree(r, n)
assert t.order() == n
assert nx.is_connected(t)
dh = nx.degree_histogram(t)
assert dh[0] == 0 # no nodes of 0
assert dh[1] == 5 # nodes of degree 1 are leaves
assert dh[r] == 1 # root is degree r
assert dh[r + 1] == 9 - 5 - 1 # everyone else is degree r+1
assert len(dh) == r + 2
def test_full_rary_tree_balanced(self):
t = nx.full_rary_tree(2, 15)
th = nx.balanced_tree(2, 3)
assert nx.could_be_isomorphic(t, th)
def test_full_rary_tree_path(self):
t = nx.full_rary_tree(1, 10)
assert nx.could_be_isomorphic(t, nx.path_graph(10))
def test_full_rary_tree_empty(self):
t = nx.full_rary_tree(0, 10)
assert nx.could_be_isomorphic(t, nx.empty_graph(10))
t = nx.full_rary_tree(3, 0)
assert nx.could_be_isomorphic(t, nx.empty_graph(0))
def test_full_rary_tree_3_20(self):
t = nx.full_rary_tree(3, 20)
assert t.order() == 20
def test_barbell_graph(self):
# number of nodes = 2*m1 + m2 (2 m1-complete graphs + m2-path + 2 edges)
# number of edges = 2*(nx.number_of_edges(m1-complete graph) + m2 + 1
m1 = 3
m2 = 5
b = nx.barbell_graph(m1, m2)
assert nx.number_of_nodes(b) == 2 * m1 + m2
assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
m1 = 4
m2 = 10
b = nx.barbell_graph(m1, m2)
assert nx.number_of_nodes(b) == 2 * m1 + m2
assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
m1 = 3
m2 = 20
b = nx.barbell_graph(m1, m2)
assert nx.number_of_nodes(b) == 2 * m1 + m2
assert nx.number_of_edges(b) == m1 * (m1 - 1) + m2 + 1
# Raise NetworkXError if m1<2
m1 = 1
m2 = 20
pytest.raises(nx.NetworkXError, nx.barbell_graph, m1, m2)
# Raise NetworkXError if m2<0
m1 = 5
m2 = -2
pytest.raises(nx.NetworkXError, nx.barbell_graph, m1, m2)
# nx.barbell_graph(2,m) = nx.path_graph(m+4)
m1 = 2
m2 = 5
b = nx.barbell_graph(m1, m2)
assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
m1 = 2
m2 = 10
b = nx.barbell_graph(m1, m2)
assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
m1 = 2
m2 = 20
b = nx.barbell_graph(m1, m2)
assert nx.could_be_isomorphic(b, nx.path_graph(m2 + 4))
pytest.raises(
nx.NetworkXError, nx.barbell_graph, m1, m2, create_using=nx.DiGraph()
)
mb = nx.barbell_graph(m1, m2, create_using=nx.MultiGraph())
assert edges_equal(mb.edges(), b.edges())
def test_binomial_tree(self):
graphs = (None, nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
for create_using in graphs:
for n in range(4):
b = nx.binomial_tree(n, create_using)
assert nx.number_of_nodes(b) == 2**n
assert nx.number_of_edges(b) == (2**n - 1)
def test_complete_graph(self):
# complete_graph(m) is a connected graph with
# m nodes and m*(m+1)/2 edges
for m in [0, 1, 3, 5]:
g = nx.complete_graph(m)
assert nx.number_of_nodes(g) == m
assert nx.number_of_edges(g) == m * (m - 1) // 2
mg = nx.complete_graph(m, create_using=nx.MultiGraph)
assert edges_equal(mg.edges(), g.edges())
g = nx.complete_graph("abc")
assert nodes_equal(g.nodes(), ["a", "b", "c"])
assert g.size() == 3
# creates a self-loop... should it? <backward compatible says yes>
g = nx.complete_graph("abcb")
assert nodes_equal(g.nodes(), ["a", "b", "c"])
assert g.size() == 4
g = nx.complete_graph("abcb", create_using=nx.MultiGraph)
assert nodes_equal(g.nodes(), ["a", "b", "c"])
assert g.size() == 6
def test_complete_digraph(self):
# complete_graph(m) is a connected graph with
# m nodes and m*(m+1)/2 edges
for m in [0, 1, 3, 5]:
g = nx.complete_graph(m, create_using=nx.DiGraph)
assert nx.number_of_nodes(g) == m
assert nx.number_of_edges(g) == m * (m - 1)
g = nx.complete_graph("abc", create_using=nx.DiGraph)
assert len(g) == 3
assert g.size() == 6
assert g.is_directed()
def test_circular_ladder_graph(self):
G = nx.circular_ladder_graph(5)
pytest.raises(
nx.NetworkXError, nx.circular_ladder_graph, 5, create_using=nx.DiGraph
)
mG = nx.circular_ladder_graph(5, create_using=nx.MultiGraph)
assert edges_equal(mG.edges(), G.edges())
def test_circulant_graph(self):
# Ci_n(1) is the cycle graph for all n
Ci6_1 = nx.circulant_graph(6, [1])
C6 = nx.cycle_graph(6)
assert edges_equal(Ci6_1.edges(), C6.edges())
# Ci_n(1, 2, ..., n div 2) is the complete graph for all n
Ci7 = nx.circulant_graph(7, [1, 2, 3])
K7 = nx.complete_graph(7)
assert edges_equal(Ci7.edges(), K7.edges())
# Ci_6(1, 3) is K_3,3 i.e. the utility graph
Ci6_1_3 = nx.circulant_graph(6, [1, 3])
K3_3 = nx.complete_bipartite_graph(3, 3)
assert nx.could_be_isomorphic(Ci6_1_3, K3_3)
def test_cycle_graph(self):
G = nx.cycle_graph(4)
assert edges_equal(G.edges(), [(0, 1), (0, 3), (1, 2), (2, 3)])
mG = nx.cycle_graph(4, create_using=nx.MultiGraph)
assert edges_equal(mG.edges(), [(0, 1), (0, 3), (1, 2), (2, 3)])
G = nx.cycle_graph(4, create_using=nx.DiGraph)
assert not G.has_edge(2, 1)
assert G.has_edge(1, 2)
assert G.is_directed()
G = nx.cycle_graph("abc")
assert len(G) == 3
assert G.size() == 3
G = nx.cycle_graph("abcb")
assert len(G) == 3
assert G.size() == 2
g = nx.cycle_graph("abc", nx.DiGraph)
assert len(g) == 3
assert g.size() == 3
assert g.is_directed()
g = nx.cycle_graph("abcb", nx.DiGraph)
assert len(g) == 3
assert g.size() == 4
def test_dorogovtsev_goltsev_mendes_graph(self):
G = nx.dorogovtsev_goltsev_mendes_graph(0)
assert edges_equal(G.edges(), [(0, 1)])
assert nodes_equal(list(G), [0, 1])
G = nx.dorogovtsev_goltsev_mendes_graph(1)
assert edges_equal(G.edges(), [(0, 1), (0, 2), (1, 2)])
assert nx.average_clustering(G) == 1.0
assert nx.average_shortest_path_length(G) == 1.0
assert sorted(nx.triangles(G).values()) == [1, 1, 1]
assert nx.is_planar(G)
G = nx.dorogovtsev_goltsev_mendes_graph(2)
assert nx.number_of_nodes(G) == 6
assert nx.number_of_edges(G) == 9
assert nx.average_clustering(G) == 0.75
assert nx.average_shortest_path_length(G) == 1.4
assert nx.is_planar(G)
G = nx.dorogovtsev_goltsev_mendes_graph(10)
assert nx.number_of_nodes(G) == 29526
assert nx.number_of_edges(G) == 59049
assert G.degree(0) == 1024
assert G.degree(1) == 1024
assert G.degree(2) == 1024
with pytest.raises(nx.NetworkXError, match=r"n must be greater than"):
nx.dorogovtsev_goltsev_mendes_graph(-1)
with pytest.raises(nx.NetworkXError, match=r"directed graph not supported"):
nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError, match=r"multigraph not supported"):
nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.MultiGraph)
with pytest.raises(nx.NetworkXError):
nx.dorogovtsev_goltsev_mendes_graph(7, create_using=nx.MultiDiGraph)
def test_create_using(self):
G = nx.empty_graph()
assert isinstance(G, nx.Graph)
pytest.raises(TypeError, nx.empty_graph, create_using=0.0)
pytest.raises(TypeError, nx.empty_graph, create_using="Graph")
G = nx.empty_graph(create_using=nx.MultiGraph)
assert isinstance(G, nx.MultiGraph)
G = nx.empty_graph(create_using=nx.DiGraph)
assert isinstance(G, nx.DiGraph)
G = nx.empty_graph(create_using=nx.DiGraph, default=nx.MultiGraph)
assert isinstance(G, nx.DiGraph)
G = nx.empty_graph(create_using=None, default=nx.MultiGraph)
assert isinstance(G, nx.MultiGraph)
G = nx.empty_graph(default=nx.MultiGraph)
assert isinstance(G, nx.MultiGraph)
G = nx.path_graph(5)
H = nx.empty_graph(create_using=G)
assert not H.is_multigraph()
assert not H.is_directed()
assert len(H) == 0
assert G is H
H = nx.empty_graph(create_using=nx.MultiGraph())
assert H.is_multigraph()
assert not H.is_directed()
assert G is not H
# test for subclasses that also use typing.Protocol. See gh-6243
class Mixin(typing.Protocol):
pass
class MyGraph(Mixin, nx.DiGraph):
pass
G = nx.empty_graph(create_using=MyGraph)
def test_empty_graph(self):
G = nx.empty_graph()
assert nx.number_of_nodes(G) == 0
G = nx.empty_graph(42)
assert nx.number_of_nodes(G) == 42
assert nx.number_of_edges(G) == 0
G = nx.empty_graph("abc")
assert len(G) == 3
assert G.size() == 0
# create empty digraph
G = nx.empty_graph(42, create_using=nx.DiGraph(name="duh"))
assert nx.number_of_nodes(G) == 42
assert nx.number_of_edges(G) == 0
assert isinstance(G, nx.DiGraph)
# create empty multigraph
G = nx.empty_graph(42, create_using=nx.MultiGraph(name="duh"))
assert nx.number_of_nodes(G) == 42
assert nx.number_of_edges(G) == 0
assert isinstance(G, nx.MultiGraph)
# create empty graph from another
pete = nx.petersen_graph()
G = nx.empty_graph(42, create_using=pete)
assert nx.number_of_nodes(G) == 42
assert nx.number_of_edges(G) == 0
assert isinstance(G, nx.Graph)
def test_ladder_graph(self):
for i, G in [
(0, nx.empty_graph(0)),
(1, nx.path_graph(2)),
(2, nx.hypercube_graph(2)),
(10, nx.grid_graph([2, 10])),
]:
assert nx.could_be_isomorphic(nx.ladder_graph(i), G)
pytest.raises(nx.NetworkXError, nx.ladder_graph, 2, create_using=nx.DiGraph)
g = nx.ladder_graph(2)
mg = nx.ladder_graph(2, create_using=nx.MultiGraph)
assert edges_equal(mg.edges(), g.edges())
@pytest.mark.parametrize(("m", "n"), [(3, 5), (4, 10), (3, 20)])
def test_lollipop_graph_right_sizes(self, m, n):
G = nx.lollipop_graph(m, n)
assert nx.number_of_nodes(G) == m + n
assert nx.number_of_edges(G) == m * (m - 1) / 2 + n
@pytest.mark.parametrize(("m", "n"), [("ab", ""), ("abc", "defg")])
def test_lollipop_graph_size_node_sequence(self, m, n):
G = nx.lollipop_graph(m, n)
assert nx.number_of_nodes(G) == len(m) + len(n)
assert nx.number_of_edges(G) == len(m) * (len(m) - 1) / 2 + len(n)
def test_lollipop_graph_exceptions(self):
# Raise NetworkXError if m<2
pytest.raises(nx.NetworkXError, nx.lollipop_graph, -1, 2)
pytest.raises(nx.NetworkXError, nx.lollipop_graph, 1, 20)
pytest.raises(nx.NetworkXError, nx.lollipop_graph, "", 20)
pytest.raises(nx.NetworkXError, nx.lollipop_graph, "a", 20)
# Raise NetworkXError if n<0
pytest.raises(nx.NetworkXError, nx.lollipop_graph, 5, -2)
# raise NetworkXError if create_using is directed
with pytest.raises(nx.NetworkXError):
nx.lollipop_graph(2, 20, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError):
nx.lollipop_graph(2, 20, create_using=nx.MultiDiGraph)
@pytest.mark.parametrize(("m", "n"), [(2, 0), (2, 5), (2, 10), ("ab", 20)])
def test_lollipop_graph_same_as_path_when_m1_is_2(self, m, n):
G = nx.lollipop_graph(m, n)
assert nx.could_be_isomorphic(G, nx.path_graph(n + 2))
def test_lollipop_graph_for_multigraph(self):
G = nx.lollipop_graph(5, 20)
MG = nx.lollipop_graph(5, 20, create_using=nx.MultiGraph)
assert edges_equal(MG.edges(), G.edges())
@pytest.mark.parametrize(
("m", "n"),
[(4, "abc"), ("abcd", 3), ([1, 2, 3, 4], "abc"), ("abcd", [1, 2, 3])],
)
def test_lollipop_graph_mixing_input_types(self, m, n):
expected = nx.compose(nx.complete_graph(4), nx.path_graph(range(100, 103)))
expected.add_edge(0, 100) # Connect complete graph and path graph
assert nx.could_be_isomorphic(nx.lollipop_graph(m, n), expected)
def test_lollipop_graph_non_builtin_ints(self):
np = pytest.importorskip("numpy")
G = nx.lollipop_graph(np.int32(4), np.int64(3))
expected = nx.compose(nx.complete_graph(4), nx.path_graph(range(100, 103)))
expected.add_edge(0, 100) # Connect complete graph and path graph
assert nx.could_be_isomorphic(G, expected)
def test_null_graph(self):
assert nx.number_of_nodes(nx.null_graph()) == 0
def test_path_graph(self):
p = nx.path_graph(0)
assert nx.could_be_isomorphic(p, nx.null_graph())
p = nx.path_graph(1)
assert nx.could_be_isomorphic(p, nx.empty_graph(1))
p = nx.path_graph(10)
assert nx.is_connected(p)
assert sorted(d for n, d in p.degree()) == [1, 1, 2, 2, 2, 2, 2, 2, 2, 2]
assert p.order() - 1 == p.size()
dp = nx.path_graph(3, create_using=nx.DiGraph)
assert dp.has_edge(0, 1)
assert not dp.has_edge(1, 0)
mp = nx.path_graph(10, create_using=nx.MultiGraph)
assert edges_equal(mp.edges(), p.edges())
G = nx.path_graph("abc")
assert len(G) == 3
assert G.size() == 2
G = nx.path_graph("abcb")
assert len(G) == 3
assert G.size() == 2
g = nx.path_graph("abc", nx.DiGraph)
assert len(g) == 3
assert g.size() == 2
assert g.is_directed()
g = nx.path_graph("abcb", nx.DiGraph)
assert len(g) == 3
assert g.size() == 3
G = nx.path_graph((1, 2, 3, 2, 4))
assert G.has_edge(2, 4)
def test_star_graph(self):
assert nx.could_be_isomorphic(nx.star_graph(""), nx.empty_graph(0))
assert nx.could_be_isomorphic(nx.star_graph([]), nx.empty_graph(0))
assert nx.could_be_isomorphic(nx.star_graph(0), nx.empty_graph(1))
assert nx.could_be_isomorphic(nx.star_graph(1), nx.path_graph(2))
assert nx.could_be_isomorphic(nx.star_graph(2), nx.path_graph(3))
assert nx.could_be_isomorphic(
nx.star_graph(5), nx.complete_bipartite_graph(1, 5)
)
s = nx.star_graph(10)
assert sorted(d for n, d in s.degree()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10]
ms = nx.star_graph(10, create_using=nx.MultiGraph)
assert edges_equal(ms.edges(), s.edges())
G = nx.star_graph("abc")
assert len(G) == 3
assert G.size() == 2
G = nx.star_graph("abcb")
assert len(G) == 3
assert G.size() == 2
G = nx.star_graph("abcb", create_using=nx.MultiGraph)
assert len(G) == 3
assert G.size() == 3
G = nx.star_graph("abcdefg")
assert len(G) == 7
assert G.size() == 6
@pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiDiGraph))
def test_star_graph_directed(self, graph_type):
dg = nx.star_graph(3, create_using=graph_type)
assert sorted([(u, v) for u, v, *d in dg.edges]) == [(0, 1), (0, 2), (0, 3)]
def test_non_int_integers_for_star_graph(self):
np = pytest.importorskip("numpy")
G = nx.star_graph(np.int32(3))
assert len(G) == 4
assert G.size() == 3
@pytest.mark.parametrize(("m", "n"), [(3, 0), (3, 5), (4, 10), (3, 20)])
def test_tadpole_graph_right_sizes(self, m, n):
G = nx.tadpole_graph(m, n)
assert nx.number_of_nodes(G) == m + n
assert nx.number_of_edges(G) == m + n - (m == 2)
@pytest.mark.parametrize(("m", "n"), [("ab", ""), ("ab", "c"), ("abc", "defg")])
def test_tadpole_graph_size_node_sequences(self, m, n):
G = nx.tadpole_graph(m, n)
assert nx.number_of_nodes(G) == len(m) + len(n)
assert nx.number_of_edges(G) == len(m) + len(n) - (len(m) == 2)
def test_tadpole_graph_exceptions(self):
# Raise NetworkXError if m<2
pytest.raises(nx.NetworkXError, nx.tadpole_graph, -1, 3)
pytest.raises(nx.NetworkXError, nx.tadpole_graph, 0, 3)
pytest.raises(nx.NetworkXError, nx.tadpole_graph, 1, 3)
# Raise NetworkXError if n<0
pytest.raises(nx.NetworkXError, nx.tadpole_graph, 5, -2)
# Raise NetworkXError for digraphs
with pytest.raises(nx.NetworkXError):
nx.tadpole_graph(2, 20, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError):
nx.tadpole_graph(2, 20, create_using=nx.MultiDiGraph)
@pytest.mark.parametrize(("m", "n"), [(2, 0), (2, 5), (2, 10), ("ab", 20)])
def test_tadpole_graph_same_as_path_when_m_is_2(self, m, n):
G = nx.tadpole_graph(m, n)
assert nx.could_be_isomorphic(G, nx.path_graph(n + 2))
@pytest.mark.parametrize("m", [4, 7])
def test_tadpole_graph_same_as_cycle_when_m2_is_0(self, m):
G = nx.tadpole_graph(m, 0)
assert nx.could_be_isomorphic(G, nx.cycle_graph(m))
def test_tadpole_graph_for_multigraph(self):
G = nx.tadpole_graph(5, 20)
MG = nx.tadpole_graph(5, 20, create_using=nx.MultiGraph)
assert edges_equal(MG.edges(), G.edges())
@pytest.mark.parametrize(
("m", "n"),
[(4, "abc"), ("abcd", 3), ([1, 2, 3, 4], "abc"), ("abcd", [1, 2, 3])],
)
def test_tadpole_graph_mixing_input_types(self, m, n):
expected = nx.compose(nx.cycle_graph(4), nx.path_graph(range(100, 103)))
expected.add_edge(0, 100) # Connect cycle and path
assert nx.could_be_isomorphic(nx.tadpole_graph(m, n), expected)
def test_tadpole_graph_non_builtin_integers(self):
np = pytest.importorskip("numpy")
G = nx.tadpole_graph(np.int32(4), np.int64(3))
expected = nx.compose(nx.cycle_graph(4), nx.path_graph(range(100, 103)))
expected.add_edge(0, 100) # Connect cycle and path
assert nx.could_be_isomorphic(G, expected)
def test_trivial_graph(self):
assert nx.number_of_nodes(nx.trivial_graph()) == 1
def test_turan_graph(self):
assert nx.number_of_edges(nx.turan_graph(13, 4)) == 63
assert nx.could_be_isomorphic(
nx.turan_graph(13, 4), nx.complete_multipartite_graph(3, 4, 3, 3)
)
def test_wheel_graph(self):
for n, G in [
("", nx.null_graph()),
(0, nx.null_graph()),
(1, nx.empty_graph(1)),
(2, nx.path_graph(2)),
(3, nx.complete_graph(3)),
(4, nx.complete_graph(4)),
]:
g = nx.wheel_graph(n)
assert nx.could_be_isomorphic(g, G)
g = nx.wheel_graph(10)
assert sorted(d for n, d in g.degree()) == [3, 3, 3, 3, 3, 3, 3, 3, 3, 9]
pytest.raises(nx.NetworkXError, nx.wheel_graph, 10, create_using=nx.DiGraph)
mg = nx.wheel_graph(10, create_using=nx.MultiGraph())
assert edges_equal(mg.edges(), g.edges())
G = nx.wheel_graph("abc")
assert len(G) == 3
assert G.size() == 3
G = nx.wheel_graph("abcb")
assert len(G) == 3
assert G.size() == 4
G = nx.wheel_graph("abcb", nx.MultiGraph)
assert len(G) == 3
assert G.size() == 6
def test_non_int_integers_for_wheel_graph(self):
np = pytest.importorskip("numpy")
G = nx.wheel_graph(np.int32(3))
assert len(G) == 3
assert G.size() == 3
def test_complete_0_partite_graph(self):
"""Tests that the complete 0-partite graph is the null graph."""
G = nx.complete_multipartite_graph()
H = nx.null_graph()
assert nodes_equal(G, H)
assert edges_equal(G.edges(), H.edges())
def test_complete_1_partite_graph(self):
"""Tests that the complete 1-partite graph is the empty graph."""
G = nx.complete_multipartite_graph(3)
H = nx.empty_graph(3)
assert nodes_equal(G, H)
assert edges_equal(G.edges(), H.edges())
def test_complete_2_partite_graph(self):
"""Tests that the complete 2-partite graph is the complete bipartite
graph.
"""
G = nx.complete_multipartite_graph(2, 3)
H = nx.complete_bipartite_graph(2, 3)
assert nodes_equal(G, H)
assert edges_equal(G.edges(), H.edges())
def test_complete_multipartite_graph(self):
"""Tests for generating the complete multipartite graph."""
G = nx.complete_multipartite_graph(2, 3, 4)
blocks = [(0, 1), (2, 3, 4), (5, 6, 7, 8)]
# Within each block, no two vertices should be adjacent.
for block in blocks:
for u, v in itertools.combinations_with_replacement(block, 2):
assert v not in G[u]
assert G.nodes[u] == G.nodes[v]
# Across blocks, all vertices should be adjacent.
for block1, block2 in itertools.combinations(blocks, 2):
for u, v in itertools.product(block1, block2):
assert v in G[u]
assert G.nodes[u] != G.nodes[v]
with pytest.raises(nx.NetworkXError, match="Negative number of nodes"):
nx.complete_multipartite_graph(2, -3, 4)
def test_kneser_graph(self):
# the petersen graph is a special case of the kneser graph when n=5 and k=2
assert nx.could_be_isomorphic(nx.kneser_graph(5, 2), nx.petersen_graph())
# when k is 1, the kneser graph returns a complete graph with n vertices
for i in range(1, 7):
assert nx.could_be_isomorphic(nx.kneser_graph(i, 1), nx.complete_graph(i))
# the kneser graph of n and n-1 is the empty graph with n vertices
for j in range(3, 7):
assert nx.could_be_isomorphic(nx.kneser_graph(j, j - 1), nx.empty_graph(j))
# in general the number of edges of the kneser graph is equal to
# (n choose k) times (n-k choose k) divided by 2
assert nx.number_of_edges(nx.kneser_graph(8, 3)) == 280
| TestGeneratorClassic |
python | numpy__numpy | numpy/_core/tests/test_indexing.py | {
"start": 27065,
"end": 27925
} | class ____:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
pytest.warns(ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
pytest.warns(ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
| TestFancyIndexingCast |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 26027,
"end": 28377
} | class ____(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: dict[str, Any] | Any, **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs: The outputs of the chain.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error: The error.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
"""Run when agent action is received.
Args:
action: The agent action.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent finish is received.
Args:
finish: The agent finish.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
| CallbackManagerForChainRun |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_freezing_weights.py | {
"start": 976,
"end": 1875
} | class ____(nn.Module):
def __init__(
self,
with_fsdp,
freeze_after_wrap_fsdp,
disable_autograd,
fsdp_kwargs,
):
super().__init__()
self.trunk = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(64, 10)
if with_fsdp and freeze_after_wrap_fsdp:
self.fsdp_wrap(fsdp_kwargs)
self.autograd_ctx = (
torch.no_grad if disable_autograd else contextlib.nullcontext
)
def fsdp_wrap(self, fsdp_kwargs):
self.trunk = FSDP(self.trunk, **fsdp_kwargs)
self.head = FSDP(self.head, **fsdp_kwargs)
def forward(self, x):
with self.autograd_ctx():
x = self.trunk(x)
return self.head(x)
| Model |
python | dask__distributed | distributed/scheduler.py | {
"start": 127919,
"end": 335969
} | class ____(SchedulerState, ServerNode):
"""Dynamic distributed task scheduler
The scheduler tracks the current state of workers, data, and computations.
The scheduler listens for events and responds by controlling workers
appropriately. It continuously tries to use the workers to execute an ever
growing dask graph.
All events are handled quickly, in linear time with respect to their input
(which is often of constant size) and generally within a millisecond. To
accomplish this the scheduler tracks a lot of state. Every operation
maintains the consistency of this state.
The scheduler communicates with the outside world through Comm objects.
It maintains a consistent and valid view of the world even when listening
to several clients at once.
A Scheduler is typically started either with the ``dask scheduler``
executable::
$ dask scheduler
Scheduler started at 127.0.0.1:8786
Or within a LocalCluster a Client starts up without connection
information::
>>> c = Client() # doctest: +SKIP
>>> c.cluster.scheduler # doctest: +SKIP
Scheduler(...)
Users typically do not interact with the scheduler directly but rather with
the client object ``Client``.
The ``contact_address`` parameter allows to advertise a specific address to
the workers for communication with the scheduler, which is different than
the address the scheduler binds to. This is useful when the scheduler
listens on a private address, which therefore cannot be used by the workers
to contact it.
**State**
The scheduler contains the following state variables. Each variable is
listed along with what it stores and a brief description.
* **tasks:** ``{task key: TaskState}``
Tasks currently known to the scheduler
* **unrunnable:** ``{TaskState}``
Tasks in the "no-worker" state
* **workers:** ``{worker key: WorkerState}``
Workers currently connected to the scheduler
* **idle:** ``{WorkerState}``:
Set of workers that are not fully utilized
* **saturated:** ``{WorkerState}``:
Set of workers that are not over-utilized
* **host_info:** ``{hostname: dict}``:
Information about each worker host
* **clients:** ``{client key: ClientState}``
Clients currently connected to the scheduler
* **services:** ``{str: port}``:
Other services running on this scheduler, like Bokeh
* **loop:** ``IOLoop``:
The running Tornado IOLoop
* **client_comms:** ``{client key: Comm}``
For each client, a Comm object used to receive task requests and
report task status updates.
* **stream_comms:** ``{worker key: Comm}``
For each worker, a Comm object from which we both accept stimuli and
report results
* **task_duration:** ``{key-prefix: time}``
Time we expect certain functions to take, e.g. ``{'sum': 0.25}``
"""
default_port = 8786
_instances: ClassVar[weakref.WeakSet[Scheduler]] = weakref.WeakSet()
worker_ttl: float | None
idle_since: float | None
idle_timeout: float | None
_no_workers_since: float | None # Note: not None iff there are pending tasks
no_workers_timeout: float | None
_client_connections_added_total: int
_client_connections_removed_total: int
_workers_added_total: int
_workers_removed_total: int
_active_graph_updates: int
_starting_nannies: set[str]
worker_plugins: dict[str, bytes]
nanny_plugins: dict[str, bytes]
client_comms: dict[str, BatchedSend]
stream_comms: dict[str, BatchedSend]
cumulative_worker_metrics: defaultdict[tuple | str, int]
bandwidth_types: defaultdict[str, float]
bandwidth_workers: defaultdict[tuple[str, str], float]
services: dict
def __init__(
self,
loop: IOLoop | None = None,
services: dict | None = None,
service_kwargs: dict | None = None,
allowed_failures: int | None = None,
extensions: dict | None = None,
validate: bool | None = None,
scheduler_file: str | None = None,
security: dict | Security | None = None,
worker_ttl: float | None = None,
idle_timeout: float | None = None,
interface: str | None = None,
host: str | None = None,
port: int = 0,
protocol: str | None = None,
dashboard_address: str | None = None,
dashboard: bool | None = None,
http_prefix: str | None = "/",
preload: str | Sequence[str] | None = None,
preload_argv: str | Sequence[str] | Sequence[Sequence[str]] = (),
plugins: Sequence[SchedulerPlugin] = (),
contact_address: str | None = None,
transition_counter_max: bool | int = False,
jupyter: bool = False,
**kwargs: Any,
):
if dask.config.get("distributed.scheduler.pickle", default=True) is False:
raise RuntimeError(
"Pickling can no longer be disabled with the `distributed.scheduler.pickle` option. Please remove this configuration to start the scheduler."
)
if loop is not None:
warnings.warn(
"the loop kwarg to Scheduler is deprecated",
DeprecationWarning,
stacklevel=2,
)
self.loop = self.io_loop = IOLoop.current()
self._setup_logging(logger)
# Attributes
if contact_address is None:
contact_address = dask.config.get("distributed.scheduler.contact-address")
self.contact_address = contact_address
if allowed_failures is None:
allowed_failures = dask.config.get("distributed.scheduler.allowed-failures")
self.allowed_failures = allowed_failures
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.proc = psutil.Process()
self.service_specs = services or {}
self.service_kwargs = service_kwargs or {}
self.services = {}
self.scheduler_file = scheduler_file
self.worker_ttl = parse_timedelta(
worker_ttl or dask.config.get("distributed.scheduler.worker-ttl")
)
self.idle_timeout = parse_timedelta(
idle_timeout or dask.config.get("distributed.scheduler.idle-timeout")
)
self.idle_since = time()
self.no_workers_timeout = parse_timedelta(
dask.config.get("distributed.scheduler.no-workers-timeout")
)
self._no_workers_since = None
self.time_started = self.idle_since # compatibility for dask-gateway
self._replica_lock = RLock()
self.bandwidth_workers = defaultdict(float)
self.bandwidth_types = defaultdict(float)
# Don't cast int metrics to float
self.cumulative_worker_metrics = defaultdict(int)
if not preload:
preload = dask.config.get("distributed.scheduler.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.scheduler.preload-argv")
self.preloads = preloading.process_preloads(
self,
preload, # type: ignore
preload_argv,
)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("scheduler")
self.connection_args["handshake_overrides"] = { # common denominator
"pickle-protocol": 4
}
self._start_address = addresses_from_user_args(
host=host,
port=port,
interface=interface,
protocol=protocol,
security=security,
default_port=self.default_port,
)
http_server_modules = dask.config.get("distributed.scheduler.http.routes")
show_dashboard = dashboard or (dashboard is None and dashboard_address)
# install vanilla route if show_dashboard but bokeh is not installed
if show_dashboard:
try:
import distributed.dashboard.scheduler
except ImportError:
show_dashboard = False
http_server_modules.append("distributed.http.scheduler.missing_bokeh")
routes = get_handlers(
server=self, modules=http_server_modules, prefix=http_prefix
)
self.start_http_server(routes, dashboard_address, default_port=8787)
self.jupyter = jupyter
if show_dashboard:
distributed.dashboard.scheduler.connect(
self.http_application, self.http_server, self, prefix=http_prefix
)
scheduler = self
if self.jupyter:
try:
from jupyter_server.serverapp import ServerApp
except ImportError:
raise ImportError(
"In order to use the Dask jupyter option you "
"need to have jupyterlab installed"
)
from traitlets.config import Config
"""HTTP handler to shut down the Jupyter server.
"""
try:
from jupyter_server.auth import authorized
except ImportError:
def authorized(c: FuncT) -> FuncT:
return c
from jupyter_server.base.handlers import JupyterHandler
class ShutdownHandler(JupyterHandler):
"""A shutdown API handler."""
auth_resource = "server"
@tornado.web.authenticated
@authorized # type: ignore
async def post(self) -> None:
"""Shut down the server."""
self.log.info("Shutting down on /api/shutdown request.")
await scheduler.close(reason="jupyter-requested-shutdown")
j = ServerApp.instance(
config=Config(
{
"ServerApp": {
"base_url": "jupyter",
# SECURITY: We usually expect the dashboard to be a read-only view into
# the scheduler activity. However, by adding an open Jupyter application
# we are allowing arbitrary remote code execution on the scheduler via the
# dashboard server. This option should only be used when the dashboard is
# protected via other means, or when you don't care about cluster security.
"token": "",
"allow_remote_access": True,
}
}
)
)
j.initialize(
new_httpserver=False,
argv=[],
)
self._jupyter_server_application = j
shutdown_app = tornado.web.Application(
[(r"/jupyter/api/shutdown", ShutdownHandler)]
)
shutdown_app.settings = j.web_app.settings
self.http_application.add_application(shutdown_app)
self.http_application.add_application(j.web_app)
# Communication state
self.client_comms = {}
self.stream_comms = {}
# Task state
tasks: dict[Key, TaskState] = {}
self.generation = 0
self._last_client = None
self._last_time = 0.0
unrunnable: dict[TaskState, float] = {}
queued = HeapSet(key=operator.attrgetter("priority"))
# Prefix-keyed containers
# Client state
clients: dict[str, ClientState] = {}
# Worker state
workers = SortedDict()
host_info: dict[str, dict[str, Any]] = {}
resources: dict[str, dict[str, float]] = {}
aliases: dict[Hashable, str] = {}
self._worker_collections = [
workers,
host_info,
resources,
aliases,
]
maxlen = dask.config.get("distributed.admin.low-level-log-length")
self._broker = Broker(maxlen, self)
self.worker_plugins = {}
self.nanny_plugins = {}
self._starting_nannies = set()
self._starting_nannies_cond = asyncio.Condition()
worker_handlers = {
"task-finished": self.handle_task_finished,
"task-erred": self.handle_task_erred,
"release-worker-data": self.release_worker_data,
"add-keys": self.add_keys,
"long-running": self.handle_long_running,
"reschedule": self._reschedule,
"keep-alive": lambda *args, **kwargs: None,
"log-event": self.log_worker_event,
"worker-status-change": self.handle_worker_status_change,
"request-refresh-who-has": self.handle_request_refresh_who_has,
}
client_handlers = {
"update-graph": self.update_graph,
"client-desires-keys": self.client_desires_keys,
"update-data": self.update_data,
"report-key": self.report_on_key,
"client-releases-keys": self.client_releases_keys,
"heartbeat-client": self.client_heartbeat,
"close-client": self.remove_client,
"subscribe-topic": self.subscribe_topic,
"unsubscribe-topic": self.unsubscribe_topic,
"cancel-keys": self.stimulus_cancel,
}
self.handlers = {
"register-client": self.add_client,
"scatter": self.scatter,
"register-worker": self.add_worker,
"register_nanny": self.add_nanny,
"unregister": self.remove_worker,
"gather": self.gather,
"retry": self.stimulus_retry,
"feed": self.feed,
"terminate": self.close,
"broadcast": self.broadcast,
"proxy": self.proxy,
"ncores": self.get_ncores,
"ncores_running": self.get_ncores_running,
"has_what": self.get_has_what,
"who_has": self.get_who_has,
"processing": self.get_processing,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"performance_report": self.performance_report,
"get_logs": self.get_logs,
"logs": self.get_logs,
"worker_logs": self.get_worker_logs,
"log_event": self.log_event,
"events": self.get_events,
"nbytes": self.get_nbytes,
"versions": self.versions,
"add_keys": self.add_keys,
"rebalance": self.rebalance,
"replicate": self.replicate,
"run_function": self.run_function,
"restart": self.restart,
"restart_workers": self.restart_workers,
"update_data": self.update_data,
"set_resources": self.add_resources,
"retire_workers": self.retire_workers,
"get_metadata": self.get_metadata,
"set_metadata": self.set_metadata,
"set_restrictions": self.set_restrictions,
"heartbeat_worker": self.heartbeat_worker,
"get_task_status": self.get_task_status,
"get_task_stream": self.get_task_stream,
"get_task_prefix_states": self.get_task_prefix_states,
"register_scheduler_plugin": self.register_scheduler_plugin,
"unregister_scheduler_plugin": self.unregister_scheduler_plugin,
"register_worker_plugin": self.register_worker_plugin,
"unregister_worker_plugin": self.unregister_worker_plugin,
"register_nanny_plugin": self.register_nanny_plugin,
"unregister_nanny_plugin": self.unregister_nanny_plugin,
"adaptive_target": self.adaptive_target,
"workers_to_close": self.workers_to_close,
"subscribe_worker_status": self.subscribe_worker_status,
"start_task_metadata": self.start_task_metadata,
"stop_task_metadata": self.stop_task_metadata,
"get_cluster_state": self.get_cluster_state,
"dump_cluster_state_to_url": self.dump_cluster_state_to_url,
"benchmark_hardware": self.benchmark_hardware,
"get_story": self.get_story,
"check_idle": self.check_idle,
}
connection_limit = get_fileno_limit() / 2
SchedulerState.__init__(
self,
aliases=aliases,
clients=clients,
workers=workers,
host_info=host_info,
resources=resources,
tasks=tasks,
unrunnable=unrunnable,
queued=queued,
validate=validate,
plugins=plugins,
transition_counter_max=transition_counter_max,
)
ServerNode.__init__(
self,
handlers=self.handlers,
stream_handlers=merge(worker_handlers, client_handlers),
connection_limit=connection_limit,
deserialize=False,
connection_args=self.connection_args,
**kwargs,
)
if self.worker_ttl:
pc = PeriodicCallback(self.check_worker_ttl, self.worker_ttl * 1000)
self.periodic_callbacks["worker-ttl"] = pc
pc = PeriodicCallback(self.check_idle, 250) # type: ignore
self.periodic_callbacks["idle-timeout"] = pc
pc = PeriodicCallback(self._check_no_workers, 250)
self.periodic_callbacks["no-workers-timeout"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS.copy()
if not dask.config.get("distributed.scheduler.work-stealing"):
if "stealing" in extensions:
del extensions["stealing"]
for name, extension in extensions.items():
self.extensions[name] = extension(self)
setproctitle("dask scheduler [not started]")
Scheduler._instances.add(self)
self.rpc.allow_offload = False
self._client_connections_added_total = 0
self._client_connections_removed_total = 0
self._workers_added_total = 0
self._workers_removed_total = 0
self._active_graph_updates = 0
##################
# Administration #
##################
def __repr__(self) -> str:
return (
f"<Scheduler {self.address_safe!r}, "
f"workers: {len(self.workers)}, "
f"cores: {self.total_nthreads}, "
f"tasks: {len(self.tasks)}>"
)
def _repr_html_(self) -> str:
return get_template("scheduler.html.j2").render(
address=self.address,
workers=self.workers,
threads=self.total_nthreads,
tasks=self.tasks,
)
def identity(self, n_workers: int = -1) -> dict[str, Any]:
"""Basic information about ourselves and our cluster"""
if n_workers == -1:
n_workers = len(self.workers)
d = {
"type": type(self).__name__,
"id": str(self.id),
"address": self.address,
"services": {key: v.port for (key, v) in self.services.items()},
"started": self.time_started,
"n_workers": len(self.workers),
"total_threads": self.total_nthreads,
"total_memory": self.total_memory,
"workers": {
worker.address: worker.identity()
for worker in itertools.islice(self.workers.values(), n_workers)
},
}
return d
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Server.identity
Client.dump_cluster_state
distributed.utils.recursive_to_dict
"""
info = super()._to_dict(exclude=exclude)
extra = {
"transition_log": self.transition_log,
"transition_counter": self.transition_counter,
"tasks": self.tasks,
"task_groups": self.task_groups,
# Overwrite dict of WorkerState.identity from info
"workers": self.workers,
"clients": self.clients,
"memory": self.memory,
"events": self._broker._topics,
"extensions": self.extensions,
}
extra = {k: v for k, v in extra.items() if k not in exclude}
info.update(recursive_to_dict(extra, exclude=exclude))
return info
    async def get_cluster_state(
        self,
        exclude: Collection[str],
    ) -> dict:
        """Produce the state dict used in a cluster state dump.

        Parameters
        ----------
        exclude:
            Attribute names to omit from the scheduler's and workers'
            state dictionaries.

        Returns
        -------
        Mapping with keys ``"scheduler"`` (this scheduler's state dump),
        ``"workers"`` (per-worker state, with RPC failures replaced by their
        ``repr``) and ``"versions"`` (software versions on scheduler and
        workers).
        """
        # Kick off state-dumping on workers before we block the event loop in `self._to_dict`.
        workers_future = asyncio.gather(
            self.broadcast(
                msg={"op": "dump_state", "exclude": exclude},
                on_error="return",
            ),
            self.broadcast(
                msg={"op": "versions"},
                on_error="ignore",
            ),
        )
        try:
            scheduler_state = self._to_dict(exclude=exclude)
            worker_states, worker_versions = await workers_future
        finally:
            # Ensure the tasks aren't left running if anything fails.
            # Someday (py3.11), use a trio-style TaskGroup for this.
            workers_future.cancel()
        # Convert any RPC errors to strings
        worker_states = {
            k: repr(v) if isinstance(v, Exception) else v
            for k, v in worker_states.items()
        }
        return {
            "scheduler": scheduler_state,
            "workers": worker_states,
            "versions": {"scheduler": self.versions(), "workers": worker_versions},
        }
async def dump_cluster_state_to_url(
self,
url: str,
exclude: Collection[str],
format: Literal["msgpack", "yaml"],
**storage_options: dict[str, Any],
) -> None:
"Write a cluster state dump to an fsspec-compatible URL."
await cluster_dump.write_state(
partial(self.get_cluster_state, exclude), url, format, **storage_options
)
def get_worker_service_addr(
self, worker: str, service_name: str, protocol: bool = False
) -> tuple[str, int] | str | None:
"""
Get the (host, port) address of the named service on the *worker*.
Returns None if the service doesn't exist.
Parameters
----------
worker : address
service_name : str
Common services include 'bokeh' and 'nanny'
protocol : boolean
Whether or not to include a full address with protocol (True)
or just a (host, port) pair
"""
ws = self.workers[worker]
port = ws.services.get(service_name)
if port is None:
return None
elif protocol:
return "%(protocol)s://%(host)s:%(port)d" % {
"protocol": ws.address.split("://")[0],
"host": ws.host,
"port": port,
}
else:
return ws.host, port
    async def start_unsafe(self) -> Self:
        """Clear out old state and restart all running coroutines.

        Binds all configured listen addresses, starts auxiliary services
        (dashboard etc.), writes the scheduler file if requested, runs
        preloads and plugin ``start`` hooks, and starts periodic callbacks.
        """
        await super().start_unsafe()
        enable_gc_diagnosis()
        self._clear_task_state()
        # Bind every configured address before advertising ourselves.
        for addr in self._start_address:
            await self.listen(
                addr,
                allow_offload=False,
                handshake_overrides={"pickle-protocol": 4, "compression": None},
                **self.security.get_listen_args("scheduler"),
            )
            self.ip = get_address_host(self.listen_address)
            listen_ip = self.ip
        if listen_ip == "0.0.0.0":
            listen_ip = ""
        if self.address.startswith("inproc://"):
            listen_ip = "localhost"
        # Services listen on all addresses
        self.start_services(listen_ip)
        for listener in self.listeners:
            logger.info(" Scheduler at: %25s", listener.contact_address)
        for name, server in self.services.items():
            if name == "dashboard":
                addr = get_address_host(listener.contact_address)
                try:
                    link = format_dashboard_link(addr, server.port)
                # formatting dashboard link can fail if distributed.dashboard.link
                # refers to non-existent env vars.
                except KeyError as e:
                    logger.warning(
                        f"Failed to format dashboard link, unknown value: {e}"
                    )
                    link = f":{server.port}"
            else:
                link = f"{listen_ip}:{server.port}"
            logger.info("%11s at: %25s", name, link)
        if self.scheduler_file:
            with open(self.scheduler_file, "w") as f:
                json.dump(self.identity(), f, indent=2)

            fn = self.scheduler_file  # remove file when we close the process

            def del_scheduler_file() -> None:
                if os.path.exists(fn):
                    os.remove(fn)

            # Best-effort cleanup tied to this object's lifetime rather than
            # interpreter exit.
            weakref.finalize(self, del_scheduler_file)
        await self.preloads.start()
        if self.jupyter:
            # Allow insecure communications from local users
            if self.address.startswith("tls://"):
                await self.listen("tcp://localhost:0")
            os.environ["DASK_SCHEDULER_ADDRESS"] = self.listeners[-1].contact_address
        await asyncio.gather(
            *[plugin.start(self) for plugin in list(self.plugins.values())]
        )
        self.start_periodic_callbacks()
        setproctitle(f"dask scheduler [{self.address}]")
        return self
    async def close(
        self,
        timeout: float | None = None,
        reason: str = "unknown",
    ) -> None:
        """Send cleanup signal to all coroutines then wait until finished.

        Idempotent: if a close is already in progress, waits for it instead
        of starting a second teardown. Teardown order: plugin
        ``before_close`` hooks, preloads, plugin ``close`` hooks, periodic
        callbacks, services, extensions, worker/client comms, RPC.

        See Also
        --------
        Scheduler.cleanup
        """
        if self.status in (Status.closing, Status.closed):
            await self.finished()
            return
        self.status = Status.closing
        logger.info("Closing scheduler. Reason: %s", reason)
        setproctitle("dask scheduler [closing]")

        # Plugin failures must not abort scheduler shutdown; log and continue.
        async def log_errors(func: Callable) -> None:
            try:
                await func()
            except Exception:
                logger.exception("Plugin call failed during scheduler.close")

        await asyncio.gather(
            *[log_errors(plugin.before_close) for plugin in list(self.plugins.values())]
        )
        await self.preloads.teardown()
        await asyncio.gather(
            *[log_errors(plugin.close) for plugin in list(self.plugins.values())]
        )
        for pc in self.periodic_callbacks.values():
            pc.stop()
        self.periodic_callbacks.clear()
        self.stop_services()
        for ext in self.extensions.values():
            with suppress(AttributeError):
                ext.teardown()
        logger.info("Scheduler closing all comms")
        futures = []
        for _, comm in list(self.stream_comms.items()):
            # FIXME use `self.remove_worker()` instead after https://github.com/dask/distributed/issues/6390
            if not comm.closed():
                # This closes the Worker and ensures that if a Nanny is around,
                # it is closed as well
                comm.send({"op": "close", "reason": "scheduler-close"})
                comm.send({"op": "close-stream"})
                # ^ TODO remove? `Worker.close` will close the stream anyway.
            with suppress(AttributeError):
                futures.append(comm.close())
        await asyncio.gather(*futures)
        if self.jupyter:
            await self._jupyter_server_application._cleanup()
        for comm in self.client_comms.values():
            comm.abort()
        await self.rpc.close()
        self.status = Status.closed
        self.stop()
        await super().close()
        setproctitle("dask scheduler [closed]")
        disable_gc_diagnosis()
###########
# Stimuli #
###########
    def heartbeat_worker(
        self,
        *,
        address: str,
        resolve_address: bool = True,
        now: float | None = None,
        resources: dict[str, float] | None = None,
        host_info: dict | None = None,
        metrics: dict,
        executing: dict[Key, float] | None = None,
        extensions: dict | None = None,
    ) -> dict[str, Any]:
        """Process a periodic heartbeat from a worker.

        Updates last-seen bookkeeping, exponentially-weighted bandwidth
        estimates, per-task execution durations, cumulative worker metrics
        and the managed/unmanaged memory split for the worker, then tells
        the worker how long to wait before the next heartbeat.

        Returns ``{"status": "missing"}`` for unregistered workers,
        otherwise an ``{"status": "OK", ...}`` reply containing the next
        heartbeat interval.
        """
        address = self.coerce_address(address, resolve_address)
        address = normalize_address(address)
        ws = self.workers.get(address)
        if ws is None:
            logger.warning(f"Received heartbeat from unregistered worker {address!r}.")
            return {"status": "missing"}
        host = get_address_host(address)
        local_now = time()
        host_info = host_info or {}
        dh = self.host_info.setdefault(host, {})
        dh["last-seen"] = local_now
        # Exponential moving average of cluster bandwidth; each worker
        # contributes a 1/len(workers) fraction per heartbeat.
        frac = 1 / len(self.workers)
        self.bandwidth = (
            self.bandwidth * (1 - frac) + metrics["bandwidth"]["total"] * frac
        )
        for other, (bw, count) in metrics["bandwidth"]["workers"].items():
            if (address, other) not in self.bandwidth_workers:
                self.bandwidth_workers[address, other] = bw / count
            else:
                alpha = (1 - frac) ** count
                self.bandwidth_workers[address, other] = self.bandwidth_workers[
                    address, other
                ] * alpha + bw * (1 - alpha)
        for typ, (bw, count) in metrics["bandwidth"]["types"].items():
            if typ not in self.bandwidth_types:
                self.bandwidth_types[typ] = bw / count
            else:
                alpha = (1 - frac) ** count
                self.bandwidth_types[typ] = self.bandwidth_types[typ] * alpha + bw * (
                    1 - alpha
                )
        ws.last_seen = local_now
        if executing is not None:
            # NOTE: the executing dict is unused
            ws.executing = {}
            for key, duration in executing.items():
                if key in self.tasks:
                    ts = self.tasks[key]
                    ws.executing[ts] = duration
                    ts.prefix.add_exec_time(duration)
        for name, value in metrics["digests_total_since_heartbeat"].items():
            self.cumulative_worker_metrics[name] += value
        ws.metrics = metrics
        # Calculate RSS - dask keys, separating "old" and "new" usage
        # See MemoryState for details
        max_memory_unmanaged_old_hist_age = local_now - self.MEMORY_RECENT_TO_OLD_TIME
        memory_unmanaged_old = ws._memory_unmanaged_old
        # Drop history entries older than MEMORY_RECENT_TO_OLD_TIME.
        while ws._memory_unmanaged_history:
            timestamp, size = ws._memory_unmanaged_history[0]
            if timestamp >= max_memory_unmanaged_old_hist_age:
                break
            ws._memory_unmanaged_history.popleft()
            if size == memory_unmanaged_old:
                memory_unmanaged_old = 0  # recalculate min()
        # ws._nbytes is updated at a different time and sizeof() may not be accurate,
        # so size may be (temporarily) negative; floor it to zero.
        size = max(
            0, metrics["memory"] - ws.nbytes + metrics["spilled_bytes"]["memory"]
        )
        ws._memory_unmanaged_history.append((local_now, size))
        if not memory_unmanaged_old:
            # The worker has just been started or the previous minimum has been expunged
            # because too old.
            # Note: this algorithm is capped to 200 * MEMORY_RECENT_TO_OLD_TIME elements
            # cluster-wide by heartbeat_interval(), regardless of the number of workers
            ws._memory_unmanaged_old = min(map(second, ws._memory_unmanaged_history))
        elif size < memory_unmanaged_old:
            ws._memory_unmanaged_old = size
        if host_info:
            dh = self.host_info.setdefault(host, {})
            dh.update(host_info)
        if now:
            # Clock skew / latency estimate between worker and scheduler.
            ws.time_delay = local_now - now
        if resources:
            self.add_resources(worker=address, resources=resources)
        if extensions:
            for name, data in extensions.items():
                self.extensions[name].heartbeat(ws, data)
        return {
            "status": "OK",
            "time": local_now,
            "heartbeat-interval": heartbeat_interval(len(self.workers)),
        }
    @log_errors
    async def add_worker(
        self,
        comm: Comm,
        *,
        address: str,
        status: str,
        server_id: str,
        nthreads: int,
        name: str,
        resolve_address: bool = True,
        now: float,
        resources: dict[str, float],
        # FIXME: This is never submitted by the worker
        host_info: None = None,
        memory_limit: int | None,
        metrics: dict[str, Any],
        pid: int = 0,
        services: dict[str, int],
        local_directory: str,
        versions: dict[str, Any],
        nanny: str,
        extra: dict,
        stimulus_id: str,
    ) -> None:
        """Add a new worker to the cluster.

        Registers a ``WorkerState``, updates host/thread/memory accounting,
        processes the initial heartbeat, runs plugin ``add_worker`` hooks,
        schedules any tasks that became runnable, replies to the worker and
        then keeps serving its batched stream until it is removed.

        Raises ``ValueError`` if the address is already registered; replies
        with a ``"status": "error"`` message if the worker name is taken.
        """
        address = self.coerce_address(address, resolve_address)
        address = normalize_address(address)
        host = get_address_host(address)
        if address in self.workers:
            raise ValueError("Worker already exists %s" % address)
        if name in self.aliases:
            logger.warning("Worker tried to connect with a duplicate name: %s", name)
            msg = {
                "status": "error",
                "message": "name taken, %s" % name,
                "time": time(),
            }
            await comm.write(msg)
            return
        self.log_event(address, {"action": "add-worker"})
        self.log_event("all", {"action": "add-worker", "worker": address})
        self.workers[address] = ws = WorkerState(
            address=address,
            status=Status.lookup[status],  # type: ignore
            pid=pid,
            nthreads=nthreads,
            memory_limit=memory_limit or 0,
            name=name,
            local_directory=local_directory,
            services=services,
            versions=versions,
            nanny=nanny,
            extra=extra,
            server_id=server_id,
            scheduler=self,
        )
        self._workers_added_total += 1
        if ws.status == Status.running:
            self.running.add(ws)
            self._refresh_no_workers_since()
        # Per-host bookkeeping: track addresses and total threads per host.
        dh = self.host_info.get(host)
        if dh is None:
            self.host_info[host] = dh = {}
        dh_addresses = dh.get("addresses")
        if dh_addresses is None:
            dh["addresses"] = dh_addresses = set()
            dh["nthreads"] = 0
        dh_addresses.add(address)
        dh["nthreads"] += nthreads
        self.total_memory += ws.memory_limit
        self.total_nthreads += nthreads
        self.total_nthreads_history.append((time(), self.total_nthreads))
        self.aliases[name] = address
        # Treat the registration payload as the worker's first heartbeat.
        self.heartbeat_worker(
            address=address,
            resolve_address=resolve_address,
            now=now,
            resources=resources,
            host_info=host_info,
            metrics=metrics,
        )
        # Do not need to adjust self.total_occupancy as self.occupancy[ws] cannot
        # exist before this.
        self.check_idle_saturated(ws)
        self.stream_comms[address] = BatchedSend(interval="5ms", loop=self.loop)
        awaitables = []
        for plugin in list(self.plugins.values()):
            try:
                result = plugin.add_worker(scheduler=self, worker=address)
                if result is not None and inspect.isawaitable(result):
                    awaitables.append(result)
            except Exception as e:
                logger.exception(e)
        plugin_msgs = await asyncio.gather(*awaitables, return_exceptions=True)
        plugins_exceptions = [msg for msg in plugin_msgs if isinstance(msg, Exception)]
        for exc in plugins_exceptions:
            logger.exception(exc, exc_info=exc)
        if ws.status == Status.running:
            self.transitions(
                self.bulk_schedule_unrunnable_after_adding_worker(ws), stimulus_id
            )
        self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
        logger.info("Register worker addr: %s name: %s", ws.address, ws.name)
        msg = {
            "status": "OK",
            "time": time(),
            "heartbeat-interval": heartbeat_interval(len(self.workers)),
            "worker-plugins": self.worker_plugins,
        }
        version_warning = version_module.error_message(
            version_module.get_versions(),
            {w: ws.versions for w, ws in self.workers.items()},
            versions,
            source_name=str(ws.server_id),
        )
        msg.update(version_warning)
        await comm.write(msg)
        # This will keep running until the worker is removed
        await self.handle_worker(comm, address)
    async def add_nanny(self, comm: Comm, address: str) -> None:
        """Handle a nanny connecting to the scheduler.

        Tracks the nanny in ``_starting_nannies`` while it is setting up
        (so that other code can wait for pending nanny startups via the
        condition variable), sends it the registered nanny plugins and
        waits for its acknowledgement before removing the tracking entry.
        """
        async with self._starting_nannies_cond:
            self._starting_nannies.add(address)
        try:
            msg = {
                "status": "OK",
                "nanny-plugins": self.nanny_plugins,
            }
            await comm.write(msg)
            # Wait for the nanny's acknowledgement before considering the
            # startup finished.
            await comm.read()
        finally:
            async with self._starting_nannies_cond:
                self._starting_nannies.discard(address)
                self._starting_nannies_cond.notify_all()
def _find_lost_dependencies(
self,
dsk: dict[Key, T_runspec],
keys: set[Key],
) -> set[Key]:
# FIXME: There is typically no need to walk the entire graph
lost_keys = set()
seen: set[Key] = set()
sadd = seen.add
for k in list(keys):
work = {k}
wpop = work.pop
wupdate = work.update
while work:
d = wpop()
if d in seen:
continue
sadd(d)
if d not in dsk:
if d not in self.tasks:
lost_keys.add(d)
lost_keys.add(k)
logger.info(
"User asked for computation on lost data. Final key is %s with missing dependency %s",
k,
d,
)
continue
wupdate(dsk[d].dependencies)
return lost_keys
    def _create_taskstate_from_graph(
        self,
        *,
        start: float,
        dsk: dict[Key, T_runspec],
        keys: set[Key],
        ordered: dict[Key, int],
        client: str,
        annotations_by_type: dict,
        global_annotations: dict | None,
        stimulus_id: str,
        submitting_task: Key | None,
        span_metadata: SpanMetadata,
        user_priority: int | dict[Key, int] = 0,
        actors: bool | list[Key] | None = None,
        fifo_timeout: float = 0.0,
        code: tuple[SourceCode, ...] = (),
    ) -> dict[str, float]:
        """
        Take a low level graph and create the necessary scheduler state to
        compute it.

        Creates/updates TaskStates, applies annotations and priorities,
        registers client interest, notifies span/plugin machinery and kicks
        off the initial released->waiting transitions.

        Returns a metrics dict (task counts and key collision count) that
        the caller merges into its timing metrics.

        WARNING
        -------
        This method must not be made async since nothing here is concurrency
        safe. All interactions with TaskState objects here should be happening
        in the same event loop tick.
        """
        if not self.is_idle and self.computations:
            # Still working on something. Assign new tasks to same computation
            computation = self.computations[-1]
        else:
            computation = Computation()
            self.computations.append(computation)
        if code:  # add new code blocks
            computation.code.add(code)
        if global_annotations:
            # FIXME: This is kind of inconsistent since it only includes global
            # annotations.
            computation.annotations.update(global_annotations)
        (
            touched_tasks,
            new_tasks,
            colliding_task_count,
        ) = self._generate_taskstates(
            keys=keys,
            dsk=dsk,
            computation=computation,
        )
        metrics = {
            "tasks": len(dsk),
            "new_tasks": len(new_tasks),
            "key_collisions": colliding_task_count,
        }
        keys_with_annotations = self._apply_annotations(
            tasks=new_tasks,
            annotations_by_type=annotations_by_type,
            global_annotations=global_annotations,
        )
        self._set_priorities(
            internal_priority=ordered,
            submitting_task=submitting_task,
            user_priority=user_priority,
            fifo_timeout=fifo_timeout,
            start=start,
            tasks=touched_tasks,
        )
        self.client_desires_keys(keys=keys, client=client)
        # Add actors
        if actors is True:
            actors = list(keys)
        for actor in actors or []:
            ts = self.tasks[actor]
            ts.actor = True
        # Compute recommendations
        recommendations: Recs = {}
        # Schedule released requested keys, highest priority first.
        for ts in sorted(
            filter(
                lambda ts: ts.state == "released",
                map(self.tasks.__getitem__, keys),
            ),
            key=operator.attrgetter("priority"),
            reverse=True,
        ):
            recommendations[ts.key] = "waiting"
        # Propagate pre-existing dependency failures to the new tasks.
        for ts in touched_tasks:
            for dts in ts.dependencies:
                if dts.exception_blame:
                    ts.exception_blame = dts.exception_blame
                    recommendations[ts.key] = "erred"
                    break
        annotations_for_plugin: defaultdict[str, dict[Key, Any]] = defaultdict(dict)
        for key in keys_with_annotations:
            ts = self.tasks[key]
            if ts.annotations:
                for annot, value in ts.annotations.items():
                    annotations_for_plugin[annot][key] = value
        spans_ext: SpansSchedulerExtension | None = self.extensions.get("spans")
        if spans_ext:
            # new_tasks does not necessarily contain all runnable tasks;
            # _generate_taskstates is not the only thing that calls new_task(). A
            # TaskState may have also been created by client_desires_keys or scatter,
            # and only later gained a run_spec.
            span_annotations = spans_ext.observe_tasks(
                touched_tasks, span_metadata=span_metadata, code=code
            )
            # In case of TaskGroup collision, spans may have changed
            # FIXME: Is this used anywhere besides tests?
            if span_annotations:
                annotations_for_plugin["span"] = span_annotations
            else:
                annotations_for_plugin.pop("span", None)
        tasks_for_plugin = [ts.key for ts in touched_tasks]
        priorities_for_plugin = {ts.key: ts.priority for ts in touched_tasks}
        # Plugin failures are logged but must not abort graph submission.
        for plugin in list(self.plugins.values()):
            try:
                plugin.update_graph(
                    self,
                    client=client,
                    tasks=tasks_for_plugin,
                    keys=keys,
                    annotations=annotations_for_plugin,
                    priority=priorities_for_plugin,
                    stimulus_id=stimulus_id,
                )
            except Exception as e:
                logger.exception(e)
        self.transitions(recommendations, stimulus_id)
        # Requested keys that are already done must be reported immediately.
        for ts in touched_tasks:
            if ts.state in ("memory", "erred"):
                self.report_on_key(ts=ts, client=client)
        return metrics
@log_errors
async def update_graph(
self,
client: str,
expr_ser: Serialized,
keys: set[Key],
span_metadata: SpanMetadata,
internal_priority: dict[Key, int] | None,
submitting_task: Key | None,
user_priority: int | dict[Key, int] = 0,
actors: bool | list[Key] | None = None,
fifo_timeout: float = 0.0,
code: tuple[SourceCode, ...] = (),
annotations: dict | None = None,
stimulus_id: str | None = None,
) -> None:
start = time()
stimulus_id = stimulus_id or f"update-graph-{start}"
self._active_graph_updates += 1
evt_msg: dict[str, Any]
try:
logger.debug("Received new graph. Deserializing...")
try:
expr = deserialize(expr_ser.header, expr_ser.frames)
del expr_ser
except Exception as e:
msg = """\
Error during deserialization of the task graph. This frequently
occurs if the Scheduler and Client have different environments.
For more information, see
https://docs.dask.org/en/stable/deployment-considerations.html#consistent-software-environments
"""
raise RuntimeError(textwrap.dedent(msg)) from e
(
dsk,
annotations_by_type,
) = await offload(
_materialize_graph,
expr=expr,
validate=self.validate,
)
materialization_done = time()
logger.debug("Materialization done. Got %i tasks.", len(dsk))
# Most/all other expression types are implementing their own
# culling. For LLGExpr we just don't know
explicit_culling = isinstance(expr, LLGExpr)
del expr
if explicit_culling:
dsk = _cull(dsk, keys)
if not internal_priority:
internal_priority = await offload(dask.order.order, dsk=dsk)
ordering_done = time()
logger.debug("Ordering done.")
# *************************************
# BELOW THIS LINE HAS TO BE SYNCHRONOUS
#
# Everything that compares the submitted graph to the current state
# has to happen in the same event loop.
# *************************************
if self._find_lost_dependencies(dsk, keys):
self.report(
{
"op": "cancelled-keys",
"keys": keys,
"reason": "lost dependencies",
},
client=client,
)
self.client_releases_keys(
keys=keys, client=client, stimulus_id=stimulus_id
)
evt_msg = {
"action": "update-graph",
"stimulus_id": stimulus_id,
"status": "cancelled",
}
self.log_event(["scheduler", client], evt_msg)
return
before = len(self.tasks)
metrics = self._create_taskstate_from_graph(
dsk=dsk,
client=client,
keys=set(keys),
ordered=internal_priority or {},
submitting_task=submitting_task,
user_priority=user_priority,
actors=actors,
fifo_timeout=fifo_timeout,
code=code,
span_metadata=span_metadata,
annotations_by_type=annotations_by_type,
global_annotations=annotations,
start=start,
stimulus_id=stimulus_id,
)
task_state_created = time()
metrics.update(
{
"start_timestamp_seconds": start,
"materialization_duration_seconds": materialization_done - start,
"ordering_duration_seconds": materialization_done - ordering_done,
"state_initialization_duration_seconds": ordering_done
- task_state_created,
"duration_seconds": task_state_created - start,
}
)
evt_msg = {
"action": "update-graph",
"stimulus_id": stimulus_id,
"metrics": metrics,
"status": "OK",
}
self.log_event(["scheduler", client], evt_msg)
logger.debug("Task state created. %i new tasks", len(self.tasks) - before)
except Exception as e:
evt_msg = {
"action": "update-graph",
"stimulus_id": stimulus_id,
"status": "error",
}
self.log_event(["scheduler", client], evt_msg)
logger.error(str(e))
err = error_message(e)
for key in keys:
self.report(
{
"op": "task-erred",
"key": key,
"exception": err["exception"],
"traceback": err["traceback"],
},
# This informs all clients in who_wants plus the current client
# (which may not have been added to who_wants yet)
client=client,
)
finally:
self._active_graph_updates -= 1
assert self._active_graph_updates >= 0
end = time()
self.digest_metric("update-graph-duration", end - start)
    def _generate_taskstates(
        self,
        keys: set[Key],
        dsk: dict[Key, T_runspec],
        computation: Computation,
    ) -> tuple:
        """Create or update ``TaskState`` objects for the submitted graph.

        Walks the graph from *keys* through dependencies, creating task
        states for unknown keys, filling in missing run_specs and detecting
        key collisions (same key resubmitted with different dependencies).
        Colliding keys keep their existing dependencies.

        Returns ``(touched_tasks, new_tasks, colliding_task_count)``.
        """
        # Get or create task states
        new_tasks = []
        stack = list(keys)
        touched_keys = set()
        touched_tasks = []
        tgs_with_bad_run_spec = set()
        colliding_task_count = 0
        collisions = set()
        while stack:
            k = stack.pop()
            if k in touched_keys:
                continue
            ts = self.tasks.get(k)
            if ts is None:
                ts = self.new_task(k, dsk.get(k), "released", computation=computation)
                new_tasks.append(ts)
            # It is possible to create the TaskState object before its runspec is known
            # to the scheduler. For instance, this is possible when using a Variable:
            # `f = c.submit(foo); await Variable().set(f)` since the Variable uses a
            # different comm channel, so the `client_desires_key` message could arrive
            # before `update_graph`.
            # There are also anti-pattern processes possible;
            # see for example test_scatter_creates_ts
            elif ts.run_spec is None:
                ts.run_spec = dsk.get(k)
            # run_spec in the submitted graph may be None. This happens
            # when an already persisted future is part of the graph
            elif k in dsk:
                # Check dependency names.
                deps_lhs = {dts.key for dts in ts.dependencies}
                deps_rhs = dsk[k].dependencies
                # FIXME It would be a really healthy idea to change this to a hard
                # failure. However, this is not possible at the moment because of
                # https://github.com/dask/dask/issues/9888
                if deps_lhs != deps_rhs:
                    collisions.add(k)
                    colliding_task_count += 1
                    # Warn only once per TaskGroup to avoid log spam.
                    if ts.group not in tgs_with_bad_run_spec:
                        tgs_with_bad_run_spec.add(ts.group)
                        logger.warning(
                            f"Detected different `run_spec` for key {ts.key!r} between "
                            "two consecutive calls to `update_graph`. "
                            "This can cause failures and deadlocks down the line. "
                            "Please ensure unique key names. "
                            "If you are using a standard dask collections, consider "
                            "releasing all the data before resubmitting another "
                            "computation. More details and help can be found at "
                            "https://github.com/dask/dask/issues/9888. "
                            + textwrap.dedent(
                                f"""
                                Debugging information
                                ---------------------
                                old task state: {ts.state}
                                old run_spec: {ts.run_spec!r}
                                new run_spec: {dsk[k]!r}
                                old dependencies: {deps_lhs}
                                new dependencies: {deps_rhs}
                                """
                            )
                        )
                    else:
                        logger.debug(
                            f"Detected different `run_spec` for key {ts.key!r} between "
                            "two consecutive calls to `update_graph`."
                        )
            touched_keys.add(k)
            touched_tasks.append(ts)
            if tspec := dsk.get(k, ()):
                stack.extend(tspec.dependencies)
        # Add dependencies
        for key, tspec in dsk.items():
            ts = self.tasks.get(key)
            # Colliding keys keep their previously registered dependencies.
            if ts is None or key in collisions:
                continue
            for dep in tspec.dependencies:
                dts = self.tasks[dep]
                ts.add_dependency(dts)
        if len(touched_tasks) < len(keys):
            logger.info(
                "Submitted graph with length %s but requested graph only includes %s keys",
                len(touched_tasks),
                len(keys),
            )
        return touched_tasks, new_tasks, colliding_task_count
    def _apply_annotations(
        self,
        tasks: Iterable[TaskState],
        annotations_by_type: dict[str, dict[Key, Any]],
        global_annotations: dict[str, Any] | None = None,
    ) -> set[Key]:
        """Apply the provided annotations to the provided `TaskState` objects.

        The raw annotations will be stored in the `annotations` attribute.

        Layer / key specific annotations will take precedence over global / generic annotations.

        Parameters
        ----------
        tasks : Iterable[TaskState]
            Task states to annotate.
        annotations_by_type : dict[str, dict[Key, Any]]
            Per-key annotations, keyed by annotation name and then task key.
        global_annotations : dict[str, Any], optional
            Annotations applied to every task; a callable value is invoked
            with the task key to produce the actual value.

        Returns
        -------
        keys_with_annotations
            Keys of all tasks that received at least one annotation.
        """
        keys_with_annotations: set[Key] = set()
        if not annotations_by_type and not global_annotations:
            return keys_with_annotations
        for ts in tasks:
            key = ts.key
            ts_annotations = {}
            # Globals first so per-key annotations can override them below.
            if global_annotations:
                for annot, value in global_annotations.items():
                    if callable(value):
                        value = value(ts.key)
                    ts_annotations[annot] = value
            for annot, key_value in annotations_by_type.items():
                if (value := key_value.get(key)) is not None:
                    ts_annotations[annot] = value
            if not ts_annotations:
                continue
            keys_with_annotations.add(key)
            ts.annotations = ts_annotations
            # Translate well-known annotations into scheduler restrictions.
            for annot, value in ts_annotations.items():
                if annot in ("restrictions", "workers"):
                    if not isinstance(value, (list, tuple, set)):
                        value = [value]
                    host_restrictions = set()
                    worker_restrictions = set()
                    for w in value:
                        try:
                            w = self.coerce_address(w)
                        except ValueError:
                            # Not a valid address, but perhaps it's a hostname
                            host_restrictions.add(w)
                        else:
                            worker_restrictions.add(w)
                    if host_restrictions:
                        ts.host_restrictions = host_restrictions
                    if worker_restrictions:
                        ts.worker_restrictions = worker_restrictions
                elif annot in ("loose_restrictions", "allow_other_workers"):
                    ts.loose_restrictions = value
                elif annot == "resources":
                    assert isinstance(value, dict)
                    ts.resource_restrictions = value
                elif annot == "priority":
                    # See Scheduler._set_priorities
                    continue
                elif annot == "retries":
                    assert isinstance(value, int)
                    ts.retries = value
        return keys_with_annotations
    def _set_priorities(
        self,
        internal_priority: dict[Key, int],
        submitting_task: Key | None,
        user_priority: int | dict[Key, int],
        fifo_timeout: int | float | str,
        start: float,
        tasks: set[TaskState],
    ) -> None:
        """Assign the composite scheduling priority to *tasks*.

        The priority tuple is ``(-user_priority, generation, internal)``:
        user-specified priority (negated so larger means sooner), a
        FIFO-ish graph generation counter, then the dask.order internal
        priority. Tasks that already have a priority, or that lack an
        internal priority entry, are left untouched.
        """
        fifo_timeout = parse_timedelta(fifo_timeout)
        if submitting_task:  # sub-tasks get better priority than parent tasks
            sts = self.tasks.get(submitting_task)
            if sts is not None:
                assert sts.priority
                generation = sts.priority[0] - 0.01
            else:  # super-task already cleaned up
                generation = self.generation
        elif self._last_time + fifo_timeout < start:
            self.generation += 1  # older graph generations take precedence
            generation = self.generation
            self._last_time = start
        else:
            generation = self.generation
        for ts in tasks:
            if isinstance(user_priority, dict):
                task_user_prio = user_priority.get(ts.key, 0)
            else:
                task_user_prio = user_priority
            # Annotations that are already assigned to the TaskState object
            # originate from a Layer annotation which takes precedence over the
            # global annotation.
            if ts.annotations:
                annotated_prio = ts.annotations.get("priority", task_user_prio)
            else:
                annotated_prio = task_user_prio
            if not ts.priority and ts.key in internal_priority:
                ts.priority = (
                    -annotated_prio,
                    generation,
                    internal_priority[ts.key],
                )
            if self.validate and istask(ts.run_spec):
                assert isinstance(ts.priority, tuple) and all(
                    isinstance(el, (int, float)) for el in ts.priority
                )
    def stimulus_queue_slots_maybe_opened(self, *, stimulus_id: str) -> None:
        """Respond to an event which may have opened spots on worker threadpools

        Selects the appropriate number of tasks from the front of the queue according to
        the total number of task slots available on workers (potentially 0), and
        transitions them to ``processing``.

        Notes
        -----
        Other transitions related to this stimulus should be fully processed beforehand,
        so any tasks that became runnable are already in ``processing``. Otherwise,
        overproduction can occur if queued tasks get scheduled before downstream tasks.

        Must be called after `check_idle_saturated`; i.e. `idle_task_count` must be up to date.
        """
        if not self.queued:
            return
        # Total free threadpool slots across currently-idle workers.
        slots_available = sum(
            _task_slots_available(ws, self.WORKER_SATURATION)
            for ws in self.idle_task_count
        )
        if slots_available == 0:
            return
        for _ in range(slots_available):
            if not self.queued:
                return
            # Ideally, we'd be popping it here already but this would break
            # certain state invariants since the task is not transitioned, yet
            qts = self.queued.peek()
            if self.validate:
                assert qts.state == "queued", qts.state
                assert not qts.processing_on, (qts, qts.processing_on)
                assert not qts.waiting_on, (qts, qts.processing_on)
                assert qts.who_wants or qts.waiters, qts
            # This removes the task from the top of the self.queued heap
            self.transitions({qts.key: "processing"}, stimulus_id)
            if self.validate:
                assert qts.state == "processing"
                assert not self.queued or self.queued.peek() != qts
    def stimulus_task_finished(
        self, key: Key, worker: str, stimulus_id: str, run_id: int, **kwargs: Any
    ) -> RecsMsgs:
        """Mark that a task has finished execution on a particular worker.

        Handles stale or duplicate completion reports: unknown/released/
        erred tasks get a ``free-keys`` message back to the worker; a stale
        ``run_id`` either frees the key or releases the task; only a fresh
        completion transitions the task to ``memory``.

        Returns a ``(recommendations, client_msgs, worker_msgs)`` triple.
        """
        logger.debug("Stimulus task finished %s[%d] %s", key, run_id, worker)
        recommendations: Recs = {}
        client_msgs: Msgs = {}
        worker_msgs: Msgs = {}
        ts = self.tasks.get(key)
        if ts is None or ts.state in ("released", "queued", "no-worker"):
            # The scheduler no longer wants this result: tell the worker to
            # drop its copy.
            logger.debug(
                "Received already computed task, worker: %s, state: %s"
                ", key: %s, who_has: %s",
                worker,
                ts.state if ts else "forgotten",
                key,
                ts.who_has if ts else {},
            )
            worker_msgs[worker] = [
                {
                    "op": "free-keys",
                    "keys": [key],
                    "stimulus_id": stimulus_id,
                }
            ]
        elif ts.state == "erred":
            logger.debug(
                "Received already erred task, worker: %s" ", key: %s",
                worker,
                key,
            )
            worker_msgs[worker] = [
                {
                    "op": "free-keys",
                    "keys": [key],
                    "stimulus_id": stimulus_id,
                }
            ]
        elif ts.run_id != run_id:
            # Completion report from an out-of-date run of the task.
            if not ts.processing_on or ts.processing_on.address != worker:
                logger.debug(
                    "Received stale task run, worker: %s, key: %s, run_id: %d (%d)",
                    worker,
                    key,
                    run_id,
                    ts.run_id,
                )
                worker_msgs[worker] = [
                    {
                        "op": "free-keys",
                        "keys": [key],
                        "stimulus_id": stimulus_id,
                    }
                ]
            else:
                recommendations[ts.key] = "released"
        elif ts.state == "memory":
            # Duplicate completion: just record the extra replica.
            self.add_keys(worker=worker, keys=[key])
        else:
            # Fresh completion: merge metadata and transition to memory.
            if kwargs["metadata"]:
                if ts.metadata is None:
                    ts.metadata = dict()
                ts.metadata.update(kwargs["metadata"])
            return self._transition(key, "memory", stimulus_id, worker=worker, **kwargs)
        return recommendations, client_msgs, worker_msgs
    def stimulus_task_erred(
        self,
        key: Key,
        worker: str,
        exception: Any,
        stimulus_id: str,
        traceback: Any,
        # NOTE(review): annotated ``str`` but compared against ``ts.run_id``,
        # which ``stimulus_task_finished`` annotates as ``int`` — confirm and
        # reconcile the annotation.
        run_id: str,
        **kwargs: Any,
    ) -> RecsMsgs:
        """Mark that a task has erred on a particular worker.

        Ignores reports for unknown or non-processing tasks and stale
        ``run_id``s. If retries remain, decrements the counter and
        reschedules the task (``waiting``); otherwise transitions it to
        ``erred``. Returns ``(recommendations, client_msgs, worker_msgs)``.
        """
        logger.debug("Stimulus task erred %s, %s", key, worker)
        ts = self.tasks.get(key)
        if ts is None or ts.state != "processing":
            return {}, {}, {}
        if ts.run_id != run_id:
            # Stale error report; release only if it came from the worker
            # currently processing the task.
            if ts.processing_on and ts.processing_on.address == worker:
                return self._transition(key, "released", stimulus_id)
            return {}, {}, {}
        if ts.retries > 0:
            ts.retries -= 1
            return self._transition(key, "waiting", stimulus_id)
        else:
            return self._transition(
                key,
                "erred",
                stimulus_id,
                cause=key,
                exception=exception,
                traceback=traceback,
                worker=worker,
                **kwargs,
            )
def stimulus_retry(
    self, keys: Collection[Key], client: str | None = None
) -> tuple[Key, ...]:
    """Reschedule erred tasks for execution.

    Walks from *keys* down through erred dependencies (depth-first) and
    transitions the roots of each erred subgraph back to ``waiting``.
    Returns every key touched by the traversal.
    """
    logger.info("Client %s requests to retry %d keys", client, len(keys))
    if client:
        self.log_event(client, {"action": "retry", "count": len(keys)})

    # DFS: a key with erred dependencies cannot be retried directly;
    # recurse into those dependencies instead.
    stack = list(keys)
    seen = set()
    roots = []
    while stack:
        key = stack.pop()
        seen.add(key)
        ts = self.tasks[key]
        erred_deps = [dts.key for dts in ts.dependencies if dts.state == "erred"]
        if erred_deps:
            stack.extend(erred_deps)
        else:
            roots.append(key)

    recommendations: Recs = {key: "waiting" for key in roots}
    self.transitions(recommendations, f"stimulus-retry-{time()}")

    if self.validate:
        for key in seen:
            assert not self.tasks[key].exception_blame

    return tuple(seen)
def close_worker(self, worker: str) -> None:
    """Ask a worker to shut itself down. Do not wait for it to take effect.

    There is no guarantee that the worker will actually honor the request;
    unknown workers are silently ignored.

    Note that :meth:`remove_worker` sends the same command internally if close=True.

    See also
    --------
    retire_workers
    remove_worker
    """
    if worker in self.workers:
        logger.info("Closing worker %s", worker)
        self.log_event(worker, {"action": "close-worker"})
        self.worker_send(
            worker, {"op": "close", "reason": "scheduler-close-worker"}
        )
@_deprecated_kwarg("safe", "expected")
@log_errors
async def remove_worker(
    self,
    address: str,
    *,
    stimulus_id: str,
    expected: bool = False,
    close: bool = True,
) -> Literal["OK", "already-removed"]:
    """Remove worker from cluster.

    We do this when a worker reports that it plans to leave or when it appears to be
    unresponsive. This may send its tasks back to a released state.

    Parameters
    ----------
    address:
        Address of the worker to remove.
    stimulus_id:
        Identifier of the stimulus causing the removal (logging/tracing).
    expected:
        True for a graceful departure. Unexpected departures count against
        each in-flight task's ``suspicious`` budget.
    close:
        If True, also tell the worker to shut itself down.

    See also
    --------
    retire_workers
    close_worker
    """
    if self.status == Status.closed:
        return "already-removed"

    address = self.coerce_address(address)

    if address not in self.workers:
        return "already-removed"

    host = get_address_host(address)

    ws = self.workers[address]

    logger.info(
        f"Remove worker addr: {ws.address} name: {ws.name} ({stimulus_id=})"
    )
    if close:
        with suppress(AttributeError, CommClosedError):
            # Best effort: the comm may already be gone.
            self.stream_comms[address].send(
                {"op": "close", "reason": "scheduler-remove-worker"}
            )

    self.remove_resources(address)

    # Unregister the worker from per-host and cluster-wide bookkeeping.
    dh = self.host_info[host]
    dh_addresses: set = dh["addresses"]
    dh_addresses.remove(address)
    dh["nthreads"] -= ws.nthreads
    self.total_memory -= ws.memory_limit
    self.total_nthreads -= ws.nthreads
    self.total_nthreads_history.append((time(), self.total_nthreads))
    if not dh_addresses:
        del self.host_info[host]

    self.rpc.remove(address)
    del self.stream_comms[address]
    del self.aliases[ws.name]
    self.idle.pop(ws.address, None)
    self.idle_task_count.discard(ws)
    self.saturated.discard(ws)
    del self.workers[address]
    self._workers_removed_total += 1
    ws.status = Status.closed
    self.running.discard(ws)

    recommendations: Recs = {}

    timestamp = monotonic()
    processing_keys = {ts.key for ts in ws.processing}
    for ts in list(ws.processing):
        k = ts.key
        recommendations[k] = "released"
        if not expected:
            # An unexpected death counts against the task; past the
            # allowed-failures budget we err the task instead of retrying.
            ts.suspicious += 1
            ts.prefix.suspicious += 1
            if ts.suspicious > self.allowed_failures:
                del recommendations[k]
                e = pickle.dumps(
                    KilledWorker(
                        task=k,
                        last_worker=ws.clean(),
                        allowed_failures=self.allowed_failures,
                    ),
                )
                r = self.transition(
                    k,
                    "erred",
                    exception=e,
                    cause=k,
                    stimulus_id=stimulus_id,
                    worker=address,
                )
                recommendations.update(r)
                logger.error(
                    "Task %s marked as failed because %d workers died"
                    " while trying to run it",
                    ts.key,
                    ts.suspicious,
                )

    recompute_keys = set()
    lost_keys = set()

    for ts in list(ws.has_what):
        self.remove_replica(ts, ws)
        if ts in ws.actors:
            # Actor state lived only on this worker; it cannot be recovered.
            recommendations[ts.key] = "erred"
        elif not ts.who_has:
            if ts.run_spec:
                # Result of a computation: can be recomputed elsewhere.
                recompute_keys.add(ts.key)
                recommendations[ts.key] = "released"
            else:  # pure data
                lost_keys.add(ts.key)
                recommendations[ts.key] = "forgotten"

    if recompute_keys:
        logger.warning(
            f"Removing worker {ws.address!r} caused the cluster to lose "
            "already computed task(s), which will be recomputed elsewhere: "
            f"{recompute_keys} ({stimulus_id=})"
        )
    if lost_keys:
        logger.error(
            f"Removing worker {ws.address!r} caused the cluster to lose scattered "
            f"data, which can't be recovered: {lost_keys} ({stimulus_id=})"
        )

    event_msg = {
        "action": "remove-worker",
        "processing-tasks": processing_keys,
        "lost-computed-tasks": recompute_keys,
        "lost-scattered-tasks": lost_keys,
        "stimulus_id": stimulus_id,
        "expected": expected,
    }
    self.log_event(address, event_msg.copy())
    event_msg["worker"] = address
    self.log_event("all", event_msg)

    self.transitions(recommendations, stimulus_id=stimulus_id)

    # Make sure that the timestamp has been collected before tasks were transitioned to no-worker
    # to ensure a meaningful error message.
    self._refresh_no_workers_since(timestamp=timestamp)

    awaitables = []
    for plugin in list(self.plugins.values()):
        try:
            try:
                result = plugin.remove_worker(
                    scheduler=self, worker=address, stimulus_id=stimulus_id
                )
            except TypeError:
                parameters = inspect.signature(plugin.remove_worker).parameters
                if "stimulus_id" not in parameters and not any(
                    p.kind is p.VAR_KEYWORD for p in parameters.values()
                ):
                    # Deprecated (see add_plugin)
                    result = plugin.remove_worker(scheduler=self, worker=address)  # type: ignore
                else:
                    raise
            if inspect.isawaitable(result):
                awaitables.append(result)
        except Exception as e:
            logger.exception(e)

    plugin_msgs = await asyncio.gather(*awaitables, return_exceptions=True)
    plugins_exceptions = [msg for msg in plugin_msgs if isinstance(msg, Exception)]
    for exc in plugins_exceptions:
        logger.exception(exc, exc_info=exc)

    if not self.workers:
        logger.info("Lost all workers")

    for w in self.workers:
        self.bandwidth_workers.pop((address, w), None)
        self.bandwidth_workers.pop((w, address), None)

    async def remove_worker_from_events() -> None:
        # If the worker isn't registered anymore after the delay, remove from events
        if address not in self.workers:
            self._broker.truncate(address)

    cleanup_delay = parse_timedelta(
        dask.config.get("distributed.scheduler.events-cleanup-delay")
    )

    self._ongoing_background_tasks.call_later(
        cleanup_delay, remove_worker_from_events
    )
    logger.debug("Removed worker %s", ws)

    # Tell the surviving workers so they can react to the departed peer.
    for w in self.workers:
        self.worker_send(
            w,
            {
                "op": "remove-worker",
                "worker": address,
                "stimulus_id": stimulus_id,
            },
        )

    return "OK"
def stimulus_cancel(
    self, keys: Collection[Key], client: str, force: bool, reason: str, msg: str
) -> None:
    """Stop execution on a list of keys"""
    logger.info("Client %s requests to cancel %d keys", client, len(keys))
    self.log_event(client, {"action": "cancel", "count": len(keys), "force": force})

    cs = self.clients.get(client)
    if not cs:
        return

    cancelled_keys = []
    clients = []
    for key in keys:
        ts = self.tasks.get(key)
        if not ts:
            continue

        # With force=True a key is cancelled for everyone; otherwise only
        # when the requesting client is its sole subscriber.
        if force or ts.who_wants == {cs}:  # no one else wants this key
            if ts.dependents:
                # Recursively cancel all dependents as well.
                self.stimulus_cancel(
                    [dts.key for dts in ts.dependents],
                    client,
                    force=force,
                    reason=reason,
                    msg=msg,
                )
            logger.info("Scheduler cancels key %s. Force=%s", key, force)
            cancelled_keys.append(key)
            assert ts.who_wants
            clients.extend(list(ts.who_wants) if force else [cs])

    # NOTE: this loop deliberately rebinds `cs` to each affected client.
    for cs in clients:
        self.client_releases_keys(
            keys=cancelled_keys,
            client=cs.client_key,
            stimulus_id=f"cancel-key-{time()}",
        )
    self.report(
        {
            "op": "cancelled-keys",
            "keys": cancelled_keys,
            "reason": reason,
            "msg": msg,
        }
    )
def client_desires_keys(self, keys: Collection[Key], client: str) -> None:
    """Register *client*'s interest in *keys*, reporting already-done ones.

    Unknown clients are created on the fly (used by publish, queues, etc.).
    Unknown keys trigger a warning and are skipped.
    """
    cs = self.clients.get(client)
    if cs is None:
        # For publish, queues etc.
        self.clients[client] = cs = ClientState(client)
    for k in keys:
        ts = self.tasks.get(k)
        if ts is None:
            warnings.warn(f"Client desires key {k!r} but key is unknown.")
            continue
        if ts.who_wants is None:
            ts.who_wants = set()
        ts.who_wants.add(cs)
        cs.wants_what.add(ts)

        if ts.state in ("memory", "erred"):
            # Task already reached a terminal state: notify immediately.
            self.report_on_key(ts=ts, client=client)
def client_releases_keys(
    self, keys: Collection[Key], client: str, stimulus_id: str | None = None
) -> None:
    """Remove keys from client desired list"""
    stimulus_id = stimulus_id or f"client-releases-keys-{time()}"
    if not isinstance(keys, list):
        keys = list(keys)
    cs = self.clients[client]
    recommendations: Recs = {}

    # _client_releases_keys fills `recommendations` in place.
    self._client_releases_keys(keys=keys, cs=cs, recommendations=recommendations)
    self.transitions(recommendations, stimulus_id)

    # Dropping interest may free queue slots for queued root tasks.
    self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
def client_heartbeat(self, client: str) -> None:
    """Record a heartbeat from a client and tell it how often to ping back."""
    self.clients[client].last_seen = time()
    # heartbeat_interval() is sized for workers; clients don't need to
    # heartbeat nearly as often, hence the 10x multiplier.
    interval = heartbeat_interval(len(self.clients)) * 10
    self.client_comms[client].send(
        {"op": "adjust-heartbeat-interval", "interval": interval}
    )
###################
# Task Validation #
###################
def validate_released(self, key: Key) -> None:
ts = self.tasks[key]
assert ts.state == "released"
assert not ts.waiters
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not any([ts in (dts.waiters or ()) for dts in ts.dependencies])
assert ts not in self.unrunnable
assert ts not in self.queued
def validate_waiting(self, key: Key) -> None:
ts = self.tasks[key]
assert ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert ts not in self.unrunnable
assert ts not in self.queued
for dts in ts.dependencies:
# We are waiting on a dependency iff it's not stored
assert bool(dts.who_has) != (dts in (ts.waiting_on or ()))
assert ts in (dts.waiters or ()) # XXX even if dts._who_has?
def validate_queued(self, key: Key) -> None:
ts = self.tasks[key]
assert ts in self.queued
assert not ts.waiting_on
assert not ts.who_has
assert not ts.processing_on
assert not (
ts.worker_restrictions or ts.host_restrictions or ts.resource_restrictions
)
for dts in ts.dependencies:
assert dts.who_has
assert ts in (dts.waiters or ())
def validate_processing(self, key: Key) -> None:
ts = self.tasks[key]
assert not ts.waiting_on
ws = ts.processing_on
assert ws
assert ts in ws.processing
assert not ts.who_has
assert ts not in self.queued
for dts in ts.dependencies:
assert dts.who_has or ()
assert ts in (dts.waiters or ())
def validate_memory(self, key: Key) -> None:
ts = self.tasks[key]
assert ts.who_has
assert bool(ts in self.replicated_tasks) == (len(ts.who_has) > 1)
assert not ts.processing_on
assert not ts.waiting_on
assert ts not in self.unrunnable
assert ts not in self.queued
for dts in ts.dependents:
assert (dts in (ts.waiters or ())) == (
dts.state in ("waiting", "queued", "processing", "no-worker")
)
assert ts not in (dts.waiting_on or ())
def validate_no_worker(self, key: Key) -> None:
ts = self.tasks[key]
assert ts in self.unrunnable
assert not ts.waiting_on
assert ts in self.unrunnable
assert not ts.processing_on
assert not ts.who_has
assert ts not in self.queued
for dts in ts.dependencies:
assert dts.who_has
def validate_erred(self, key: Key) -> None:
ts = self.tasks[key]
assert ts.exception_blame
assert not ts.who_has
assert ts not in self.queued
def validate_key(self, key: Key, ts: TaskState | None = None) -> None:
    """Run the per-state invariant checks for a single task.

    Dispatches to ``validate_<state>`` based on ``ts.state``; failures are
    logged (and optionally dropped into pdb) before being re-raised.
    """
    try:
        if ts is None:
            ts = self.tasks.get(key)
        if ts is None:
            logger.debug("Key lost: %s", key)
        else:
            ts.validate()
            try:
                # e.g. state "no-worker" -> self.validate_no_worker
                func = getattr(self, "validate_" + ts.state.replace("-", "_"))
            except AttributeError:
                logger.error(
                    "self.validate_%s not found", ts.state.replace("-", "_")
                )
            else:
                func(key)
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            import pdb

            pdb.set_trace()
        raise
def validate_state(self, allow_overlap: bool = False) -> None:
    """Exhaustive consistency check of the whole scheduler state.

    Cross-checks tasks, workers and clients against each other. Only used
    when validation is enabled (tests/debugging) as it touches everything.
    """
    validate_state(self.tasks, self.workers, self.clients)
    validate_unrunnable(self.unrunnable)

    if not (set(self.workers) == set(self.stream_comms)):
        raise ValueError("Workers not the same in all collections")

    # idle / idle_task_count / saturated are all subsets of running,
    # and saturated is disjoint from idle.
    assert self.running.issuperset(self.idle.values()), (
        self.running.copy(),
        set(self.idle.values()),
    )
    assert self.running.issuperset(self.idle_task_count), (
        self.running.copy(),
        self.idle_task_count.copy(),
    )
    assert self.running.issuperset(self.saturated), (
        self.running.copy(),
        self.saturated.copy(),
    )
    assert self.saturated.isdisjoint(self.idle.values()), (
        self.saturated.copy(),
        set(self.idle.values()),
    )

    task_prefix_counts: defaultdict[str, int] = defaultdict(int)
    for w, ws in self.workers.items():
        assert isinstance(w, str), (type(w), w)
        assert isinstance(ws, WorkerState), (type(ws), ws)
        assert ws.address == w
        if ws.status == Status.running:
            assert ws in self.running
        else:
            assert ws not in self.running
            assert ws.address not in self.idle
            assert ws not in self.saturated

        assert ws.long_running.issubset(ws.processing)
        if not ws.processing:
            assert not ws.occupancy
            if ws.status == Status.running:
                assert ws.address in self.idle
        assert not ws.needs_what.keys() & ws.has_what
        # Recompute needs_what from scratch and compare with the cache.
        actual_needs_what: defaultdict[TaskState, int] = defaultdict(int)
        for ts in ws.processing:
            for tss in ts.dependencies:
                if tss not in ws.has_what:
                    actual_needs_what[tss] += 1
        assert actual_needs_what == ws.needs_what
        assert (ws.status == Status.running) == (ws in self.running)
        for name, count in ws.task_prefix_count.items():
            task_prefix_counts[name] += count

    # Per-worker task-prefix counts must sum to the global tally.
    assert task_prefix_counts.keys() == self._task_prefix_count_global.keys()
    for name, global_count in self._task_prefix_count_global.items():
        assert (
            task_prefix_counts[name] == global_count
        ), f"{name}: {task_prefix_counts[name]} (wss), {global_count} (global)"

    for ws in self.running:
        assert ws.status == Status.running
        assert ws.address in self.workers

    for k, ts in self.tasks.items():
        assert isinstance(ts, TaskState), (type(ts), ts)
        assert ts.key == k
        assert bool(ts in self.replicated_tasks) == (len(ts.who_has or ()) > 1)
        self.validate_key(k, ts)

    for ts in self.replicated_tasks:
        assert ts.state == "memory"
        assert ts.key in self.tasks

    for c, cs in self.clients.items():
        # client=None is often used in tests...
        assert c is None or type(c) == str, (type(c), c)
        assert type(cs) == ClientState, (type(cs), cs)
        assert cs.client_key == c

    # Worker-reported byte totals must match the sum over hosted tasks.
    a = {w: ws.nbytes for w, ws in self.workers.items()}
    b = {
        w: sum(ts.get_nbytes() for ts in ws.has_what)
        for w, ws in self.workers.items()
    }
    assert a == b, (a, b)

    if self.transition_counter_max:
        assert self.transition_counter < self.transition_counter_max
###################
# Manage Messages #
###################
def report(
    self, msg: dict, ts: TaskState | None = None, client: str | None = None
) -> None:
    """
    Publish updates to all listening Queues and Comms

    If the message contains a key then we only send the message to those
    comms that care about the key.
    """
    if ts is None:
        msg_key = msg.get("key")
        if msg_key is not None:
            ts = self.tasks.get(msg_key)

    if ts is None and client is None:
        # Notify all clients
        client_keys = list(self.client_comms)
    elif ts is None:
        assert client is not None
        client_keys = [client]
    else:
        # Notify clients interested in key (including `client`)
        # Note that, if report() was called by update_graph(), `client` won't be in
        # ts.who_wants yet.
        client_keys = [
            cs.client_key for cs in ts.who_wants or () if cs.client_key != client
        ]
        if client is not None:
            client_keys.append(client)

    for k in client_keys:
        c = self.client_comms.get(k)
        if c is None:
            # Client no longer connected; skip silently.
            continue
        try:
            c.send(msg)
            # logger.debug("Scheduler sends message to client %s: %s", k, msg)
        except CommClosedError:
            # Only log broken comms while the scheduler is actually running.
            if self.status == Status.running:
                logger.critical(
                    "Closed comm %r while trying to write %s", c, msg, exc_info=True
                )
async def add_client(
    self, comm: Comm, client: str, versions: dict[str, Any]
) -> None:
    """Add client to network

    We listen to all future messages from this Comm.
    This coroutine lives for the duration of the client connection and
    removes the client when the stream ends.
    """
    assert client is not None
    comm.name = "Scheduler->Client"
    logger.info("Receive client connection: %s", client)
    self.log_event(["all", client], {"action": "add-client", "client": client})
    self.clients[client] = ClientState(client, versions=versions)
    self._client_connections_added_total += 1

    for plugin in list(self.plugins.values()):
        try:
            plugin.add_client(scheduler=self, client=client)
        except Exception as e:
            logger.exception(e)

    try:
        bcomm = BatchedSend(interval="2ms", loop=self.loop)
        bcomm.start(comm)
        self.client_comms[client] = bcomm
        msg = {"op": "stream-start"}
        # Warn the client if its package versions mismatch the cluster's.
        version_warning = version_module.error_message(
            version_module.get_versions(),
            {w: ws.versions for w, ws in self.workers.items()},
            versions,
        )
        msg.update(version_warning)
        bcomm.send(msg)

        try:
            await self.handle_stream(comm=comm, extra={"client": client})
        finally:
            self.remove_client(client=client, stimulus_id=f"remove-client-{time()}")
            logger.debug("Finished handling client %s", client)
    finally:
        if not comm.closed():
            self.client_comms[client].send({"op": "stream-closed"})
        try:
            if not self._is_finalizing():
                await self.client_comms[client].close()
                del self.client_comms[client]
                if self.status == Status.running:
                    logger.info("Close client connection: %s", client)
        except TypeError:  # comm becomes None during GC
            pass
def remove_client(self, client: str, stimulus_id: str | None = None) -> None:
    """Remove client from network"""
    stimulus_id = stimulus_id or f"remove-client-{time()}"
    if self.status == Status.running:
        logger.info("Remove client %s", client)
    self.log_event(["all", client], {"action": "remove-client", "client": client})
    try:
        cs: ClientState = self.clients[client]
    except KeyError:
        # XXX is this a legitimate condition?
        pass
    else:
        # Release everything the client still held on to, then forget it.
        self.client_releases_keys(
            keys=[ts.key for ts in cs.wants_what],
            client=cs.client_key,
            stimulus_id=stimulus_id,
        )
        del self.clients[client]
        self._client_connections_removed_total += 1
        for plugin in list(self.plugins.values()):
            try:
                plugin.remove_client(scheduler=self, client=client)
            except Exception as e:
                logger.exception(e)

    async def remove_client_from_events() -> None:
        # If the client isn't registered anymore after the delay, remove from events
        if client not in self.clients:
            self._broker.truncate(client)

    # Keep the client's event log around for a grace period in case it
    # reconnects, then garbage-collect it.
    cleanup_delay = parse_timedelta(
        dask.config.get("distributed.scheduler.events-cleanup-delay")
    )
    if not self._ongoing_background_tasks.closed:
        self._ongoing_background_tasks.call_later(
            cleanup_delay, remove_client_from_events
        )
def send_task_to_worker(self, worker: str, ts: TaskState) -> None:
    """Send a single computational task to a worker"""
    try:
        msg = self._task_to_msg(ts)
        self.worker_send(worker, msg)
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            # Drop into a debugger when explicitly configured to do so.
            import pdb

            pdb.set_trace()
        raise
def handle_uncaught_error(self, **msg: Any) -> None:
    """Log an unexpected exception forwarded over a stream."""
    _, exc, _ = clean_exception(**msg)
    logger.exception(exc)
def handle_task_finished(
    self, key: Key, worker: str, stimulus_id: str, **msg: Any
) -> None:
    """Stream handler: a worker reports that a task finished successfully."""
    if worker not in self.workers:
        # The worker may have been removed while the message was in flight.
        return
    if self.validate:
        self.validate_key(key)

    r: tuple = self.stimulus_task_finished(
        key=key, worker=worker, stimulus_id=stimulus_id, **msg
    )
    recommendations, client_msgs, worker_msgs = r
    self._transitions(recommendations, client_msgs, worker_msgs, stimulus_id)
    self.send_all(client_msgs, worker_msgs)

    # Finishing a task may free a queue slot for a queued root task.
    self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
def handle_task_erred(self, key: Key, stimulus_id: str, **msg: Any) -> None:
    """Stream handler: a worker reports that a task erred."""
    recommendations, client_msgs, worker_msgs = self.stimulus_task_erred(
        key=key, stimulus_id=stimulus_id, **msg
    )
    self._transitions(recommendations, client_msgs, worker_msgs, stimulus_id)
    self.send_all(client_msgs, worker_msgs)

    # A failed task still vacates a processing slot on its worker.
    self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
def release_worker_data(self, key: Key, worker: str, stimulus_id: str) -> None:
ts = self.tasks.get(key)
ws = self.workers.get(worker)
if not ts or not ws or ws not in (ts.who_has or ()):
return
self.remove_replica(ts, ws)
if not ts.who_has:
self.transitions({key: "released"}, stimulus_id)
def handle_long_running(
    self,
    key: Key,
    worker: str,
    run_id: int,
    compute_duration: float | None,
    stimulus_id: str,
) -> None:
    """A task has seceded from the thread pool

    We stop the task from being stolen in the future, and change task
    duration accounting as if the task has stopped.
    """
    if worker not in self.workers:
        logger.debug(
            "Received long-running signal from unknown worker %s. Ignoring.", worker
        )
        return

    if key not in self.tasks:
        logger.debug("Skipping long_running since key %s was already released", key)
        return

    ts = self.tasks[key]
    ws = ts.processing_on
    if ws is None:
        logger.debug("Received long-running signal from duplicate task. Ignoring.")
        return

    if ws.address != worker or ts.run_id != run_id:
        # Signal refers to a different worker/run than the one we track.
        logger.debug(
            "Received stale long-running signal from worker %s for task %s. Ignoring.",
            worker,
            ts,
        )
        return

    steal = self.extensions.get("stealing")
    if steal is not None:
        # Long-running tasks must not be work-stolen mid-execution.
        steal.remove_key_from_stealable(ts)

    if compute_duration is not None:
        old_duration = ts.prefix.duration_average
        if old_duration < 0:
            # No estimate yet (negative sentinel): adopt this measurement.
            ts.prefix.duration_average = compute_duration
        else:
            # Smooth: average the new measurement with the old estimate.
            ts.prefix.duration_average = (old_duration + compute_duration) / 2

    ws.add_to_long_running(ts)
    self.check_idle_saturated(ws)

    # The worker thread is free again; queued tasks may now be scheduled.
    self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
def handle_worker_status_change(
    self, status: str | Status, worker: str | WorkerState, stimulus_id: str
) -> None:
    """React to a worker changing status (e.g. running <-> paused)."""
    ws = self.workers.get(worker) if isinstance(worker, str) else worker
    if not ws:
        return
    prev_status = ws.status
    ws.status = Status[status] if isinstance(status, str) else status
    if ws.status == prev_status:
        # No actual change; nothing to do.
        return

    self.log_event(
        ws.address,
        {
            "action": "worker-status-change",
            "prev-status": prev_status.name,
            "status": ws.status.name,
            "stimulus_id": stimulus_id,
        },
    )
    logger.debug(f"Worker status {prev_status.name} -> {status} - {ws}")

    if ws.status == Status.running:
        # Worker became schedulable again: try to give it work immediately.
        self.running.add(ws)
        self.check_idle_saturated(ws)
        self.transitions(
            self.bulk_schedule_unrunnable_after_adding_worker(ws), stimulus_id
        )
        self.stimulus_queue_slots_maybe_opened(stimulus_id=stimulus_id)
    else:
        # Worker paused/closing: remove it from all scheduling pools.
        self.running.discard(ws)
        self.idle.pop(ws.address, None)
        self.idle_task_count.discard(ws)
        self.saturated.discard(ws)
        self._refresh_no_workers_since()
def handle_request_refresh_who_has(
self, keys: Iterable[Key], worker: str, stimulus_id: str
) -> None:
"""Request from a Worker to refresh the
who_has for some keys. Not to be confused with scheduler.who_has, which
is a dedicated comm RPC request from a Client.
"""
who_has = {}
free_keys = []
for key in keys:
if key in self.tasks:
who_has[key] = [ws.address for ws in self.tasks[key].who_has or ()]
else:
free_keys.append(key)
if who_has:
self.stream_comms[worker].send(
{
"op": "refresh-who-has",
"who_has": who_has,
"stimulus_id": stimulus_id,
}
)
if free_keys:
self.stream_comms[worker].send(
{
"op": "free-keys",
"keys": free_keys,
"stimulus_id": stimulus_id,
}
)
async def handle_worker(self, comm: Comm, worker: str) -> None:
    """
    Listen to responses from a single worker

    This is the main loop for scheduler-worker interaction

    See Also
    --------
    Scheduler.handle_client: Equivalent coroutine for clients
    """
    comm.name = "Scheduler connection to worker"
    worker_comm = self.stream_comms[worker]
    worker_comm.start(comm)
    logger.info("Starting worker compute stream, %s", worker)
    try:
        await self.handle_stream(comm=comm, extra={"worker": worker})
    finally:
        if worker in self.stream_comms:
            # Stream ended (worker left or errored): abort the batched comm
            # and clean up all scheduler-side state for this worker.
            worker_comm.abort()
            await self.remove_worker(
                worker, stimulus_id=f"handle-worker-cleanup-{time()}"
            )
def add_plugin(
    self,
    plugin: SchedulerPlugin,
    *,
    idempotent: bool = False,
    name: str | None = None,
    **kwargs: Any,
) -> None:
    """Add external plugin to scheduler.

    See https://distributed.readthedocs.io/en/latest/plugins.html

    Parameters
    ----------
    plugin : SchedulerPlugin
        SchedulerPlugin instance to add
    idempotent : bool
        If true, the plugin is assumed to already exist and no
        action is taken.
    name : str
        A name for the plugin, if None, the name attribute is
        checked on the Plugin instance and generated if not
        discovered.
    """
    if name is None:
        name = _get_plugin_name(plugin)

    if name in self.plugins:
        if idempotent:
            return
        # Replacing an existing plugin is allowed but noisy.
        warnings.warn(
            f"Scheduler already contains a plugin with name {name}; overwriting.",
            category=UserWarning,
        )

    parameters = inspect.signature(plugin.remove_worker).parameters
    if not any(p.kind is p.VAR_KEYWORD for p in parameters.values()):
        # Old-style hook signature without **kwargs: accepted but deprecated.
        warnings.warn(
            "The signature of `SchedulerPlugin.remove_worker` now requires `**kwargs` "
            "to ensure that plugins remain forward-compatible. Not including "
            "`**kwargs` in the signature will no longer be supported in future versions.",
            FutureWarning,
        )

    self.plugins[name] = plugin
def remove_plugin(self, name: str | None = None) -> None:
"""Remove external plugin from scheduler
Parameters
----------
name : str
Name of the plugin to remove
"""
assert name is not None
try:
del self.plugins[name]
except KeyError:
raise ValueError(
f"Could not find plugin {name!r} among the current scheduler plugins"
)
async def register_scheduler_plugin(
    self,
    plugin: bytes | SchedulerPlugin,
    name: str | None = None,
    idempotent: bool | None = None,
) -> None:
    """Register a plugin on the scheduler."""
    if idempotent is None:
        warnings.warn(
            "The signature of `Scheduler.register_scheduler_plugin` now requires "
            "`idempotent`. Not including `idempotent` in the signature will no longer "
            "be supported in future versions.",
            FutureWarning,
        )
        idempotent = False

    if not isinstance(plugin, SchedulerPlugin):
        # SECURITY NOTE: deserializing the plugin executes arbitrary code;
        # this relies on the client being trusted.
        plugin = loads(plugin)
        assert isinstance(plugin, SchedulerPlugin)

    if name is None:
        name = _get_plugin_name(plugin)

    if name in self.plugins and idempotent:
        return

    if hasattr(plugin, "start"):
        # Give the plugin a chance to initialize (possibly asynchronously).
        result = plugin.start(self)
        if inspect.isawaitable(result):
            await result

    self.add_plugin(plugin, name=name, idempotent=idempotent)
async def unregister_scheduler_plugin(self, name: str) -> None:
    """Unregister a plugin on the scheduler."""
    self.remove_plugin(name=name)
def worker_send(self, worker: str, msg: dict[str, Any]) -> None:
    """Send message to worker

    This also handles connection failures by adding a callback to remove
    the worker on the next cycle.
    """
    try:
        self.stream_comms[worker].send(msg)
    except (CommClosedError, AttributeError):
        # Broken/missing comm: schedule the worker's removal instead of
        # raising into the caller.
        self._ongoing_background_tasks.call_soon(
            self.remove_worker,  # type: ignore[arg-type]
            address=worker,
            stimulus_id=f"worker-send-comm-fail-{time()}",
        )
def client_send(self, client: str, msg: dict) -> None:
    """Send *msg* to one connected client; unknown clients are ignored."""
    comm = self.client_comms.get(client)
    if comm is None:
        # Client already disconnected; nothing to do.
        return
    try:
        comm.send(msg)
    except CommClosedError:
        # During shutdown a broken comm is expected noise; only complain
        # while the scheduler is actually running.
        if self.status == Status.running:
            logger.critical(
                "Closed comm %r while trying to write %s", comm, msg, exc_info=True
            )
def send_all(self, client_msgs: Msgs, worker_msgs: Msgs) -> None:
    """Send messages to client and workers"""
    for client, msgs in client_msgs.items():
        c = self.client_comms.get(client)
        if c is None:
            # Client already disconnected.
            continue
        try:
            c.send(*msgs)
        except CommClosedError:
            if self.status == Status.running:
                logger.critical(
                    "Closed comm %r while trying to write %s",
                    c,
                    msgs,
                    exc_info=True,
                )

    for worker, msgs in worker_msgs.items():
        try:
            w = self.stream_comms[worker]
            w.send(*msgs)
        except KeyError:
            # worker already gone
            pass
        except (CommClosedError, AttributeError):
            # Broken comm: schedule the worker's removal rather than raising.
            self._ongoing_background_tasks.call_soon(
                self.remove_worker,  # type: ignore[arg-type]
                address=worker,
                stimulus_id=f"send-all-comm-fail-{time()}",
            )
############################
# Less common interactions #
############################
async def scatter(
    self,
    data: dict,
    workers: Iterable | None,
    client: str,
    broadcast: bool = False,
    timeout: float = 2,
) -> list[Key]:
    """Send data out to workers

    Blocks until at least one targeted worker is running (or *timeout*
    seconds elapse), then distributes *data* across those workers.

    See also
    --------
    Scheduler.broadcast:
    """
    start = time()
    while True:
        if workers is None:
            wss = self.running
        else:
            workers = [self.coerce_address(w) for w in workers]
            wss = {self.workers[w] for w in workers}
            wss = {ws for ws in wss if ws.status == Status.running}

        if wss:
            break
        if time() > start + timeout:
            raise TimeoutError("No valid workers found")
        # Poll until a worker becomes available.
        await asyncio.sleep(0.1)

    assert isinstance(data, dict)

    workers = list(ws.address for ws in wss)
    keys, who_has, nbytes = await scatter_to_workers(workers, data, rpc=self.rpc)
    self.update_data(who_has=who_has, nbytes=nbytes, client=client)

    if broadcast:
        # broadcast=True replicates to every targeted worker; an integer
        # value requests that many replicas instead.
        n = len(workers) if broadcast is True else broadcast
        await self.replicate(keys=keys, workers=workers, n=n)

    self.log_event(
        [client, "all"], {"action": "scatter", "client": client, "count": len(data)}
    )
    return keys
async def gather(
    self, keys: Collection[Key], serializers: list[str] | None = None
) -> dict[Key, object]:
    """Collect data from workers to the scheduler

    Repeatedly asks the workers currently holding each key until every key
    is either fetched or known to be unrecoverable.
    """
    # NOTE(review): the annotated return type is ``dict[Key, object]`` but
    # the method actually returns a status payload —
    # ``{"status": "OK", "data": ...}`` or ``{"status": "error", "keys": ...}``
    # — confirm against callers before tightening the annotation.
    data = {}
    missing_keys = list(keys)
    failed_keys: list[Key] = []
    missing_workers: set[str] = set()

    while missing_keys:
        # Re-resolve who holds the still-missing keys, excluding workers
        # that previously failed to respond.
        who_has = {}
        for key, workers in self.get_who_has(missing_keys).items():
            valid_workers = set(workers) - missing_workers
            if valid_workers:
                who_has[key] = valid_workers
            else:
                failed_keys.append(key)
        (
            new_data,
            missing_keys,
            new_failed_keys,
            new_missing_workers,
        ) = await gather_from_workers(
            who_has, rpc=self.rpc, serializers=serializers
        )
        data.update(new_data)
        failed_keys += new_failed_keys
        missing_workers.update(new_missing_workers)

    self.log_event("all", {"action": "gather", "count": len(keys)})

    if not failed_keys:
        return {"status": "OK", "data": data}

    failed_states = {
        key: self.tasks[key].state if key in self.tasks else "forgotten"
        for key in failed_keys
    }
    logger.error("Couldn't gather keys: %s", failed_states)
    return {"status": "error", "keys": list(failed_keys)}
@log_errors
async def restart(
    self,
    *,
    client: str | None = None,
    timeout: float = 30,
    wait_for_workers: bool = True,
    stimulus_id: str,
) -> None:
    """Forget all tasks and call restart_workers on all workers.

    Parameters
    ----------
    timeout:
        See restart_workers
    wait_for_workers:
        See restart_workers

    See also
    --------
    Client.restart
    Client.restart_workers
    Scheduler.restart_workers
    """
    logger.info(f"Restarting workers and releasing all keys ({stimulus_id=})")
    for cs in self.clients.values():
        # Drop every client's hold on every key so all tasks can be forgotten.
        self.client_releases_keys(
            keys=[ts.key for ts in cs.wants_what],
            client=cs.client_key,
            stimulus_id=stimulus_id,
        )

    self._clear_task_state()
    assert not self.tasks
    self.report({"op": "restart"})

    for plugin in list(self.plugins.values()):
        try:
            plugin.restart(self)
        except Exception as e:
            logger.exception(e)

    await self.restart_workers(
        client=client,
        timeout=timeout,
        wait_for_workers=wait_for_workers,
        stimulus_id=stimulus_id,
    )
@log_errors
async def restart_workers(
self,
workers: list[str] | None = None,
*,
client: str | None = None,
timeout: float = 30,
wait_for_workers: bool = True,
on_error: Literal["raise", "return"] = "raise",
stimulus_id: str,
) -> dict[str, Literal["OK", "removed", "timed out"]]:
"""Restart selected workers. Optionally wait for workers to return.
Workers without nannies are shut down, hoping an external deployment system
will restart them. Therefore, if not using nannies and your deployment system
does not automatically restart workers, ``restart`` will just shut down all
workers, then time out!
After ``restart``, all connected workers are new, regardless of whether
``TimeoutError`` was raised. Any workers that failed to shut down in time are
removed, and may or may not shut down on their own in the future.
Parameters
----------
workers:
List of worker addresses to restart. If omitted, restart all workers.
timeout:
How long to wait for workers to shut down and come back, if ``wait_for_workers``
is True, otherwise just how long to wait for workers to shut down.
Raises ``asyncio.TimeoutError`` if this is exceeded.
wait_for_workers:
Whether to wait for all workers to reconnect, or just for them to shut down
(default True). Use ``restart(wait_for_workers=False)`` combined with
:meth:`Client.wait_for_workers` for granular control over how many workers to
wait for.
on_error:
If 'raise' (the default), raise if any nanny times out while restarting the
worker. If 'return', return error messages.
Returns
-------
{worker address: "OK", "no nanny", or "timed out" or error message}
See also
--------
Client.restart
Client.restart_workers
Scheduler.restart
"""
n_workers = len(self.workers)
if workers is None:
workers = list(self.workers)
logger.info(f"Restarting all workers ({stimulus_id=}")
else:
workers = list(set(workers).intersection(self.workers))
logger.info(f"Restarting {len(workers)} workers: {workers} ({stimulus_id=}")
nanny_workers = {
addr: self.workers[addr].nanny
for addr in workers
if self.workers[addr].nanny
}
# Close non-Nanny workers. We have no way to restart them, so we just let them
# go, and assume a deployment system is going to restart them for us.
no_nanny_workers = [addr for addr in workers if addr not in nanny_workers]
if no_nanny_workers:
logger.warning(
f"Workers {no_nanny_workers} do not use a nanny and will be terminated "
"without restarting them"
)
await asyncio.gather(
*(
self.remove_worker(address=addr, stimulus_id=stimulus_id)
for addr in no_nanny_workers
)
)
out: dict[str, Literal["OK", "removed", "timed out"]]
out = {addr: "removed" for addr in no_nanny_workers}
deadline = Deadline.after(timeout)
logger.debug("Send kill signal to nannies: %s", nanny_workers)
async with contextlib.AsyncExitStack() as stack:
nannies = await asyncio.gather(
*(
stack.enter_async_context(
rpc(nanny_address, connection_args=self.connection_args)
)
for nanny_address in nanny_workers.values()
)
)
resps = await asyncio.gather(
*(
wait_for(
# FIXME does not raise if the process fails to shut down,
# see https://github.com/dask/distributed/pull/6427/files#r894917424
# NOTE: Nanny will automatically restart worker process when it's killed
# NOTE: Don't propagate timeout to kill(): we don't want to
# spend (.8*.8)=64% of our end-to-end timeout waiting for a hung
# process to restart.
nanny.kill(reason=stimulus_id),
timeout,
)
for nanny in nannies
),
return_exceptions=True,
)
# NOTE: the `WorkerState` entries for these workers will be removed
# naturally when they disconnect from the scheduler.
# Remove any workers that failed to shut down, so we can guarantee
# that after `restart`, there are no old workers around.
bad_nannies = set()
for addr, resp in zip(nanny_workers, resps):
if resp is None:
out[addr] = "OK"
elif isinstance(resp, (OSError, TimeoutError)):
bad_nannies.add(addr)
out[addr] = "timed out"
else: # pragma: nocover
raise resp
if bad_nannies:
logger.error(
f"Workers {list(bad_nannies)} did not shut down within {timeout}s; "
"force closing"
)
await asyncio.gather(
*(
self.remove_worker(addr, stimulus_id=stimulus_id)
for addr in bad_nannies
)
)
if on_error == "raise":
raise TimeoutError(
f"{len(bad_nannies)}/{len(nannies)} nanny worker(s) did not "
f"shut down within {timeout}s: {bad_nannies}"
)
if client:
self.log_event(client, {"action": "restart-workers", "workers": workers})
self.log_event(
"all", {"action": "restart-workers", "workers": workers, "client": client}
)
if not wait_for_workers:
logger.info(
"Workers restart finished (did not wait for new workers) "
f"({stimulus_id=}"
)
return out
# NOTE: if new (unrelated) workers join while we're waiting, we may return
# before our shut-down workers have come back up. That's fine; workers are
# interchangeable.
while not deadline.expired and len(self.workers) < n_workers:
await asyncio.sleep(0.2)
if len(self.workers) >= n_workers:
logger.info(f"Workers restart finished ({stimulus_id=}")
return out
msg = (
f"Waited for {len(workers)} worker(s) to reconnect after restarting but, "
f"after {timeout}s, {n_workers - len(self.workers)} have not returned. "
"Consider a longer timeout, or `wait_for_workers=False`."
)
if no_nanny_workers:
msg += (
f" The {len(no_nanny_workers)} worker(s) not using Nannies were just shut "
"down instead of restarted (restart is only possible with Nannies). If "
"your deployment system does not automatically re-launch terminated "
"processes, then those workers will never come back, and `Client.restart` "
"will always time out. Do not use `Client.restart` in that case."
)
if on_error == "raise":
raise TimeoutError(msg)
logger.error(f"{msg} ({stimulus_id=})")
new_nannies = {ws.nanny for ws in self.workers.values() if ws.nanny}
for worker_addr, nanny_addr in nanny_workers.items():
if nanny_addr not in new_nannies:
out[worker_addr] = "timed out"
return out
    async def broadcast(
        self,
        *,
        msg: dict,
        workers: Collection[str] | None = None,
        hosts: Collection[str] | None = None,
        nanny: bool = False,
        serializers: Any = None,
        on_error: Literal["raise", "return", "return_pickle", "ignore"] = "raise",
    ) -> dict[str, Any]:
        """Broadcast message to workers, return all results

        Parameters
        ----------
        msg:
            Operation and arguments to send to each contacted address.
        workers:
            Worker addresses to contact. If both ``workers`` and ``hosts`` are
            omitted, all known workers are contacted.
        hosts:
            Additionally contact every worker address registered on these hosts.
        nanny:
            If True, send the message to each worker's nanny instead of to the
            worker itself.
        serializers:
            Serializers forwarded to ``send_recv``.
        on_error:
            On a per-address failure: ``"raise"`` re-raises, ``"return"``
            returns the exception object, ``"return_pickle"`` returns the
            pickled exception, ``"ignore"`` drops the address from the result.

        Returns
        -------
        Mapping of worker address to that address's response. Addresses whose
        failure was ignored are omitted.
        """
        if workers is None:
            if hosts is None:
                workers = list(self.workers)
            else:
                workers = []
        else:
            workers = list(workers)
        if hosts is not None:
            for host in hosts:
                dh = self.host_info.get(host)
                if dh is not None:
                    workers.extend(dh["addresses"])

        if nanny:
            # NOTE(review): workers without a nanny are silently skipped here,
            # which makes ``addresses`` shorter than ``workers`` and can
            # misalign the zip(workers, results) below — presumably callers
            # only pass nanny=True for nanny-equipped workers; confirm.
            addresses = [n for w in workers if (n := self.workers[w].nanny) is not None]
        else:
            addresses = workers

        # Sentinel marking responses to drop when on_error="ignore"
        ERROR = object()

        reuse_broadcast_comm = dask.config.get(
            "distributed.scheduler.reuse-broadcast-comm", False
        )
        close = not reuse_broadcast_comm

        async def send_message(addr: str) -> Any:
            try:
                comm = await self.rpc.connect(addr)
                comm.name = "Scheduler Broadcast"
                try:
                    resp = await send_recv(
                        comm, close=close, serializers=serializers, **msg
                    )
                finally:
                    # Hand the comm back to the pool (or let it close)
                    self.rpc.reuse(addr, comm)
                return resp
            except Exception as e:
                logger.error(f"broadcast to {addr} failed: {e.__class__.__name__}: {e}")
                if on_error == "raise":
                    raise
                elif on_error == "return":
                    return e
                elif on_error == "return_pickle":
                    return dumps(e)
                elif on_error == "ignore":
                    return ERROR
                else:
                    raise ValueError(
                        "on_error must be 'raise', 'return', 'return_pickle', "
                        f"or 'ignore'; got {on_error!r}"
                    )

        results = await All([send_message(address) for address in addresses])
        return {k: v for k, v in zip(workers, results) if v is not ERROR}
async def proxy(
self,
msg: dict,
worker: str,
serializers: Any = None,
) -> Any:
"""Proxy a communication through the scheduler to some other worker"""
d = await self.broadcast(msg=msg, workers=[worker], serializers=serializers)
return d[worker]
    async def gather_on_worker(
        self, worker_address: str, who_has: dict[Key, list[str]]
    ) -> set:
        """Peer-to-peer copy of keys from multiple workers to a single worker

        Parameters
        ----------
        worker_address: str
            Recipient worker address to copy keys to
        who_has: dict[Key, list[str]]
            {key: [sender address, sender address, ...], key: ...}

        Returns
        -------
        returns:
            set of keys that failed to be copied

        Notes
        -----
        This method never raises: communication failures are logged and the
        whole request is reported as failed.
        """
        try:
            result = await retry_operation(
                self.rpc(addr=worker_address).gather, who_has=who_has
            )
        except OSError as e:
            # This can happen e.g. if the worker is going through controlled shutdown;
            # it doesn't necessarily mean that it went unexpectedly missing
            logger.warning(
                f"Communication with worker {worker_address} failed during "
                f"replication: {e.__class__.__name__}: {e}"
            )
            return set(who_has)

        # The worker may have disconnected while we were talking to it
        ws = self.workers.get(worker_address)
        if not ws:
            logger.warning(f"Worker {worker_address} lost during replication")
            return set(who_has)
        elif result["status"] == "OK":
            keys_failed = set()
            keys_ok: Set = who_has.keys()
        elif result["status"] == "partial-fail":
            keys_failed = set(result["keys"])
            keys_ok = who_has.keys() - keys_failed
            logger.warning(
                f"Worker {worker_address} failed to acquire keys: {result['keys']}"
            )
        else:  # pragma: nocover
            raise ValueError(f"Unexpected message from {worker_address}: {result}")

        # Record the new replicas on the scheduler side; skip keys whose task
        # was released/forgotten while the transfer was in flight
        for key in keys_ok:
            ts = self.tasks.get(key)
            if ts is None or ts.state != "memory":
                logger.warning(f"Key lost during replication: {key}")
                continue
            self.add_replica(ts, ws)

        return keys_failed
    async def delete_worker_data(
        self, worker_address: str, keys: Collection[Key], stimulus_id: str
    ) -> None:
        """Delete data from a worker and update the corresponding worker/task states

        Parameters
        ----------
        worker_address: str
            Worker address to delete keys from
        keys: list[Key]
            List of keys to delete on the specified worker
        stimulus_id: str
            ID of the stimulus causing the deletion; used for the scheduler-side
            transitions of keys whose last replica is removed.

        Notes
        -----
        This method never raises: communication failures are logged and ignored.
        """
        try:
            # NOTE(review): a fresh "delete-data-*" stimulus id is sent to the
            # worker instead of the ``stimulus_id`` argument — presumably to
            # distinguish the worker-side free from the scheduler-side
            # transition below; confirm before unifying.
            await retry_operation(
                self.rpc(addr=worker_address).free_keys,
                keys=list(keys),
                stimulus_id=f"delete-data-{time()}",
            )
        except OSError as e:
            # This can happen e.g. if the worker is going through controlled shutdown;
            # it doesn't necessarily mean that it went unexpectedly missing
            logger.warning(
                f"Communication with worker {worker_address} failed during "
                f"replication: {e.__class__.__name__}: {e}"
            )
            return

        # The worker may have disconnected while we were talking to it
        ws = self.workers.get(worker_address)
        if not ws:
            return

        for key in keys:
            ts = self.tasks.get(key)
            if ts is not None and ws in (ts.who_has or ()):
                assert ts.state == "memory"
                self.remove_replica(ts, ws)
                if not ts.who_has:
                    # Last copy deleted
                    self.transitions({key: "released"}, stimulus_id)

        self.log_event(ws.address, {"action": "remove-worker-data", "keys": keys})
    @log_errors
    async def rebalance(
        self,
        keys: Iterable[Key] | None = None,
        workers: Iterable[str] | None = None,
        stimulus_id: str | None = None,
    ) -> dict:
        """Rebalance keys so that each worker ends up with roughly the same process
        memory (managed+unmanaged).

        .. warning::
           This operation is generally not well tested against normal operation of the
           scheduler. It is not recommended to use it while waiting on computations.

        **Algorithm**

        #. Find the mean occupancy of the cluster, defined as data managed by dask +
           unmanaged process memory that has been there for at least 30 seconds
           (``distributed.worker.memory.recent-to-old-time``).
           This lets us ignore temporary spikes caused by task heap usage.

           Alternatively, you may change how memory is measured both for the individual
           workers as well as to calculate the mean through
           ``distributed.worker.memory.rebalance.measure``. Namely, this can be useful
           to disregard inaccurate OS memory measurements.

        #. Discard workers whose occupancy is within 5% of the mean cluster occupancy
           (``distributed.worker.memory.rebalance.sender-recipient-gap`` / 2).
           This helps avoid data from bouncing around the cluster repeatedly.
        #. Workers above the mean are senders; those below are recipients.
        #. Discard senders whose absolute occupancy is below 30%
           (``distributed.worker.memory.rebalance.sender-min``). In other words, no data
           is moved regardless of imbalancing as long as all workers are below 30%.
        #. Discard recipients whose absolute occupancy is above 60%
           (``distributed.worker.memory.rebalance.recipient-max``).
           Note that this threshold by default is the same as
           ``distributed.worker.memory.target`` to prevent workers from accepting data
           and immediately spilling it out to disk.
        #. Iteratively pick the sender and recipient that are farthest from the mean and
           move the *least recently inserted* key between the two, until either all
           senders or all recipients fall within 5% of the mean.

           A recipient will be skipped if it already has a copy of the data. In other
           words, this method does not degrade replication.
           A key will be skipped if there are no recipients available with enough memory
           to accept the key and that don't already hold a copy.

        The least recently inserted (LRI) policy is a greedy choice with the advantage of
        being O(1), trivial to implement (it relies on python dict insertion-sorting)
        and hopefully good enough in most cases. Discarded alternative policies were:

        - Largest first. O(n*log(n)) save for non-trivial additional data structures and
          risks causing the largest chunks of data to repeatedly move around the
          cluster like pinballs.
        - Least recently used (LRU). This information is currently available on the
          workers only and not trivial to replicate on the scheduler; transmitting it
          over the network would be very expensive. Also, note that dask will go out of
          its way to minimise the amount of time intermediate keys are held in memory,
          so in such a case LRI is a close approximation of LRU.

        Parameters
        ----------
        keys: optional
            allowlist of dask keys that should be considered for moving. All other keys
            will be ignored. Note that this offers no guarantee that a key will actually
            be moved (e.g. because it is unnecessary or because there are no viable
            recipient workers for it).
        workers: optional
            allowlist of workers addresses to be considered as senders or recipients.
            All other workers will be ignored. The mean cluster occupancy will be
            calculated only using the allowed workers.
        stimulus_id: optional
            ID of the stimulus causing the rebalance; auto-generated if omitted.

        Returns
        -------
        ``{"status": "OK"}`` on success, or ``{"status": "partial-fail", "keys": [...]}``
        when explicitly-requested keys were missing or failed to move.
        """
        stimulus_id = stimulus_id or f"rebalance-{time()}"
        wss: Collection[WorkerState]
        if workers is not None:
            wss = [self.workers[w] for w in workers]
        else:
            wss = self.workers.values()
        if not wss:
            return {"status": "OK"}

        if keys is not None:
            if not isinstance(keys, Set):
                keys = set(keys)  # unless already a set-like
            if not keys:
                return {"status": "OK"}
            missing_data = [
                k for k in keys if k not in self.tasks or not self.tasks[k].who_has
            ]
            if missing_data:
                return {"status": "partial-fail", "keys": missing_data}

        msgs = self._rebalance_find_msgs(keys, wss)
        if not msgs:
            return {"status": "OK"}

        # Downgrade reentrant lock to non-reentrant
        async with self._replica_lock(("rebalance", object())):
            result = await self._rebalance_move_data(msgs, stimulus_id)
            if result["status"] == "partial-fail" and keys is None:
                # Only return failed keys if the client explicitly asked for them
                result = {"status": "OK"}
            return result
    def _rebalance_find_msgs(
        self,
        keys: Set[Hashable] | None,
        workers: Iterable[WorkerState],
    ) -> list[tuple[WorkerState, WorkerState, TaskState]]:
        """Identify workers that need to lose keys and those that can receive them,
        together with how many bytes each needs to lose/receive. Then, pair a sender
        worker with a recipient worker for each key, until the cluster is rebalanced.

        This method only defines the work to be performed; it does not start any network
        transfers itself.

        The big-O complexity is O(wt + ke*log(we)), where

        - wt is the total number of workers on the cluster (or the number of allowed
          workers, if explicitly stated by the user)
        - we is the number of workers that are eligible to be senders or recipients
        - kt is the total number of keys on the cluster (or on the allowed workers)
        - ke is the number of keys that need to be moved in order to achieve a balanced
          cluster

        There is a degenerate edge case O(wt + kt*log(we)) when kt is much greater than
        the number of allowed keys, or when most keys are replicated or cannot be
        moved for some other reason.

        Returns list of tuples to feed into _rebalance_move_data:

        - sender worker
        - recipient worker
        - task to be transferred
        """
        # Heaps of workers, managed by the heapq module, that need to send/receive data,
        # with how many bytes each needs to send/receive.
        #
        # Each element of the heap is a tuple constructed as follows:
        # - snd_bytes_max/rec_bytes_max: maximum number of bytes to send or receive.
        #   This number is negative, so that the workers farthest from the cluster mean
        #   are at the top of the smallest-first heaps.
        # - snd_bytes_min/rec_bytes_min: minimum number of bytes after sending/receiving
        #   which the worker should not be considered anymore. This is also negative.
        # - arbitrary unique number, there just to make sure that WorkerState objects
        #   are never used for sorting in the unlikely event that two processes have
        #   exactly the same number of bytes allocated.
        # - WorkerState
        # - iterator of all tasks in memory on the worker (senders only), insertion
        #   sorted (least recently inserted first).
        #   Note that this iterator will typically *not* be exhausted. It will only be
        #   exhausted if, after moving away from the worker all keys that can be moved,
        #   it is insufficient to drop snd_bytes_min above 0.
        senders: list[tuple[int, int, int, WorkerState, Iterator[TaskState]]] = []
        recipients: list[tuple[int, int, int, WorkerState]] = []

        # Output: [(sender, recipient, task), ...]
        msgs: list[tuple[WorkerState, WorkerState, TaskState]] = []

        # By default, this is the optimistic memory, meaning total process memory minus
        # unmanaged memory that appeared over the last 30 seconds
        # (distributed.worker.memory.recent-to-old-time).
        # This lets us ignore temporary spikes caused by task heap usage.
        memory_by_worker = [
            (ws, getattr(ws.memory, self.MEMORY_REBALANCE_MEASURE)) for ws in workers
        ]
        mean_memory = sum(m for _, m in memory_by_worker) // len(memory_by_worker)

        for ws, ws_memory in memory_by_worker:
            if ws.memory_limit:
                half_gap = int(self.MEMORY_REBALANCE_HALF_GAP * ws.memory_limit)
                sender_min = self.MEMORY_REBALANCE_SENDER_MIN * ws.memory_limit
                recipient_max = self.MEMORY_REBALANCE_RECIPIENT_MAX * ws.memory_limit
            else:
                # No memory limit configured: never excluded by the absolute
                # sender-min / recipient-max thresholds
                half_gap = 0
                sender_min = 0.0
                recipient_max = math.inf

            if (
                ws._has_what
                and ws_memory >= mean_memory + half_gap
                and ws_memory >= sender_min
            ):
                # This may send the worker below sender_min (by design)
                snd_bytes_max = mean_memory - ws_memory  # negative
                snd_bytes_min = snd_bytes_max + half_gap  # negative
                # See definition of senders above
                senders.append(
                    (snd_bytes_max, snd_bytes_min, id(ws), ws, iter(ws._has_what))
                )
            elif ws_memory < mean_memory - half_gap and ws_memory < recipient_max:
                # This may send the worker above recipient_max (by design)
                rec_bytes_max = ws_memory - mean_memory  # negative
                rec_bytes_min = rec_bytes_max + half_gap  # negative
                # See definition of recipients above
                recipients.append((rec_bytes_max, rec_bytes_min, id(ws), ws))

        # Fast exit in case no transfers are necessary or possible
        if not senders or not recipients:
            self.log_event(
                "all",
                {
                    "action": "rebalance",
                    "senders": len(senders),
                    "recipients": len(recipients),
                    "moved_keys": 0,
                },
            )
            return []

        heapq.heapify(senders)
        heapq.heapify(recipients)

        while senders and recipients:
            snd_bytes_max, snd_bytes_min, _, snd_ws, ts_iter = senders[0]

            # Iterate through tasks in memory, least recently inserted first
            for ts in ts_iter:
                if keys is not None and ts.key not in keys:
                    continue
                nbytes = ts.nbytes
                if nbytes + snd_bytes_max > 0:
                    # Moving this task would cause the sender to go below mean and
                    # potentially risk becoming a recipient, which would cause tasks to
                    # bounce around. Move on to the next task of the same sender.
                    continue

                # Find the recipient, farthest from the mean, which
                # 1. has enough available RAM for this task, and
                # 2. doesn't hold a copy of this task already
                # There may not be any that satisfies these conditions; in this case
                # this task won't be moved.
                skipped_recipients = []
                use_recipient = False
                while recipients and not use_recipient:
                    rec_bytes_max, rec_bytes_min, _, rec_ws = recipients[0]
                    if nbytes + rec_bytes_max > 0:
                        # recipients are sorted by rec_bytes_max.
                        # The next ones will be worse; no reason to continue iterating
                        break
                    use_recipient = ts not in rec_ws._has_what
                    if not use_recipient:
                        skipped_recipients.append(heapq.heappop(recipients))

                # Restore any recipients we popped while searching
                for recipient in skipped_recipients:
                    heapq.heappush(recipients, recipient)

                if not use_recipient:
                    # This task has no recipients available. Leave it on the sender and
                    # move on to the next task of the same sender.
                    continue

                # Schedule task for transfer from sender to recipient
                msgs.append((snd_ws, rec_ws, ts))

                # *_bytes_max/min are all negative for heap sorting
                snd_bytes_max += nbytes
                snd_bytes_min += nbytes
                rec_bytes_max += nbytes
                rec_bytes_min += nbytes

                # Stop iterating on the tasks of this sender for now and, if it still
                # has bytes to lose, push it back into the senders heap; it may or may
                # not come back on top again.
                if snd_bytes_min < 0:
                    # See definition of senders above
                    heapq.heapreplace(
                        senders,
                        (snd_bytes_max, snd_bytes_min, id(snd_ws), snd_ws, ts_iter),
                    )
                else:
                    heapq.heappop(senders)

                # If recipient still has bytes to gain, push it back into the recipients
                # heap; it may or may not come back on top again.
                if rec_bytes_min < 0:
                    # See definition of recipients above
                    heapq.heapreplace(
                        recipients,
                        (rec_bytes_max, rec_bytes_min, id(rec_ws), rec_ws),
                    )
                else:
                    heapq.heappop(recipients)

                # Move to next sender with the most data to lose.
                # It may or may not be the same sender again.
                break

            else:  # for ts in ts_iter
                # Exhausted tasks on this sender
                heapq.heappop(senders)

        return msgs
    async def _rebalance_move_data(
        self, msgs: list[tuple[WorkerState, WorkerState, TaskState]], stimulus_id: str
    ) -> dict:
        """Perform the actual transfer of data across the network in rebalance().
        Takes in input the output of _rebalance_find_msgs(), that is a list of tuples:

        - sender worker
        - recipient worker
        - task to be transferred

        Returns
        -------
        ``{"status": "OK"}`` or ``{"status": "partial-fail", "keys": [...]}`` with the
        keys that could not be copied to their recipient.

        FIXME this method is not robust when the cluster is not idle.
        """
        # {recipient address: {key: [sender address, ...]}}
        to_recipients: defaultdict[str, defaultdict[Key, list[str]]] = defaultdict(
            lambda: defaultdict(list)
        )
        for snd_ws, rec_ws, ts in msgs:
            to_recipients[rec_ws.address][ts.key].append(snd_ws.address)
        failed_keys_by_recipient = dict(
            zip(
                to_recipients,
                await asyncio.gather(
                    *(
                        # Note: this never raises exceptions
                        self.gather_on_worker(w, who_has)
                        for w, who_has in to_recipients.items()
                    )
                ),
            )
        )

        # Only delete from the senders the keys that every recipient acquired
        to_senders = defaultdict(list)
        for snd_ws, rec_ws, ts in msgs:
            if ts.key not in failed_keys_by_recipient[rec_ws.address]:
                to_senders[snd_ws.address].append(ts.key)

        # Note: this never raises exceptions
        await asyncio.gather(
            *(self.delete_worker_data(r, v, stimulus_id) for r, v in to_senders.items())
        )

        for r, v in to_recipients.items():
            self.log_event(r, {"action": "rebalance", "who_has": v})
        self.log_event(
            "all",
            {
                "action": "rebalance",
                "senders": valmap(len, to_senders),
                "recipients": valmap(len, to_recipients),
                "moved_keys": len(msgs),
            },
        )

        missing_keys = {k for r in failed_keys_by_recipient.values() for k in r}
        if missing_keys:
            return {"status": "partial-fail", "keys": list(missing_keys)}
        else:
            return {"status": "OK"}
    async def replicate(
        self,
        keys: list[Key],
        n: int | None = None,
        workers: Iterable | None = None,
        branching_factor: int = 2,
        delete: bool = True,
        stimulus_id: str | None = None,
    ) -> dict | None:
        """Replicate data throughout cluster

        This performs a tree copy of the data throughout the network
        individually on each piece of data.

        Parameters
        ----------
        keys: Iterable
            list of keys to replicate
        n: int
            Number of replications we expect to see within the cluster.
            Defaults to the number of eligible workers.
        workers: Iterable, optional
            Allowlist of workers to replicate onto; only running workers are
            considered. Defaults to all running workers.
        branching_factor: int, optional
            The number of workers that can copy data in each generation.
            The larger the branching factor, the more data we copy in
            a single step, but the more a given worker risks being
            swamped by data requests.
        delete: bool, optional
            Whether to drop extra replicas so that each key ends up with
            exactly ``n`` copies (default True).
        stimulus_id: str, optional
            ID of the stimulus causing the replication; auto-generated if
            omitted.

        Returns
        -------
        ``None`` on success, or ``{"status": "partial-fail", "keys": [...]}``
        if some requested keys have no replica anywhere.

        See also
        --------
        Scheduler.rebalance
        """
        stimulus_id = stimulus_id or f"replicate-{time()}"
        assert branching_factor > 0
        # Downgrade reentrant lock to non-reentrant
        async with self._replica_lock(("replicate", object())):
            if workers is not None:
                workers = {self.workers[w] for w in self.workers_list(workers)}
                workers = {ws for ws in workers if ws.status == Status.running}
            else:
                workers = self.running

            if n is None:
                n = len(workers)
            else:
                n = min(n, len(workers))
            if n == 0:
                raise ValueError("Can not use replicate to delete data")

            tasks = {self.tasks[k] for k in keys}
            missing_data = [ts.key for ts in tasks if not ts.who_has]
            if missing_data:
                return {"status": "partial-fail", "keys": missing_data}

            # Delete extraneous data
            if delete:
                del_worker_tasks = defaultdict(set)
                for ts in tasks:
                    assert ts.who_has is not None
                    del_candidates = tuple(ts.who_has & workers)
                    if len(del_candidates) > n:
                        # Randomly pick which surplus replicas to drop
                        for ws in random.sample(
                            del_candidates, len(del_candidates) - n
                        ):
                            del_worker_tasks[ws].add(ts)

                # Note: this never raises exceptions
                await asyncio.gather(
                    *[
                        self.delete_worker_data(
                            ws.address, [t.key for t in tasks], stimulus_id
                        )
                        for ws, tasks in del_worker_tasks.items()
                    ]
                )

            # Copy not-yet-filled data
            gathers: defaultdict[str, dict[Key, list[str]]]
            while tasks:
                gathers = defaultdict(dict)
                for ts in list(tasks):
                    if ts.state == "forgotten":
                        # task is no longer needed by any client or dependent task
                        tasks.remove(ts)
                        continue
                    assert ts.who_has is not None
                    n_missing = n - len(ts.who_has & workers)
                    if n_missing <= 0:
                        # Already replicated enough
                        tasks.remove(ts)
                        continue

                    # Each existing replica can feed branching_factor new copies
                    # per generation
                    count = min(n_missing, branching_factor * len(ts.who_has))
                    assert count > 0

                    for ws in random.sample(tuple(workers - ts.who_has), count):
                        gathers[ws.address][ts.key] = [
                            wws.address for wws in ts.who_has
                        ]

                await asyncio.gather(
                    *(
                        # Note: this never raises exceptions
                        self.gather_on_worker(w, who_has)
                        for w, who_has in gathers.items()
                    )
                )
                for r, v in gathers.items():
                    self.log_event(r, {"action": "replicate-add", "who_has": v})

        self.log_event(
            "all",
            {
                "action": "replicate",
                "workers": list(workers),
                "key-count": len(keys),
                "branching-factor": branching_factor,
            },
        )

        return None
@log_errors
def workers_to_close(
self,
memory_ratio: int | float | None = None,
n: int | None = None,
key: Callable[[WorkerState], Hashable] | bytes | None = None,
minimum: int | None = None,
target: int | None = None,
attribute: str = "address",
) -> list[str]:
"""
Find workers that we can close with low cost
This returns a list of workers that are good candidates to retire.
These workers are not running anything and are storing
relatively little data relative to their peers. If all workers are
idle then we still maintain enough workers to have enough RAM to store
our data, with a comfortable buffer.
This is for use with systems like ``distributed.deploy.adaptive``.
Parameters
----------
memory_ratio : Number
Amount of extra space we want to have for our stored data.
Defaults to 2, or that we want to have twice as much memory as we
currently have data.
n : int
Number of workers to close
minimum : int
Minimum number of workers to keep around
key : Callable(WorkerState)
An optional callable mapping a WorkerState object to a group
affiliation. Groups will be closed together. This is useful when
closing workers must be done collectively, such as by hostname.
target : int
Target number of workers to have after we close
attribute : str
The attribute of the WorkerState object to return, like "address"
or "name". Defaults to "address".
Examples
--------
>>> scheduler.workers_to_close()
['tcp://192.168.0.1:1234', 'tcp://192.168.0.2:1234']
Group workers by hostname prior to closing
>>> scheduler.workers_to_close(key=lambda ws: ws.host)
['tcp://192.168.0.1:1234', 'tcp://192.168.0.1:4567']
Remove two workers
>>> scheduler.workers_to_close(n=2)
Keep enough workers to have twice as much memory as we we need.
>>> scheduler.workers_to_close(memory_ratio=2)
Returns
-------
to_close: list of worker addresses that are OK to close
See Also
--------
Scheduler.retire_workers
"""
if target is not None and n is None:
n = len(self.workers) - target
if n is not None:
if n < 0:
n = 0
target = len(self.workers) - n
if n is None and memory_ratio is None:
memory_ratio = 2
if not n and all([ws.processing for ws in self.workers.values()]):
return []
if key is None:
key = operator.attrgetter("address")
if isinstance(key, bytes):
key = pickle.loads(key)
# Long running tasks typically use a worker_client to schedule
# other tasks. We should never shut down the worker they're
# running on, as it would cause them to restart from scratch
# somewhere else.
valid_workers = [ws for ws in self.workers.values() if not ws.long_running]
for plugin in list(self.plugins.values()):
valid_workers = plugin.valid_workers_downscaling(self, valid_workers)
groups = groupby(key, valid_workers)
limit_bytes = {k: sum(ws.memory_limit for ws in v) for k, v in groups.items()}
group_bytes = {k: sum(ws.nbytes for ws in v) for k, v in groups.items()}
limit = sum(limit_bytes.values())
total = sum(group_bytes.values())
def _key(group: str) -> tuple[bool, int]:
is_idle = not any([wws.processing for wws in groups[group]])
bytes = -group_bytes[group]
return is_idle, bytes
idle = sorted(groups, key=_key)
to_close = []
n_remain = len(self.workers)
while idle:
group = idle.pop()
if n is None and any([ws.processing for ws in groups[group]]):
break
if minimum and n_remain - len(groups[group]) < minimum:
break
limit -= limit_bytes[group]
if (n is not None and n_remain - len(groups[group]) >= (target or 0)) or (
memory_ratio is not None and limit >= memory_ratio * total
):
to_close.append(group)
n_remain -= len(groups[group])
else:
break
result = [getattr(ws, attribute) for g in to_close for ws in groups[g]]
if result:
logger.debug("Suggest closing workers: %s", result)
return result
@overload
async def retire_workers(
self,
workers: list[str],
*,
close_workers: bool = False,
remove: bool = True,
stimulus_id: str | None = None,
) -> dict[str, Any]: ...
@overload
async def retire_workers(
self,
*,
names: list,
close_workers: bool = False,
remove: bool = True,
stimulus_id: str | None = None,
) -> dict[str, Any]: ...
@overload
async def retire_workers(
self,
*,
close_workers: bool = False,
remove: bool = True,
stimulus_id: str | None = None,
# Parameters for workers_to_close()
memory_ratio: int | float | None = None,
n: int | None = None,
key: Callable[[WorkerState], Hashable] | bytes | None = None,
minimum: int | None = None,
target: int | None = None,
attribute: str = "address",
) -> dict[str, Any]: ...
    @log_errors
    async def retire_workers(
        self,
        workers: list[str] | None = None,
        *,
        names: list | None = None,
        close_workers: bool = False,
        remove: bool = True,
        stimulus_id: str | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Gracefully retire workers from cluster. Any key that is in memory exclusively
        on the retired workers is replicated somewhere else.

        Parameters
        ----------
        workers: list[str] (optional)
            List of worker addresses to retire.
        names: list (optional)
            List of worker names to retire.
            Mutually exclusive with ``workers``.
            If neither ``workers`` nor ``names`` are provided, we call
            ``workers_to_close`` which finds a good set.
        close_workers: bool (defaults to False)
            Whether to actually close the worker explicitly from here.
            Otherwise, we expect some external job scheduler to finish off the worker.
        remove: bool (defaults to True)
            Whether to remove the worker metadata immediately or else wait for the
            worker to contact us.

            If close_workers=False and remove=False, this method just flushes the tasks
            in memory out of the workers and then returns.
            If close_workers=True and remove=False, this method will return while the
            workers are still in the cluster, although they won't accept new tasks.
            If close_workers=False or for whatever reason a worker doesn't accept the
            close command, it will be left permanently unable to accept new tasks and
            it is expected to be closed in some other way.
        **kwargs: dict
            Extra options to pass to workers_to_close to determine which
            workers we should drop. Only accepted if ``workers`` and ``names`` are
            omitted.

        Returns
        -------
        Dictionary mapping worker ID/address to dictionary of information about
        that worker for each retired worker.

        If there are keys that exist in memory only on the workers being retired and it
        was impossible to replicate them somewhere else (e.g. because there aren't
        any other running workers), the workers holding such keys won't be retired and
        won't appear in the returned dict.

        See Also
        --------
        Scheduler.workers_to_close
        """
        if names is not None and workers is not None:
            raise TypeError("names and workers are mutually exclusive")
        if (names is not None or workers is not None) and kwargs:
            raise TypeError(
                "Parameters for workers_to_close() are mutually exclusive with "
                f"names and workers: {kwargs}"
            )

        stimulus_id = stimulus_id or f"retire-workers-{time()}"
        # This lock makes retire_workers, rebalance, and replicate mutually
        # exclusive and will no longer be necessary once rebalance and replicate are
        # migrated to the Active Memory Manager.
        # However, it allows multiple instances of retire_workers to run in parallel.
        async with self._replica_lock("retire-workers"):
            if names is not None:
                logger.info("Retire worker names %s", names)
                # Support cases where names are passed through a CLI and become strings
                names_set = {str(name) for name in names}
                wss = {ws for ws in self.workers.values() if str(ws.name) in names_set}
            elif workers is not None:
                logger.info(
                    "Retire worker addresses (stimulus_id='%s') %s",
                    stimulus_id,
                    workers,
                )
                wss = {
                    self.workers[address]
                    for address in workers
                    if address in self.workers
                }
            else:
                wss = {
                    self.workers[address] for address in self.workers_to_close(**kwargs)
                }
            if not wss:
                return {}

            # Ensure an Active Memory Manager is running to replicate unique
            # keys off the retiring workers; spin up a temporary one if needed
            stop_amm = False
            amm: ActiveMemoryManagerExtension | None = self.extensions.get("amm")
            if not amm or not amm.running:
                amm = ActiveMemoryManagerExtension(
                    self, policies=set(), register=False, start=True, interval=2.0
                )
                stop_amm = True

            try:
                coros = []
                for ws in wss:
                    policy = RetireWorker(ws.address)
                    amm.add_policy(policy)

                    # Change Worker.status to closing_gracefully. Immediately set
                    # the same on the scheduler to prevent race conditions.
                    prev_status = ws.status
                    self.handle_worker_status_change(
                        Status.closing_gracefully, ws, stimulus_id
                    )
                    # FIXME: We should send a message to the nanny first;
                    # eventually workers won't be able to close their own nannies.
                    self.stream_comms[ws.address].send(
                        {
                            "op": "worker-status-change",
                            "status": ws.status.name,
                            "stimulus_id": stimulus_id,
                        }
                    )

                    coros.append(
                        self._track_retire_worker(
                            ws,
                            policy,
                            prev_status=prev_status,
                            close=close_workers,
                            remove=remove,
                            stimulus_id=stimulus_id,
                        )
                    )

                # Give the AMM a kick, in addition to its periodic running. This is
                # to avoid unnecessarily waiting for a potentially arbitrarily long
                # time (depending on interval settings)
                amm.run_once()

                workers_info_ok = {}
                workers_info_abort = {}
                for addr, result, info in await asyncio.gather(*coros):
                    if result == "OK":
                        workers_info_ok[addr] = info
                    else:
                        workers_info_abort[addr] = info

            finally:
                if stop_amm:
                    amm.stop()

        self.log_event(
            "all",
            {
                "action": "retire-workers",
                "retired": list(workers_info_ok),
                "could-not-retire": list(workers_info_abort),
                "stimulus_id": stimulus_id,
            },
        )
        self.log_event(
            list(workers_info_ok),
            {"action": "retired", "stimulus_id": stimulus_id},
        )
        self.log_event(
            list(workers_info_abort),
            {"action": "could-not-retire", "stimulus_id": stimulus_id},
        )

        return workers_info_ok
    async def _track_retire_worker(
        self,
        ws: WorkerState,
        policy: RetireWorker,
        prev_status: Status,
        close: bool,
        remove: bool,
        stimulus_id: str,
    ) -> tuple[str, Literal["OK", "no-recipients"], dict]:
        """Wait for a worker's ``RetireWorker`` AMM policy to finish, then
        finalize or abort the retirement.

        Returns
        -------
        ``(address, "OK", identity)`` once all unique keys have been replicated
        elsewhere and the worker was removed/closed as requested, or
        ``(address, "no-recipients", identity)`` if the policy could not find
        any recipient for the worker's unique data and the retirement was
        aborted (the worker's previous status is restored).
        """
        # Poll the policy; interval scales with how many keys the worker holds.
        while not policy.done():
            # Sleep 0.01s when there are 4 tasks or less
            # Sleep 0.5s when there are 200 or more
            poll_interval = max(0.01, min(0.5, len(ws.has_what) / 400))
            await asyncio.sleep(poll_interval)
        if policy.no_recipients:
            # Abort retirement. This time we don't need to worry about race
            # conditions and we can wait for a scheduler->worker->scheduler
            # round-trip.
            self.stream_comms[ws.address].send(
                {
                    "op": "worker-status-change",
                    "status": prev_status.name,
                    "stimulus_id": stimulus_id,
                }
            )
            logger.warning(
                f"Could not retire worker {ws.address!r}: unique data could not be "
                f"moved to any other worker ({stimulus_id=!r})"
            )
            return ws.address, "no-recipients", ws.identity()
        logger.debug(
            f"All unique keys on worker {ws.address!r} have been replicated elsewhere"
        )
        # Remove takes precedence over close; remove_worker itself honours `close`.
        if remove:
            await self.remove_worker(
                ws.address, expected=True, close=close, stimulus_id=stimulus_id
            )
        elif close:
            self.close_worker(ws.address)
        logger.info(f"Retired worker {ws.address!r} ({stimulus_id=!r})")
        return ws.address, "OK", ws.identity()
def add_keys(
self, worker: str, keys: Collection[Key] = (), stimulus_id: str | None = None
) -> Literal["OK", "not found"]:
"""
Learn that a worker has certain keys
This should not be used in practice and is mostly here for legacy
reasons. However, it is sent by workers from time to time.
"""
if worker not in self.workers:
return "not found"
ws = self.workers[worker]
redundant_replicas = []
for key in keys:
ts = self.tasks.get(key)
if ts is not None and ts.state == "memory":
self.add_replica(ts, ws)
else:
redundant_replicas.append(key)
if redundant_replicas:
if not stimulus_id:
stimulus_id = f"redundant-replicas-{time()}"
self.worker_send(
worker,
{
"op": "remove-replicas",
"keys": redundant_replicas,
"stimulus_id": stimulus_id,
},
)
return "OK"
@log_errors
def update_data(
self,
*,
who_has: dict[Key, list[str]],
nbytes: dict[Key, int],
client: str | None = None,
) -> None:
"""Learn that new data has entered the network from an external source"""
who_has = {k: [self.coerce_address(vv) for vv in v] for k, v in who_has.items()}
logger.debug("Update data %s", who_has)
for key, workers in who_has.items():
ts = self.tasks.get(key)
if ts is None:
ts = self.new_task(key, None, "memory")
ts.state = "memory"
ts_nbytes = nbytes.get(key, -1)
if ts_nbytes >= 0:
ts.set_nbytes(ts_nbytes)
for w in workers:
ws = self.workers[w]
self.add_replica(ts, ws)
self.report({"op": "key-in-memory", "key": key, "workers": list(workers)})
if client:
self.client_desires_keys(keys=list(who_has), client=client)
@overload
def report_on_key(self, key: Key, *, client: str | None = None) -> None: ...
@overload
def report_on_key(self, *, ts: TaskState, client: str | None = None) -> None: ...
def report_on_key(
self,
key: Key | None = None,
*,
ts: TaskState | None = None,
client: str | None = None,
) -> None:
if (ts is None) == (key is None):
raise ValueError( # pragma: nocover
f"ts and key are mutually exclusive; received {key=!r}, {ts=!r}"
)
if ts is None:
assert key is not None
ts = self.tasks.get(key)
else:
key = ts.key
if ts is not None:
report_msg = _task_to_report_msg(ts)
else:
report_msg = {"op": "cancelled-keys", "keys": [key]}
if report_msg is not None:
self.report(report_msg, ts=ts, client=client)
    @log_errors
    async def feed(
        self,
        comm: Comm,
        function: bytes | None = None,
        setup: bytes | None = None,
        teardown: bytes | None = None,
        interval: str | float = "1s",
        **kwargs: Any,
    ) -> None:
        """
        Provides a data Comm to external requester

        Caution: this runs arbitrary Python code on the scheduler. This should
        eventually be phased out. It is mostly used by diagnostics.

        Parameters
        ----------
        function, setup, teardown:
            Pickled callables; ``function(self[, state])`` is evaluated every
            *interval* and its result written to *comm*.
        interval:
            Poll period (string like ``"1s"`` or seconds as a float).
        """
        interval = parse_timedelta(interval)
        # SECURITY: these payloads are unpickled and executed verbatim on the
        # scheduler; only trusted clients must be able to reach this handler.
        if function:
            function = pickle.loads(function)
        if setup:
            setup = pickle.loads(setup)
        if teardown:
            teardown = pickle.loads(teardown)
        state = setup(self) if setup else None  # type: ignore
        if inspect.isawaitable(state):
            state = await state
        try:
            # Stream results until the scheduler stops running or the comm breaks.
            while self.status == Status.running:
                if state is None:
                    response = function(self)  # type: ignore
                else:
                    response = function(self, state)  # type: ignore
                await comm.write(response)
                await asyncio.sleep(interval)
        except OSError:
            # A broken comm terminates the feed silently.
            pass
        finally:
            if teardown:
                teardown(self, state)  # type: ignore
def log_worker_event(
self, worker: str, topic: str | Collection[str], msg: Any
) -> None:
if isinstance(msg, dict) and worker != topic:
msg["worker"] = worker
self.log_event(topic, msg)
def subscribe_worker_status(self, comm: Comm) -> dict[str, Any]:
WorkerStatusPlugin(self, comm)
ident = self.identity()
for v in ident["workers"].values():
del v["metrics"]
del v["last_seen"]
return ident
def get_processing(
self, workers: Iterable[str] | None = None
) -> dict[str, list[Key]]:
if workers is not None:
workers = set(map(self.coerce_address, workers))
return {w: [ts.key for ts in self.workers[w].processing] for w in workers}
else:
return {
w: [ts.key for ts in ws.processing] for w, ws in self.workers.items()
}
def get_who_has(self, keys: Iterable[Key] | None = None) -> dict[Key, list[str]]:
if keys is not None:
return {
key: (
[ws.address for ws in self.tasks[key].who_has or ()]
if key in self.tasks
else []
)
for key in keys
}
else:
return {
key: [ws.address for ws in ts.who_has or ()]
for key, ts in self.tasks.items()
}
def get_has_what(
self, workers: Iterable[str] | None = None
) -> dict[str, list[Key]]:
if workers is not None:
workers = map(self.coerce_address, workers)
return {
w: (
[ts.key for ts in self.workers[w].has_what]
if w in self.workers
else []
)
for w in workers
}
else:
return {w: [ts.key for ts in ws.has_what] for w, ws in self.workers.items()}
def get_ncores(self, workers: Iterable[str] | None = None) -> dict[str, int]:
if workers is not None:
workers = map(self.coerce_address, workers)
return {w: self.workers[w].nthreads for w in workers if w in self.workers}
else:
return {w: ws.nthreads for w, ws in self.workers.items()}
def get_ncores_running(
self, workers: Iterable[str] | None = None
) -> dict[str, int]:
ncores = self.get_ncores(workers=workers)
return {
w: n for w, n in ncores.items() if self.workers[w].status == Status.running
}
    async def get_call_stack(self, keys: Iterable[Key] | None = None) -> dict[str, Any]:
        """Fetch call stacks from workers.

        With *keys*, walk each key's ``waiting`` dependencies to find the tasks
        currently processing and query only the workers running them; with
        ``keys=None``, query every worker for everything it is running. Workers
        that return nothing are omitted from the result.
        """
        workers: dict[str, list[Key] | None]
        if keys is not None:
            # Depth-first walk: a waiting task is explained by the processing
            # tasks it (transitively) depends on.
            stack = list(keys)
            processing = set()
            while stack:
                key = stack.pop()
                ts = self.tasks[key]
                if ts.state == "waiting":
                    stack.extend([dts.key for dts in ts.dependencies])
                elif ts.state == "processing":
                    processing.add(ts)
            # Group the processing keys by the worker executing them.
            workers = defaultdict(list)
            for ts in processing:
                if ts.processing_on:
                    wkeys = workers[ts.processing_on.address]
                    assert wkeys is not None
                    wkeys.append(ts.key)
        else:
            # None means "all keys" on the worker side.
            workers = {w: None for w in self.workers}
        if not workers:
            return {}
        results = await asyncio.gather(
            *(self.rpc(w).call_stack(keys=v) for w, v in workers.items())
        )
        response = {w: r for w, r in zip(workers, results) if r}
        return response
    async def benchmark_hardware(self) -> dict[str, dict[str, float]]:
        """
        Run a benchmark on the workers for memory, disk, and network bandwidths

        Returns
        -------
        result: dict
            A dictionary mapping the names "disk", "memory", and "network" to
            dictionaries mapping sizes to bandwidths. These bandwidths are
            averaged over many workers running computations across the cluster.
        """
        out: dict[str, defaultdict[str, list[float]]] = {
            name: defaultdict(list) for name in ["disk", "memory", "network"]
        }
        # disk
        result = await self.broadcast(msg={"op": "benchmark_disk"})
        for d in result.values():
            for size, duration in d.items():
                out["disk"][size].append(duration)
        # memory
        result = await self.broadcast(msg={"op": "benchmark_memory"})
        for d in result.values():
            for size, duration in d.items():
                out["memory"][size].append(duration)
        # network
        workers = list(self.workers)
        # On an adaptive cluster, if multiple workers are started on the same physical host,
        # they are more likely to connect to the Scheduler in sequence, ending up next to
        # each other in this list.
        # The transfer speed within such clusters of workers will be effectively that of
        # localhost. This could happen across different VMs and/or docker images, so
        # implementing logic based on IP addresses would not necessarily help.
        # Randomize the connections to even out the mean measures.
        random.shuffle(workers)
        # Pair up workers; each pair measures transfer bandwidth between its two members.
        futures = [
            self.rpc(a).benchmark_network(address=b) for a, b in partition(2, workers)
        ]
        responses = await asyncio.gather(*futures)
        for d in responses:
            for size, duration in d.items():
                out["network"][size].append(duration)
        # Average the samples for each size across all contributing workers.
        result = {}
        for mode in out:
            result[mode] = {
                size: sum(durations) / len(durations)
                for size, durations in out[mode].items()
            }
        return result
@log_errors
def get_nbytes(
self, keys: Iterable[Key] | None = None, summary: bool = True
) -> dict[Key, int]:
if keys is not None:
result = {k: self.tasks[k].nbytes for k in keys}
else:
result = {k: ts.nbytes for k, ts in self.tasks.items() if ts.nbytes >= 0}
if summary:
out: defaultdict[Key, int] = defaultdict(int)
for k, v in result.items():
out[key_split(k)] += v
result = dict(out)
return result
def run_function(
self,
comm: Comm,
function: Callable,
args: tuple = (),
kwargs: dict | None = None,
wait: bool = True,
) -> Any:
"""Run a function within this process
See Also
--------
Client.run_on_scheduler
"""
from distributed.worker import run
kwargs = kwargs or {}
self.log_event("all", {"action": "run-function", "function": function})
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def set_metadata(self, keys: list[Key], value: object = None) -> None:
metadata = self.task_metadata
for key in keys[:-1]:
if key not in metadata or not isinstance(metadata[key], (dict, list)):
metadata[key] = {}
metadata = metadata[key]
metadata[keys[-1]] = value
def get_metadata(self, keys: list[Key], default: Any = no_default) -> Any:
metadata = self.task_metadata
try:
for key in keys:
metadata = metadata[key]
return metadata
except KeyError:
if default is not no_default:
return default
else:
raise
def set_restrictions(self, worker: dict[Key, Collection[str] | str | None]) -> None:
for key, restrictions in worker.items():
ts = self.tasks[key]
if isinstance(restrictions, str):
restrictions = {restrictions}
ts.worker_restrictions = set(restrictions) if restrictions else None
@log_errors
def get_task_prefix_states(self) -> dict[str, dict[str, int]]:
state = {}
for tp in self.task_prefixes.values():
states = tp.states
ss: list[TaskStateState] = [
"memory",
"erred",
"released",
"processing",
"waiting",
]
if any(states.get(s) for s in ss):
state[tp.name] = {
"memory": states["memory"],
"erred": states["erred"],
"released": states["released"],
"processing": states["processing"],
"waiting": states["waiting"],
}
return state
def get_task_status(self, keys: Iterable[Key]) -> dict[Key, TaskStateState | None]:
return {
key: (self.tasks[key].state if key in self.tasks else None) for key in keys
}
def get_task_stream(
self,
start: str | float | None = None,
stop: str | float | None = None,
count: int | None = None,
) -> list:
from distributed.diagnostics.task_stream import TaskStreamPlugin
if TaskStreamPlugin.name not in self.plugins:
self.add_plugin(TaskStreamPlugin(self))
plugin = cast(TaskStreamPlugin, self.plugins[TaskStreamPlugin.name])
return plugin.collect(start=start, stop=stop, count=count)
def start_task_metadata(self, name: str) -> None:
plugin = CollectTaskMetaDataPlugin(scheduler=self, name=name)
self.add_plugin(plugin)
def stop_task_metadata(self, name: str | None = None) -> dict:
plugins = [
p
for p in list(self.plugins.values())
if isinstance(p, CollectTaskMetaDataPlugin) and p.name == name
]
if len(plugins) != 1:
raise ValueError(
"Expected to find exactly one CollectTaskMetaDataPlugin "
f"with name {name} but found {len(plugins)}."
)
plugin = plugins[0]
self.remove_plugin(name=plugin.name)
return {"metadata": plugin.metadata, "state": plugin.state}
async def register_worker_plugin(
self, comm: None, plugin: bytes, name: str, idempotent: bool | None = None
) -> dict[str, OKMessage]:
"""Registers a worker plugin on all running and future workers"""
logger.info("Registering Worker plugin %s", name)
if idempotent is None:
warnings.warn(
"The signature of `Scheduler.register_worker_plugin` now requires "
"`idempotent`. Not including `idempotent` in the signature will no longer "
"be supported in future versions.",
FutureWarning,
)
idempotent = False
if name in self.worker_plugins and idempotent:
return {}
self.worker_plugins[name] = plugin
responses = await self.broadcast(
msg=dict(op="plugin-add", plugin=plugin, name=name)
)
return responses
async def unregister_worker_plugin(
self, comm: None, name: str
) -> dict[str, ErrorMessage | OKMessage]:
"""Unregisters a worker plugin"""
try:
self.worker_plugins.pop(name)
except KeyError:
raise ValueError(f"The worker plugin {name} does not exist")
responses = await self.broadcast(msg=dict(op="plugin-remove", name=name))
return responses
    async def register_nanny_plugin(
        self, comm: None, plugin: bytes, name: str, idempotent: bool | None = None
    ) -> dict[str, OKMessage]:
        """Registers a nanny plugin on all running and future nannies"""
        logger.info("Registering Nanny plugin %s", name)
        # `idempotent=None` means a legacy caller that predates the parameter.
        if idempotent is None:
            warnings.warn(
                "The signature of `Scheduler.register_nanny_plugin` now requires "
                "`idempotent`. Not including `idempotent` in the signature will no longer "
                "be supported in future versions.",
                FutureWarning,
            )
            idempotent = False
        # With idempotent=True, keep an already-registered plugin untouched.
        if name in self.nanny_plugins and idempotent:
            return {}
        self.nanny_plugins[name] = plugin
        # Wait for nannies that are mid-startup to finish, so the broadcast
        # below reaches them as well.
        async with self._starting_nannies_cond:
            if self._starting_nannies:
                logger.info("Waiting for Nannies to start %s", self._starting_nannies)
            await self._starting_nannies_cond.wait_for(
                lambda: not self._starting_nannies
            )
            responses = await self.broadcast(
                msg=dict(op="plugin_add", plugin=plugin, name=name),
                nanny=True,
            )
            return responses
    async def unregister_nanny_plugin(
        self, comm: None, name: str
    ) -> dict[str, ErrorMessage | OKMessage]:
        """Unregisters a nanny plugin"""
        try:
            self.nanny_plugins.pop(name)
        except KeyError:
            raise ValueError(f"The nanny plugin {name} does not exist")
        responses = await self.broadcast(
            msg=dict(op="plugin_remove", name=name), nanny=True
        )
        return responses
    def transition(
        self,
        key: Key,
        finish: TaskStateState,
        stimulus_id: str,
        **kwargs: Any,
    ) -> Recs:
        """Transition a key from its current state to the finish state

        Parameters
        ----------
        key:
            Task to transition.
        finish:
            Desired final state.
        stimulus_id:
            Identifier of the event that triggered this transition (for logging
            and tracing).

        Examples
        --------
        >>> self.transition('x', 'waiting')
        {'x': 'processing'}

        Returns
        -------
        Dictionary of recommendations for future transitions

        See Also
        --------
        Scheduler.transitions: transitive version of this function
        """
        recommendations, client_msgs, worker_msgs = self._transition(
            key, finish, stimulus_id, **kwargs
        )
        # Flush any messages the transition produced for clients and workers.
        self.send_all(client_msgs, worker_msgs)
        return recommendations
def transitions(self, recommendations: Recs, stimulus_id: str) -> None:
"""Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
client_msgs: Msgs = {}
worker_msgs: Msgs = {}
self._transitions(recommendations, client_msgs, worker_msgs, stimulus_id)
self.send_all(client_msgs, worker_msgs)
    async def get_story(self, keys_or_stimuli: Iterable[Key | str]) -> list[Transition]:
        """RPC hook for :meth:`SchedulerState.story`.

        Note that the msgpack serialization/deserialization round-trip will transform
        the :class:`Transition` namedtuples into regular tuples.
        """
        return self.story(*keys_or_stimuli)
    def _reschedule(
        self, key: Key, worker: str | None = None, *, stimulus_id: str
    ) -> None:
        """Reschedule a task.

        This function should only be used when the task has already been released in
        some way on the worker it's assigned to — either via cancellation or a
        Reschedule exception — and you are certain the worker will not send any further
        updates about the task to the scheduler.

        Parameters
        ----------
        key:
            Task to reschedule.
        worker:
            If given, only reschedule when the task is still processing on this
            worker; otherwise the call is a no-op.
        """
        try:
            ts = self.tasks[key]
        except KeyError:
            logger.warning(
                f"Attempting to reschedule task {key!r}, which was not "
                "found on the scheduler. Aborting reschedule."
            )
            return
        # Only a processing task can be rescheduled.
        if ts.state != "processing":
            return
        # If a worker was specified, bail out when the task moved elsewhere.
        if worker and ts.processing_on and ts.processing_on.address != worker:
            return
        # transition_processing_released will immediately suggest an additional
        # transition to waiting if the task has any waiters or clients holding a future.
        self.transitions({key: "released"}, stimulus_id=stimulus_id)
#####################
# Utility functions #
#####################
def add_resources(
self, worker: str, resources: dict | None = None
) -> Literal["OK"]:
ws = self.workers[worker]
if resources:
ws.resources.update(resources)
ws.used_resources = {}
for resource, quantity in ws.resources.items():
ws.used_resources[resource] = 0
dr = self.resources.get(resource, None)
if dr is None:
self.resources[resource] = dr = {}
dr[worker] = quantity
return "OK"
def remove_resources(self, worker: str) -> None:
ws = self.workers[worker]
for resource in ws.resources:
dr = self.resources.setdefault(resource, {})
del dr[worker]
def coerce_address(self, addr: str | tuple, resolve: bool = True) -> str:
"""
Coerce possible input addresses to canonical form.
*resolve* can be disabled for testing with fake hostnames.
Handles strings, tuples, or aliases.
"""
# XXX how many address-parsing routines do we have?
if addr in self.aliases:
addr = self.aliases[addr]
if isinstance(addr, tuple):
addr = unparse_host_port(*addr)
if not isinstance(addr, str):
raise TypeError(f"addresses should be strings or tuples, got {addr!r}")
if resolve:
addr = resolve_address(addr)
else:
addr = normalize_address(addr)
return addr
def workers_list(self, workers: Iterable[str] | None) -> list[str]:
"""
List of qualifying workers
Takes a list of worker addresses or hostnames.
Returns a list of all worker addresses that match
"""
if workers is None:
return list(self.workers)
out = set()
for w in workers:
if ":" in w:
out.add(w)
else:
out.update({ww for ww in self.workers if w in ww}) # TODO: quadratic
return list(out)
    async def get_profile(
        self,
        workers: Iterable | None = None,
        scheduler: bool = False,
        server: bool = False,
        merge_workers: bool = True,
        start: float | None = None,
        stop: float | None = None,
        key: Key | None = None,
    ) -> dict:
        """Collect profiling data from the cluster.

        With ``scheduler=True`` return the scheduler's own event-loop profile;
        otherwise gather worker profiles (``server=True`` selects the workers'
        administrative profile) and either merge them into one profile or
        return them keyed by worker address.
        """
        if workers is None:
            workers = self.workers
        else:
            # Only query workers that actually exist.
            workers = set(self.workers) & set(workers)
        if scheduler:
            return profile.get_profile(
                self.io_loop.profile,  # type: ignore[attr-defined]
                start=start,
                stop=stop,
            )
        results = await asyncio.gather(
            *(
                self.rpc(w).profile(start=start, stop=stop, key=key, server=server)
                for w in workers
            ),
            return_exceptions=True,
        )
        # Workers that failed to respond are silently dropped.
        results = [r for r in results if not isinstance(r, Exception)]
        response: dict
        if merge_workers:
            response = profile.merge(*results)  # type: ignore
        else:
            response = dict(zip(workers, results))
        return response
    async def get_profile_metadata(
        self,
        workers: Iterable[str] | None = None,
        start: float = 0,
        stop: float | None = None,
        profile_cycle_interval: str | float | None = None,
    ) -> dict[str, Any]:
        """Aggregate profile metadata from workers into time buckets.

        Returns ``{"counts": [(time, total_count), ...], "keys": {key:
        [[time, count], ...]}}`` where timestamps are quantized to the profile
        cycle interval *dt*.
        """
        dt = profile_cycle_interval or dask.config.get(
            "distributed.worker.profile.cycle"
        )
        dt = parse_timedelta(dt, default="ms")
        if workers is None:
            workers = self.workers
        else:
            workers = set(self.workers) & set(workers)
        results: Sequence[Any] = await asyncio.gather(
            *(self.rpc(w).profile_metadata(start=start, stop=stop) for w in workers),
            return_exceptions=True,
        )
        # Workers that failed to respond are silently dropped.
        results = [r for r in results if not isinstance(r, Exception)]
        # Sum sample counts across workers within each dt-wide bucket.
        counts = [
            (time, sum(pluck(1, group)))
            for time, group in itertools.groupby(
                merge_sorted(
                    *(v["counts"] for v in results),
                ),
                lambda t: t[0] // dt * dt,
            )
        ]
        keys: dict[Key, list[list]] = {
            k: [] for v in results for t, d in v["keys"] for k in d
        }
        # Walk all (time, {key: count}) records in time order, accumulating
        # per-key counts into one [bucket_time, count] entry per dt bucket.
        groups1 = [v["keys"] for v in results]
        groups2 = list(merge_sorted(*groups1, key=first))
        last = 0
        for t, d in groups2:
            tt = t // dt * dt
            if tt > last:
                # New bucket: open a fresh zero entry for every key.
                last = tt
                for v in keys.values():
                    v.append([tt, 0])
            for k, v in d.items():
                keys[k][-1][1] += v
        return {"counts": counts, "keys": keys}
    async def performance_report(
        self, start: float, last_count: int, code: str = "", mode: str | None = None
    ) -> str:
        """Build a standalone HTML performance report covering the window from
        *start* until now.

        Gathers worker/scheduler profiles, the task stream, bandwidth and
        system-monitor dashboards, and the scheduler logs into a tabbed Bokeh
        document and returns it as an HTML string. *code* is embedded verbatim
        in the summary tab; *mode* is forwarded to ``bokeh.plotting.output_file``.
        """
        stop = time()
        # Profiles
        compute_d, scheduler_d, workers_d = await asyncio.gather(
            *[
                self.get_profile(start=start),
                self.get_profile(scheduler=True, start=start),
                self.get_profile(server=True, start=start),
            ]
        )
        from distributed import profile
        def profile_to_figure(state: object) -> object:
            data = profile.plot_data(state)
            figure, source = profile.plot_figure(data, sizing_mode="stretch_both")
            return figure
        compute, scheduler, workers = map(
            profile_to_figure, (compute_d, scheduler_d, workers_d)
        )
        del compute_d, scheduler_d, workers_d
        # Task stream
        task_stream = self.get_task_stream(start=start)
        total_tasks = len(task_stream)
        timespent: defaultdict[str, float] = defaultdict(float)
        for d in task_stream:
            for x in d["startstops"]:
                timespent[x["action"]] += x["stop"] - x["start"]
        tasks_timings = ""
        for k in sorted(timespent.keys()):
            tasks_timings += f"\n<li> {k} time: {format_time(timespent[k])} </li>"
        from distributed.dashboard.components.scheduler import task_stream_figure
        from distributed.diagnostics.task_stream import rectangles
        rects = rectangles(task_stream)
        source, task_stream = task_stream_figure(sizing_mode="stretch_both")
        source.data.update(rects)
        # Bandwidth
        from distributed.dashboard.components.scheduler import (
            BandwidthTypes,
            BandwidthWorkers,
        )
        bandwidth_workers = BandwidthWorkers(self, sizing_mode="stretch_both")
        bandwidth_workers.update()
        bandwidth_types = BandwidthTypes(self, sizing_mode="stretch_both")
        bandwidth_types.update()
        # System monitor
        from distributed.dashboard.components.shared import SystemMonitor
        sysmon = SystemMonitor(self, last_count=last_count, sizing_mode="stretch_both")
        sysmon.update()
        # Scheduler logs
        from distributed.dashboard.components.scheduler import _STYLES, SchedulerLogs
        logs = SchedulerLogs(self, start=start)
        from bokeh.models import Div, TabPanel, Tabs
        import distributed
        # HTML
        duration = format_time(stop - start)
        nworkers = len(self.workers)
        threads = sum(ws.nthreads for ws in self.workers.values())
        memory = format_bytes(sum(ws.memory_limit for ws in self.workers.values()))
        html = f"""
        <h1> Dask Performance Report </h1>
        <i> Select different tabs on the top for additional information </i>
        <h2> Duration: {duration} </h2>
        <h2> Tasks Information </h2>
        <ul>
        <li> number of tasks: {total_tasks} </li>
        {tasks_timings}
        </ul>
        <h2> Scheduler Information </h2>
        <ul>
        <li> Address: {self.address} </li>
        <li> Workers: {nworkers} </li>
        <li> Threads: {threads} </li>
        <li> Memory: {memory} </li>
        <li> Dask Version: {dask.__version__} </li>
        <li> Dask.Distributed Version: {distributed.__version__} </li>
        </ul>
        <h2> Calling Code </h2>
        <pre>
        {code}
        </pre>
        """
        html = Div(text=html, styles=_STYLES)
        html = TabPanel(child=html, title="Summary")
        compute = TabPanel(child=compute, title="Worker Profile (compute)")
        workers = TabPanel(child=workers, title="Worker Profile (administrative)")
        scheduler = TabPanel(
            child=scheduler, title="Scheduler Profile (administrative)"
        )
        task_stream = TabPanel(child=task_stream, title="Task Stream")
        bandwidth_workers = TabPanel(
            child=bandwidth_workers.root, title="Bandwidth (Workers)"
        )
        bandwidth_types = TabPanel(
            child=bandwidth_types.root, title="Bandwidth (Types)"
        )
        system = TabPanel(child=sysmon.root, title="System")
        logs = TabPanel(child=logs.root, title="Scheduler Logs")
        tabs = Tabs(
            tabs=[
                html,
                task_stream,
                system,
                logs,
                compute,
                workers,
                scheduler,
                bandwidth_workers,
                bandwidth_types,
            ],
            sizing_mode="stretch_both",
        )
        from bokeh.core.templates import get_env
        from bokeh.plotting import output_file, save
        # Render to a temporary file using the project's report template, then
        # read the finished document back as a string.
        with tmpfile(extension=".html") as fn:
            output_file(filename=fn, title="Dask Performance Report", mode=mode)
            template_directory = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "dashboard", "templates"
            )
            template_environment = get_env()
            template_environment.loader.searchpath.append(template_directory)
            template = template_environment.get_template("performance_report.html")
            save(tabs, filename=fn, template=template)
            with open(fn) as f:
                data = f.read()
        return data
async def get_worker_logs(
self, n: int | None = None, workers: list | None = None, nanny: bool = False
) -> dict:
results = await self.broadcast(
msg={"op": "get_logs", "n": n}, workers=workers, nanny=nanny
)
return results
def log_event(self, topic: str | Collection[str], msg: Any) -> None:
"""Log an event under a given topic
Parameters
----------
topic : str, list[str]
Name of the topic under which to log an event. To log the same
event under multiple topics, pass a list of topic names.
msg
Event message to log. Note this must be msgpack serializable.
See also
--------
Client.log_event
"""
self._broker.publish(topic, msg)
    def subscribe_topic(self, topic: str, client: str) -> None:
        """Start forwarding events published on *topic* to *client*."""
        self._broker.subscribe(topic, client)
    def unsubscribe_topic(self, topic: str, client: str) -> None:
        """Stop forwarding events on *topic* to *client*."""
        self._broker.unsubscribe(topic, client)
    @overload
    def get_events(self, topic: str) -> tuple[tuple[float, Any], ...]: ...
    @overload
    def get_events(self) -> dict[str, tuple[tuple[float, Any], ...]]: ...
    def get_events(
        self, topic: str | None = None
    ) -> tuple[tuple[float, Any], ...] | dict[str, tuple[tuple[float, Any], ...]]:
        """Return stored events: the ``(timestamp, msg)`` tuples for *topic*,
        or a mapping of all topics when *topic* is ``None``.
        """
        return self._broker.get_events(topic)
async def get_worker_monitor_info(
self, recent: bool = False, starts: dict | None = None
) -> dict:
if starts is None:
starts = {}
results = await asyncio.gather(
*(
self.rpc(w).get_monitor_info(recent=recent, start=starts.get(w, 0))
for w in self.workers
)
)
return dict(zip(self.workers, results))
###########
# Cleanup #
###########
    @log_errors
    async def check_worker_ttl(self) -> None:
        """Restart (or remove) workers that have not heartbeated within the TTL.

        The effective TTL is the configured ``worker_ttl``, but never less than
        10x the heartbeat interval for the current cluster size.
        """
        now = time()
        stimulus_id = f"check-worker-ttl-{now}"
        assert self.worker_ttl
        ttl = max(self.worker_ttl, 10 * heartbeat_interval(len(self.workers)))
        to_restart = []
        for ws in self.workers.values():
            last_seen = now - ws.last_seen
            if last_seen > ttl:
                to_restart.append(ws.address)
                logger.warning(
                    f"Worker failed to heartbeat for {last_seen:.0f}s; "
                    f"{'attempting restart' if ws.nanny else 'removing'}: {ws}"
                )
        if to_restart:
            self.log_event(
                "scheduler",
                {
                    "action": "worker-ttl-timed-out",
                    "workers": to_restart.copy(),
                    "ttl": ttl,
                },
            )
            # restart_workers removes workers without a nanny instead of
            # restarting them; don't block on the replacements coming back.
            await self.restart_workers(
                to_restart,
                wait_for_workers=False,
                stimulus_id=stimulus_id,
            )
    def check_idle(self) -> float | None:
        """Track how long the scheduler has been idle; close it when the idle
        timeout is exceeded.

        Returns the timestamp since which the scheduler has been idle, or
        ``None`` if it is currently active (or closing/closed).
        """
        if self.status in (Status.closing, Status.closed):
            return None  # pragma: nocover
        # Any task transition since the last check counts as activity.
        if self.transition_counter != self._idle_transition_counter:
            self._idle_transition_counter = self.transition_counter
            self.idle_since = None
            return None
        # Graph updates in flight also count as activity.
        if self._active_graph_updates > 0:
            self.idle_since = None
            return None
        # Pending or running work means we are not idle.
        if (
            self.queued
            or self.unrunnable
            or any(ws.processing for ws in self.workers.values())
        ):
            self.idle_since = None
            return None
        # First idle observation: start the clock.
        if not self.idle_since:
            self.idle_since = time()
            return self.idle_since
        # When running Jupyter, notebook activity postpones the idle start.
        if self.jupyter:
            last_activity = (
                self._jupyter_server_application.web_app.last_activity().timestamp()
            )
            if last_activity > self.idle_since:
                self.idle_since = last_activity
                return self.idle_since
        if self.idle_timeout:
            if time() > self.idle_since + self.idle_timeout:
                assert self.idle_since
                logger.info(
                    "Scheduler closing after being idle for %s",
                    format_time(self.idle_timeout),
                )
                # Schedule the close asynchronously; don't block the caller.
                self._ongoing_background_tasks.call_soon(
                    self.close, reason="idle-timeout-exceeded"
                )
        return self.idle_since
    def _check_no_workers(self) -> None:
        """Fail tasks that have waited longer than ``no_workers_timeout`` for
        workers (or for their restrictions to become satisfiable), and log an
        event listing the affected keys.
        """
        if (
            self.status in (Status.closing, Status.closed)
            or self.no_workers_timeout is None
        ):
            return
        now = monotonic()
        stimulus_id = f"check-no-workers-timeout-{time()}"
        recommendations: Recs = {}
        # Keep the "no running workers since" marker up to date before judging.
        self._refresh_no_workers_since(now)
        affected = self._check_unrunnable_task_timeouts(
            now, recommendations=recommendations, stimulus_id=stimulus_id
        )
        affected.update(
            self._check_queued_task_timeouts(
                now, recommendations=recommendations, stimulus_id=stimulus_id
            )
        )
        self.transitions(recommendations, stimulus_id=stimulus_id)
        if affected:
            self.log_event(
                "scheduler",
                {"action": "no-workers-timeout-exceeded", "keys": affected},
            )
    def _check_unrunnable_task_timeouts(
        self, timestamp: float, recommendations: Recs, stimulus_id: str
    ) -> set[Key]:
        """Fail unrunnable tasks that have waited longer than
        ``no_workers_timeout``; return the set of affected keys.

        Tasks are split into those whose restrictions could not be satisfied
        while workers were available (failed with ``NoValidWorkerError``) and
        those stuck because there were no running workers at all (failed with
        ``NoWorkerError`` via the shared helper).
        """
        assert self.no_workers_timeout
        unsatisfied = []
        no_workers = []
        for ts, unrunnable_since in self.unrunnable.items():
            if timestamp <= unrunnable_since + self.no_workers_timeout:
                # unrunnable is insertion-ordered, which means that unrunnable_since will
                # be monotonically increasing in this loop.
                break
            # If workers were still running when the task became unrunnable,
            # its restrictions are the problem, not worker availability.
            if (
                self._no_workers_since is None
                or self._no_workers_since >= unrunnable_since
            ):
                unsatisfied.append(ts)
            else:
                no_workers.append(ts)
        if not unsatisfied and not no_workers:
            return set()
        for ts in unsatisfied:
            e = pickle.dumps(
                NoValidWorkerError(
                    task=ts.key,
                    host_restrictions=(ts.host_restrictions or set()).copy(),
                    worker_restrictions=(ts.worker_restrictions or set()).copy(),
                    resource_restrictions=(ts.resource_restrictions or {}).copy(),
                    timeout=self.no_workers_timeout,
                ),
            )
            r = self.transition(
                ts.key,
                "erred",
                exception=e,
                cause=ts.key,
                stimulus_id=stimulus_id,
            )
            recommendations.update(r)
            logger.error(
                "Task %s marked as failed because it timed out waiting "
                "for its restrictions to become satisfied.",
                ts.key,
            )
        self._fail_tasks_after_no_workers_timeout(
            no_workers, recommendations, stimulus_id
        )
        return {ts.key for ts in concat([unsatisfied, no_workers])}
def _check_queued_task_timeouts(
self, timestamp: float, recommendations: Recs, stimulus_id: str
) -> set[Key]:
assert self.no_workers_timeout
if self._no_workers_since is None:
return set()
if timestamp <= self._no_workers_since + self.no_workers_timeout:
return set()
affected = list(self.queued)
self._fail_tasks_after_no_workers_timeout(
affected, recommendations, stimulus_id
)
return {ts.key for ts in affected}
    def _fail_tasks_after_no_workers_timeout(
        self, timed_out: Iterable[TaskState], recommendations: Recs, stimulus_id: str
    ) -> None:
        """Transition each task in *timed_out* to ``erred`` with a pickled
        ``NoWorkerError`` as the exception, merging any follow-up transition
        recommendations into *recommendations*.
        """
        assert self.no_workers_timeout
        for ts in timed_out:
            # Workers unpickle this when reporting the failure to clients.
            e = pickle.dumps(
                NoWorkerError(
                    task=ts.key,
                    timeout=self.no_workers_timeout,
                ),
            )
            r = self.transition(
                ts.key,
                "erred",
                exception=e,
                cause=ts.key,
                stimulus_id=stimulus_id,
            )
            recommendations.update(r)
            logger.error(
                "Task %s marked as failed because it timed out waiting "
                "without any running workers.",
                ts.key,
            )
def _refresh_no_workers_since(self, timestamp: float | None = None) -> None:
if self.running or not (self.queued or self.unrunnable):
self._no_workers_since = None
return
if not self._no_workers_since:
self._no_workers_since = timestamp or monotonic()
return
    def adaptive_target(self, target_duration: float | None = None) -> int:
        """Desired number of workers based on the current workload

        This looks at the current running tasks and memory use, and returns a
        number of desired workers. This is often used by adaptive scheduling.

        Parameters
        ----------
        target_duration : str
            A desired duration of time for computations to take. This affects
            how rapidly the scheduler will ask to scale.

        Returns
        -------
        The larger of the CPU-based and memory-based worker targets, capped
        below by the current worker count minus what :meth:`workers_to_close`
        would retire.

        See Also
        --------
        distributed.deploy.Adaptive
        """
        if target_duration is None:
            target_duration = dask.config.get("distributed.adaptive.target-duration")
        target_duration = parse_timedelta(target_duration)
        # CPU
        # Estimate queued work from a 100-task sample, scaled up to the full
        # backlog size.
        queued = take(100, concat([self.queued, self.unrunnable.keys()]))
        queued_occupancy = 0.0
        for ts in queued:
            queued_occupancy += self._get_prefix_duration(ts.prefix)
        tasks_ready = len(self.queued) + len(self.unrunnable)
        if tasks_ready > 100:
            queued_occupancy *= tasks_ready / 100
        cpu = math.ceil((self.total_occupancy + queued_occupancy) / target_duration)
        # Avoid a few long tasks from asking for many cores
        for ws in self.workers.values():
            if tasks_ready > cpu:
                break
            tasks_ready += len(ws.processing)
        else:
            cpu = min(tasks_ready, cpu)
        # Divide by average nthreads per worker
        if self.workers:
            nthreads = sum(ws.nthreads for ws in self.workers.values())
            cpu = math.ceil(cpu / nthreads * len(self.workers))
        # Ask for at least one worker when there is work but no workers.
        if (self.unrunnable or self.queued) and not self.workers:
            cpu = max(1, cpu)
        # add more workers if more than 60% of memory is used
        limit = sum(ws.memory_limit for ws in self.workers.values())
        used = sum(ws.nbytes for ws in self.workers.values())
        memory = 0
        if used > 0.6 * limit and limit > 0:
            memory = 2 * len(self.workers)
        target = max(memory, cpu)
        if target >= len(self.workers):
            return target
        else:  # Scale down?
            to_close = self.workers_to_close()
            return len(self.workers) - len(to_close)
def request_acquire_replicas(
    self, addr: str, keys: Iterable[Key], *, stimulus_id: str
) -> None:
    """Asynchronously ask a worker to acquire a replica of the listed keys from
    other workers. This is a fire-and-forget operation which offers no feedback for
    success or failure, and is intended for housekeeping and not for computation.

    Parameters
    ----------
    addr : str
        Address of the worker that should fetch the replicas.
    keys : Iterable[Key]
        Keys to replicate; each must already be in memory somewhere on the cluster.
    stimulus_id : str
        Identifier of the event that triggered this request (for tracing).
    """
    who_has = {}
    nbytes = {}
    for key in keys:
        ts = self.tasks[key]
        # Every key must already have at least one replica to copy from.
        assert ts.who_has
        who_has[key] = [ws.address for ws in ts.who_has or ()]
        nbytes[key] = ts.nbytes
    self.stream_comms[addr].send(
        {
            "op": "acquire-replicas",
            "who_has": who_has,
            "nbytes": nbytes,
            "stimulus_id": stimulus_id,
        },
    )
def request_remove_replicas(
    self, addr: str, keys: list[Key], *, stimulus_id: str
) -> None:
    """Asynchronously ask a worker to discard its replica of the listed keys.

    This must never be used to destroy the last replica of a key. This is a
    fire-and-forget operation, intended for housekeeping and not for computation.

    The replica disappears immediately from TaskState.who_has on the Scheduler side;
    if the worker refuses to delete, e.g. because the task is a dependency of
    another task running on it, it will (also asynchronously) inform the scheduler
    to re-add itself to who_has. If the worker agrees to discard the task, there is
    no feedback.

    Parameters
    ----------
    addr : str
        Address of the worker that should drop its replicas.
    keys : list[Key]
        Keys to discard; each must have at least one other replica elsewhere.
    stimulus_id : str
        Identifier of the event that triggered this request (for tracing).
    """
    ws = self.workers[addr]

    # The scheduler immediately forgets about the replica and suggests the worker to
    # drop it. The worker may refuse, at which point it will send back an add-keys
    # message to reinstate it.
    for key in keys:
        ts = self.tasks[key]
        if self.validate:
            # Do not destroy the last copy
            assert ts.who_has
            assert len(ts.who_has) > 1
        self.remove_replica(ts, ws)
    self.stream_comms[addr].send(
        {
            "op": "remove-replicas",
            "keys": keys,
            "stimulus_id": stimulus_id,
        }
    )
def _task_to_report_msg(ts: TaskState) -> dict[str, Any] | None:
if ts.state == "forgotten":
return {"op": "cancelled-keys", "keys": [ts.key], "reason": "already forgotten"}
elif ts.state == "memory":
return {"op": "key-in-memory", "key": ts.key}
elif ts.state == "erred":
failing_ts = ts.exception_blame
assert failing_ts
return {
"op": "task-erred",
"key": ts.key,
"exception": failing_ts.exception,
"traceback": failing_ts.traceback,
}
else:
return None
def _task_to_client_msgs(ts: TaskState) -> Msgs:
    """Fan the report message for *ts* out to every client that wants it.

    Returns an empty mapping when no client wants the task or there is
    nothing to report.
    """
    if not ts.who_wants:
        return {}
    msg = _task_to_report_msg(ts)
    if msg is None:
        return {}
    return {cs.client_key: [msg] for cs in ts.who_wants}
def decide_worker(
    ts: TaskState,
    all_workers: set[WorkerState],
    valid_workers: set[WorkerState] | None,
    objective: Callable[[WorkerState], Any],
) -> WorkerState | None:
    """
    Decide which worker should take task *ts*.

    We choose the worker that has the data on which *ts* depends.

    If several workers have dependencies then we choose the less-busy worker.

    Optionally provide *valid_workers* of where jobs are allowed to occur
    (if all workers are allowed to take the task, pass None instead).

    If the task requires data communication because no eligible worker has
    all the dependencies already, then we choose to minimize the number
    of bytes sent between workers. This is determined by calling the
    *objective* function.

    Returns None when no eligible worker exists.
    """
    assert all(dts.who_has for dts in ts.dependencies)
    if ts.actor:
        # Actors may land anywhere; data locality does not constrain them.
        candidates = all_workers.copy()
    else:
        # Prefer workers that already hold some dependency data.
        candidates = {wws for dts in ts.dependencies for wws in dts.who_has or ()}
        candidates &= all_workers
    if valid_workers is None:
        if not candidates:
            candidates = all_workers.copy()
    else:
        # Restrict to workers satisfying the task's worker/resource restrictions.
        candidates &= valid_workers
        if not candidates:
            candidates = valid_workers
            if not candidates:
                if ts.loose_restrictions:
                    # Restrictions are advisory only: retry without them.
                    return decide_worker(ts, all_workers, None, objective)

    if not candidates:
        return None
    elif len(candidates) == 1:
        return next(iter(candidates))
    else:
        return min(candidates, key=objective)
def validate_task_state(ts: TaskState) -> None:
    """Validate the given TaskState.

    Raises AssertionError (with a descriptive tuple message) on the first
    internal inconsistency found between *ts* and the task/worker/client
    states it references.
    """
    assert ts.state in ALL_TASK_STATES, ts

    # waiting_on / waiters must be subsets of dependencies / dependents.
    if ts.waiting_on:
        assert ts.waiting_on.issubset(ts.dependencies), (
            "waiting not subset of dependencies",
            str(ts.waiting_on),
            str(ts.dependencies),
        )
    if ts.waiters:
        assert ts.waiters.issubset(ts.dependents), (
            "waiters not subset of dependents",
            str(ts.waiters),
            str(ts.dependents),
        )

    # A dependency we are still waiting on must be neither in memory nor released.
    for dts in ts.waiting_on or ():
        assert not dts.who_has, ("waiting on in-memory dep", str(ts), str(dts))
        assert dts.state != "released", ("waiting on released dep", str(ts), str(dts))

    # Dependency links must be symmetric, and live deps must be accounted for.
    for dts in ts.dependencies:
        assert ts in dts.dependents, (
            "not in dependency's dependents",
            str(ts),
            str(dts),
            str(dts.dependents),
        )
        if ts.state in ("waiting", "queued", "processing", "no-worker"):
            # Each dep is either still being waited on or already in memory.
            assert ts.waiting_on and dts in ts.waiting_on or dts.who_has, (
                "dep missing",
                str(ts),
                str(dts),
            )
        assert dts.state != "forgotten"

    for dts in ts.waiters or ():
        assert dts.state in ("waiting", "queued", "processing", "no-worker"), (
            "waiter not in play",
            str(ts),
            str(dts),
        )

    # Dependent links must be symmetric as well.
    for dts in ts.dependents:
        assert ts in dts.dependencies, (
            "not in dependent's dependencies",
            str(ts),
            str(dts),
            str(dts.dependencies),
        )
        assert dts.state != "forgotten"

    # State flags must agree with the state string.
    assert (ts.processing_on is not None) == (ts.state == "processing")
    assert bool(ts.who_has) == (ts.state == "memory"), (ts, ts.who_has, ts.state)

    if ts.state == "queued":
        assert not ts.processing_on
        assert not ts.who_has
        assert all(dts.who_has for dts in ts.dependencies), (
            "task queued without all deps",
            str(ts),
            str(ts.dependencies),
        )

    if ts.state == "processing":
        assert all(dts.who_has for dts in ts.dependencies), (
            "task processing without all deps",
            str(ts),
            str(ts.dependencies),
        )
        assert not ts.waiting_on

    if ts.who_has:
        # An in-memory result must still be needed by someone.
        assert ts.waiters or ts.who_wants, (
            "unneeded task in memory",
            str(ts),
            str(ts.who_has),
        )

    if ts.run_spec:  # was computed
        assert ts.type
        assert isinstance(ts.type, str)
        # Nothing may still be waiting on a computed task.
        assert not any(
            [
                ts in dts.waiting_on
                for dts in ts.dependents
                if dts.waiting_on is not None
            ]
        )

    # Replica bookkeeping must be symmetric with the workers' has_what.
    for ws in ts.who_has:
        assert ts in ws.has_what, (
            "not in who_has' has_what",
            str(ts),
            str(ws),
            str(ws.has_what),
        )

    # Client interest must be symmetric with the clients' wants_what.
    for cs in ts.who_wants or ():
        assert ts in cs.wants_what, (
            "not in who_wants' wants_what",
            str(ts),
            str(cs),
            str(cs.wants_what),
        )

    if ts.actor:
        if ts.state == "memory":
            assert ts.who_has
            # An actor lives on exactly one of its replica holders.
            assert sum(ts in ws.actors for ws in ts.who_has) == 1
        if ts.state == "processing":
            assert ts.processing_on
            assert ts in ts.processing_on.actors
        # Actors bypass the scheduler-side queue entirely.
        assert ts.state != "queued"
def validate_unrunnable(unrunnable: dict[TaskState, float]) -> None:
    """Validate the ``unrunnable`` mapping of no-worker tasks.

    Checks that every task in the mapping is in the ``no-worker`` state and
    that the ``unrunnable_since`` timestamps are monotonically increasing in
    the mapping's insertion order.
    """
    prev_unrunnable_since: float | None = None
    prev_ts: TaskState | None = None
    for ts, unrunnable_since in unrunnable.items():
        assert ts.state == "no-worker"
        if prev_ts is not None:
            assert prev_unrunnable_since is not None
            # Ensure that unrunnable_since is monotonically increasing when iterating over unrunnable.
            # _check_no_workers relies on this.
            assert prev_unrunnable_since <= unrunnable_since, (
                prev_ts,
                ts,
                prev_unrunnable_since,
                unrunnable_since,
            )
        prev_ts = ts
        prev_unrunnable_since = unrunnable_since
def validate_worker_state(ws: WorkerState) -> None:
    """Validate the given WorkerState.

    Checks that replica bookkeeping is symmetric (every task in ``has_what``
    knows this worker in its ``who_has``) and that actor tasks hosted here
    are in a live state.
    """
    for ts in ws.has_what or ():
        assert ts.who_has
        assert ws in ts.who_has, (
            "not in has_what' who_has",
            str(ws),
            str(ts),
            str(ts.who_has),
        )

    for ts in ws.actors:
        assert ts.state in ("memory", "processing")
def validate_state(
    tasks: dict[Key, TaskState],
    workers: dict[str, WorkerState],
    clients: dict[str, ClientState],
) -> None:
    """Validate a current runtime state.

    This performs a sequence of checks on the entire graph, running in about linear
    time. This raises assert errors if anything doesn't check out.
    """
    # Per-object invariants first...
    for ts in tasks.values():
        validate_task_state(ts)

    for ws in workers.values():
        validate_worker_state(ws)

    # ...then cross-check client interest symmetry.
    for cs in clients.values():
        for ts in cs.wants_what or ():
            assert ts.who_wants
            assert cs in ts.who_wants, (
                "not in wants_what' who_wants",
                str(cs),
                str(ts),
                str(ts.who_wants),
            )
def heartbeat_interval(n: int) -> float:
    """Interval in seconds that we desire heartbeats based on number of workers.

    Parameters
    ----------
    n : int
        Current number of connected workers.

    Returns
    -------
    float
        Seconds between heartbeats. Small clusters heartbeat frequently; for
        large clusters the interval grows linearly so the scheduler handles
        no more than ~200 heartbeats per second in aggregate.
    """
    if n <= 10:
        return 0.5
    elif n < 50:
        return 1.0  # float literal to match the declared return type
    elif n < 200:
        return 2.0
    else:
        # No more than 200 heartbeats a second scaled by workers
        return n / 200 + 1
def _task_slots_available(ws: WorkerState, saturation_factor: float) -> int:
"""Number of tasks that can be sent to this worker without oversaturating it"""
assert not math.isinf(saturation_factor)
return max(math.ceil(saturation_factor * ws.nthreads), 1) - (
len(ws.processing) - len(ws.long_running)
)
def _worker_full(ws: WorkerState, saturation_factor: float) -> bool:
    """Whether this worker has no task slots left at the given saturation factor.

    An infinite saturation factor means workers are never considered full.
    """
    return (
        not math.isinf(saturation_factor)
        and _task_slots_available(ws, saturation_factor) <= 0
    )
| Scheduler |
python | huggingface__transformers | src/transformers/models/nllb_moe/modeling_nllb_moe.py | {
"start": 15215,
"end": 16147
} | class ____(nn.Module):
def __init__(self, config: NllbMoeConfig, ffn_dim: int):
super().__init__()
self.fc1 = nn.Linear(config.d_model, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, config.d_model)
self.dropout = nn.Dropout(config.activation_dropout)
self.act = ACT2FN[config.activation_function]
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.fc1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if (
isinstance(self.fc2.weight, torch.Tensor)
and hidden_states.dtype != self.fc2.weight.dtype
and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
):
hidden_states = hidden_states.to(self.fc2.weight.dtype)
hidden_states = self.fc2(hidden_states)
return hidden_states
| NllbMoeDenseActDense |
python | pypa__pip | src/pip/_vendor/rich/progress.py | {
"start": 29120,
"end": 30038
} | class ____(ProgressColumn):
"""Renders completed count/total, e.g. ' 10/1000'.
Best for bounded tasks with int quantities.
Space pads the completed count so that progress length does not change as task progresses
past powers of 10.
Args:
separator (str, optional): Text to separate completed and total values. Defaults to "/".
"""
def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
self.separator = separator
super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
"""Show completed/total."""
completed = int(task.completed)
total = int(task.total) if task.total is not None else "?"
total_width = len(str(total))
return Text(
f"{completed:{total_width}d}{self.separator}{total}",
style="progress.download",
)
| MofNCompleteColumn |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-inversions.py | {
"start": 3138,
"end": 3696
} | class ____(object):
def numberOfPermutations(self, n, requirements):
"""
:type n: int
:type requirements: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
lookup = [-1]*n
for i, c in requirements:
lookup[i] = c
dp = [0]*(lookup[-1]+1)
dp[0] = 1
for i in xrange(n):
dp = [reduce(lambda total, k: (total+dp[j-k])%MOD, xrange(min(i+1, j+1)), 0) if lookup[i] == -1 or lookup[i] == j else 0 for j in xrange(len(dp))]
return dp[-1]%MOD
| Solution4 |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 51084,
"end": 51468
} | class ____(
axis_ticks_length_minor_x, axis_ticks_length_minor_y
):
"""
Axis minor-tick length
Parameters
----------
theme_element : float | complex
Value in points. A negative value creates the ticks
inside the plot panel. A complex value (e.g. `3j`)
creates ticks that span both in and out of the panel.
"""
| axis_ticks_length_minor |
python | pandas-dev__pandas | pandas/tests/indexes/interval/test_constructors.py | {
"start": 10629,
"end": 11996
} | class ____(ConstructorTests):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
"""Fixture for IntervalIndex.from_breaks constructor"""
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_breaks
"""
return {"breaks": breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list("01234abcde"), ordered=True)
msg = (
"category, object, and string subtypes are not supported for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
def test_left_right_dont_share_data(self):
# GH#36310
breaks = np.arange(5)
result = IntervalIndex.from_breaks(breaks)._data
assert result._left.base is None or result._left.base is not result._right.base
| TestFromBreaks |
python | ray-project__ray | python/ray/util/collective/types.py | {
"start": 4222,
"end": 4339
} | class ____:
src_rank = 0
src_gpu_index = 0
n_elements = 0
unset_timeout_ms = unset_timeout_ms
| RecvOptions |
python | numpy__numpy | numpy/_core/tests/test_dtype.py | {
"start": 58480,
"end": 64374
} | class ____:
"""Test cases related to more complex DType promotions. Further promotion
tests are defined in `test_numeric.py`
"""
@pytest.mark.parametrize(["other", "expected"],
[(2**16 - 1, np.complex64),
(2**32 - 1, np.complex64),
(np.float16(2), np.complex64),
(np.float32(2), np.complex64),
(np.longdouble(2), np.clongdouble),
# Base of the double value to sidestep any rounding issues:
(np.longdouble(np.nextafter(1.7e308, 0.)), np.clongdouble),
# Additionally use "nextafter" so the cast can't round down:
(np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
# repeat for complex scalars:
(np.complex64(2), np.complex64),
(np.clongdouble(2), np.clongdouble),
# Base of the double value to sidestep any rounding issues:
(np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.clongdouble),
# Additionally use "nextafter" so the cast can't round down:
(np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
])
def test_complex_other_value_based(self, other, expected):
# This would change if we modify the value based promotion
min_complex = np.dtype(np.complex64)
res = np.result_type(other, min_complex)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
assert res == expected
@pytest.mark.parametrize(["other", "expected"],
[(np.bool, np.complex128),
(np.int64, np.complex128),
(np.float16, np.complex64),
(np.float32, np.complex64),
(np.float64, np.complex128),
(np.longdouble, np.clongdouble),
(np.complex64, np.complex64),
(np.complex128, np.complex128),
(np.clongdouble, np.clongdouble),
])
def test_complex_scalar_value_based(self, other, expected):
# This would change if we modify the value based promotion
complex_scalar = 1j
res = np.result_type(other, complex_scalar)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
assert res == expected
def test_complex_pyscalar_promote_rational(self):
with pytest.raises(TypeError,
match=r".* no common DType exists for the given inputs"):
np.result_type(1j, rational)
with pytest.raises(TypeError,
match=r".* no common DType exists for the given inputs"):
np.result_type(1j, rational(1, 2))
@pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2 * 100])
def test_python_integer_promotion(self, val):
# If we only pass scalars (mainly python ones!), NEP 50 means
# that we get the default integer
expected_dtype = np.dtype(int) # the default integer
assert np.result_type(val, 0) == expected_dtype
# With NEP 50, the NumPy scalar wins though:
assert np.result_type(val, np.int8(0)) == np.int8
@pytest.mark.parametrize(["other", "expected"],
[(1, rational), (1., np.float64)])
def test_float_int_pyscalar_promote_rational(self, other, expected):
# Note that rationals are a bit awkward as they promote with float64
# or default ints, but not float16 or uint8/int8 (which looks
# inconsistent here). The new promotion fixed this (partially?)
assert np.result_type(other, rational) == expected
assert np.result_type(other, rational(1, 2)) == expected
@pytest.mark.parametrize(["dtypes", "expected"], [
# These promotions are not associative/commutative:
([np.uint16, np.int16, np.float16], np.float32),
([np.uint16, np.int8, np.float16], np.float32),
([np.uint8, np.int16, np.float16], np.float32),
# The following promotions are not ambiguous, but cover code
# paths of abstract promotion (no particular logic being tested)
([1, 1, np.float64], np.float64),
([1, 1., np.complex128], np.complex128),
([1, 1j, np.float64], np.complex128),
([1., 1., np.int64], np.float64),
([1., 1j, np.float64], np.complex128),
([1j, 1j, np.float64], np.complex128),
([1, True, np.bool], np.int_),
])
def test_permutations_do_not_influence_result(self, dtypes, expected):
# Tests that most permutations do not influence the result. In the
# above some uint and int combinations promote to a larger integer
# type, which would then promote to a larger than necessary float.
for perm in permutations(dtypes):
assert np.result_type(*perm) == expected
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
assert_raises(OverflowError, a, 'int8')
# test that dtype detection finds user-defined types
x = rational(1)
assert_equal(np.array([x, x]).dtype, np.dtype(rational))
def test_dtypes_are_true():
# test for gh-6294
assert bool(np.dtype('f8'))
assert bool(np.dtype('i8'))
assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
def test_invalid_dtype_string():
# test for gh-10440
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
assert_raises(TypeError, np.dtype, 'Fl\xfcgel')
def test_keyword_argument():
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
| TestPromotion |
python | pypa__build | src/build/env.py | {
"start": 1519,
"end": 4332
} | class ____(IsolatedEnv):
"""
Isolated environment which supports several different underlying implementations.
"""
def __init__(
self,
*,
installer: Installer = 'pip',
) -> None:
self.installer: Installer = installer
def __enter__(self) -> DefaultIsolatedEnv:
try:
path = tempfile.mkdtemp(prefix='build-env-')
# Call ``realpath`` to prevent spurious warning from being emitted
# that the venv location has changed on Windows for the venv impl.
# The username is DOS-encoded in the output of tempfile - the location is the same
# but the representation of it is different, which confuses venv.
# Ref: https://bugs.python.org/issue46171
path = os.path.realpath(path)
self._path = path
self._env_backend: _EnvBackend
# uv is opt-in only.
if self.installer == 'uv':
self._env_backend = _UvBackend()
else:
self._env_backend = _PipBackend()
_ctx.log(f'Creating isolated environment: {self._env_backend.display_name}...')
self._env_backend.create(self._path)
except Exception: # cleanup folder if creation fails
self.__exit__(*sys.exc_info())
raise
return self
def __exit__(self, *args: object) -> None:
if os.path.exists(self._path): # in case the user already deleted skip remove
shutil.rmtree(self._path)
@property
def path(self) -> str:
"""The location of the isolated build environment."""
return self._path
@property
def python_executable(self) -> str:
"""The python executable of the isolated build environment."""
return self._env_backend.python_executable
def make_extra_environ(self) -> dict[str, str]:
path = os.environ.get('PATH')
return {
'PATH': os.pathsep.join([self._env_backend.scripts_dir, path])
if path is not None
else self._env_backend.scripts_dir
}
def install(self, requirements: Collection[str]) -> None:
"""
Install packages from PEP 508 requirements in the isolated build environment.
:param requirements: PEP 508 requirement specification to install
:note: Passing non-PEP 508 strings will result in undefined behavior, you *should not* rely on it. It is
merely an implementation detail, it may change any time without warning.
"""
if not requirements:
return
_ctx.log('Installing packages in isolated environment:\n' + '\n'.join(f'- {r}' for r in sorted(requirements)))
self._env_backend.install_requirements(requirements)
| DefaultIsolatedEnv |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py | {
"start": 38881,
"end": 40806
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of the Qwen2_5OmniToken2WavBigVGAN module used in the Qwen2.5-Omni-Token2Wav model.
It defines the architecture of the BigVGAN model, which is used for converting mel-spectrograms to waveforms.
Args:
mel_dim (`int`, *optional*, defaults to 80):
The dimension of the mel-spectrogram.
upsample_initial_channel (`int`, *optional*, defaults to 1536):
The number of channels in the initial upsampling layer.
resblock_kernel_sizes (`list[int]`, *optional*, defaults to `[3, 7, 11]`):
A list of kernel sizes for each residual block.
resblock_dilation_sizes (`list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A list of dilation sizes for each residual block.
upsample_rates (`list[int]`, *optional*, defaults to `[5, 3, 2, 2, 2, 2]`):
A list of upsampling rates for each upsampling layer.
upsample_kernel_sizes (`list[int]`, *optional*, defaults to `[11, 7, 4, 4, 4, 4]`):
A list of kernel sizes for each upsampling layer.
"""
model_type = "qwen2_5_omni_bigvgan"
def __init__(
self,
mel_dim=80,
upsample_initial_channel=1536,
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
upsample_rates=[5, 3, 2, 2, 2, 2],
upsample_kernel_sizes=[11, 7, 4, 4, 4, 4],
**kwargs,
):
self.mel_dim = mel_dim
self.upsample_initial_channel = upsample_initial_channel
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
super().__init__(**kwargs)
| Qwen2_5OmniBigVGANConfig |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 10010,
"end": 10282
} | class ____(sqltypes.TEXT):
"""Provide the PostgreSQL CITEXT type.
.. versionadded:: 2.0.7
"""
__visit_name__ = "CITEXT"
def coerce_compared_value(
self, op: Optional[OperatorType], value: Any
) -> TypeEngine[Any]:
return self
| CITEXT |
python | kubernetes-client__python | kubernetes/base/config/kube_config.py | {
"start": 5616,
"end": 7009
} | class ____(object):
def __init__(self, cmd, args, tokenKey, expiryKey):
self._cmd = cmd
self._args = args
if not tokenKey:
self._tokenKey = '{.access_token}'
else:
self._tokenKey = tokenKey
if not expiryKey:
self._expiryKey = '{.token_expiry}'
else:
self._expiryKey = expiryKey
def token(self):
fullCmd = self._cmd + (" ") + " ".join(self._args)
process = subprocess.Popen(
[self._cmd] + self._args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
(stdout, stderr) = process.communicate()
exit_code = process.wait()
if exit_code != 0:
msg = 'cmd-path: process returned %d' % exit_code
msg += "\nCmd: %s" % fullCmd
stderr = stderr.strip()
if stderr:
msg += '\nStderr: %s' % stderr
raise ConfigException(msg)
try:
data = json.loads(stdout)
except ValueError as de:
raise ConfigException(
'exec: failed to decode process output: %s' % de)
A = namedtuple('A', ['token', 'expiry'])
return A(
token=data['credential']['access_token'],
expiry=parse_rfc3339(data['credential']['token_expiry']))
| CommandTokenSource |
python | allegroai__clearml | clearml/utilities/gpu/gpustat.py | {
"start": 640,
"end": 4755
} | class ____(object):
def __init__(self, entry: dict) -> None:
if not isinstance(entry, dict):
raise TypeError("entry should be a dict, {} given".format(type(entry)))
self.entry = entry
def keys(self) -> Any:
return self.entry.keys()
def get(self, key: Any, default: Any = None) -> Any:
return self.entry.get(key, default)
def __getitem__(self, key: Any) -> Any:
return self.entry[key]
@property
def index(self) -> int:
"""
Returns the index of GPU (as in nvidia-smi).
"""
return self.entry["index"]
@property
def uuid(self) -> str:
"""
Returns the uuid returned by nvidia-smi,
e.g. GPU-12345678-abcd-abcd-uuid-123456abcdef
"""
return self.entry["uuid"]
@property
def mig_index(self) -> Optional[int]:
"""
Returns the index of the MIG partition (as in nvidia-smi).
"""
return self.entry.get("mig_index")
@property
def mig_uuid(self) -> str:
"""
Returns the uuid of the MIG partition returned by nvidia-smi when running in MIG mode,
e.g. MIG-12345678-abcd-abcd-uuid-123456abcdef
"""
return self.entry.get("mig_uuid")
@property
def name(self) -> str:
"""
Returns the name of GPU card (e.g. Geforce Titan X)
"""
return self.entry["name"]
@property
def memory_total(self) -> int:
"""
Returns the total memory (in MB) as an integer.
"""
return int(self.entry["memory.total"])
@property
def memory_used(self) -> int:
"""
Returns the occupied memory (in MB) as an integer.
"""
return int(self.entry["memory.used"])
@property
def memory_free(self) -> int:
"""
Returns the free (available) memory (in MB) as an integer.
"""
v = self.memory_total - self.memory_used
return max(v, 0)
@property
def memory_available(self) -> int:
"""
Returns the available memory (in MB) as an integer.
Alias of memory_free.
"""
return self.memory_free
@property
def temperature(self) -> Optional[int]:
"""
Returns the temperature (in celcius) of GPU as an integer,
or None if the information is not available.
"""
v = self.entry["temperature.gpu"]
return int(v) if v is not None else None
@property
def fan_speed(self) -> Optional[int]:
"""
Returns the fan speed percentage (0-100) of maximum intended speed
as an integer, or None if the information is not available.
"""
v = self.entry["fan.speed"]
return int(v) if v is not None else None
@property
def utilization(self) -> Optional[int]:
"""
Returns the GPU utilization (in percentile),
or None if the information is not available.
"""
v = self.entry["utilization.gpu"]
return int(v) if v is not None else None
@property
def power_draw(self) -> Optional[int]:
"""
Returns the GPU power usage in Watts,
or None if the information is not available.
"""
v = self.entry["power.draw"]
return int(v) if v is not None else None
@property
def power_limit(self) -> Optional[int]:
"""
Returns the (enforced) GPU power limit in Watts,
or None if the information is not available.
"""
v = self.entry["enforced.power.limit"]
return int(v) if v is not None else None
@property
def processes(self) -> list:
"""
Get the list of running processes on the GPU.
"""
return self.entry["processes"]
def jsonify(self) -> dict:
o = dict(self.entry)
if self.entry["processes"] is not None:
o["processes"] = [{k: v for (k, v) in p.items() if k != "gpu_uuid"} for p in self.entry["processes"]]
else:
o["processes"] = "({})".format(NOT_SUPPORTED)
return o
| GPUStat |
python | getsentry__sentry | src/sentry/api/endpoints/organization_profiling_profiles.py | {
"start": 1018,
"end": 1196
} | class ____(OrganizationEventsV2EndpointBase):
owner = ApiOwner.PROFILING
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
| OrganizationProfilingBaseEndpoint |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/data_forwarding_details.py | {
"start": 2366,
"end": 13421
} | class ____(OrganizationEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"PUT": ApiPublishStatus.EXPERIMENTAL,
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (OrganizationDataForwardingDetailsPermission,)
def convert_args(
self,
request: Request,
organization_id_or_slug: int | str,
data_forwarder_id: int,
*args,
**kwargs,
):
args, kwargs = super().convert_args(request, organization_id_or_slug, *args, **kwargs)
if not features.has("organizations:data-forwarding-revamp-access", kwargs["organization"]):
raise PermissionDenied
if request.method == "PUT" and not features.has(
"organizations:data-forwarding", kwargs["organization"]
):
raise PermissionDenied
try:
data_forwarder = DataForwarder.objects.get(
id=data_forwarder_id,
organization=kwargs["organization"],
)
except DataForwarder.DoesNotExist:
raise ResourceDoesNotExist
kwargs["data_forwarder"] = data_forwarder
return args, kwargs
def _update_data_forwarder_config(
self, request: Request, organization: Organization, data_forwarder: DataForwarder
) -> Response | None:
"""
Request body: {"is_enabled": true, "enroll_new_projects": true, "provider": "segment", "config": {...}, "project_ids": [1, 2, 3]}
Returns:
Response: 200 OK with serialized data forwarder on success
None: If validation fails (signals caller to try other operations)
"""
data: dict[str, Any] = request.data
data["organization_id"] = organization.id
serializer = DataForwarderSerializer(
data_forwarder, data=data, context={"organization": organization}
)
if serializer.is_valid():
data_forwarder = serializer.save()
return Response(
serialize(data_forwarder, request.user),
status=status.HTTP_200_OK,
)
# Validation failed - return None to signal caller ddto try other operations
return None
def _validate_enrollment_changes(
self,
request: Request,
organization: Organization,
data_forwarder: DataForwarder,
) -> tuple[set[int], set[int]]:
"""
Request body: {"project_ids": [1, 2, 3]}
Validates enrollment changes:
- project IDs to be enrolled exist in the organization
- User has project:write on projects being enrolled
- User has project:write on projects being unenrolled
Returns:
Tuple of (project_ids_to_enroll, project_ids_to_unenroll)
"""
project_ids_new: set[int] = set(request.data.get("project_ids", []))
project_ids_current: set[int] = set(
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, is_enabled=True
).values_list("project_id", flat=True)
)
project_ids_to_enroll: set[int] = project_ids_new - project_ids_current
project_ids_to_unenroll: set[int] = project_ids_current - project_ids_new
all_project_ids: set[int] = project_ids_new | project_ids_to_unenroll
all_projects_by_id: dict[int, Project] = {
project.id: project
for project in Project.objects.filter(
organization_id=organization.id, id__in=all_project_ids
)
}
# Validate new project IDs being enrolled exist in the organization
missing_ids: set[int] = project_ids_to_enroll - all_projects_by_id.keys()
if missing_ids:
raise serializers.ValidationError(
{
"project_ids": [
f"Invalid project IDs for this organization: {', '.join(map(str, missing_ids))}"
]
}
)
# Validate permissions on all projects
# org:write users can enroll/unenroll any project in the organization
# project:write users need explicit permission on each project
if not request.access.has_scope("org:write"):
unauthorized_project_ids: set[int] = {
project_id
for project_id in all_projects_by_id.keys()
if not request.access.has_project_scope(
all_projects_by_id[project_id], "project:write"
)
}
if unauthorized_project_ids:
raise PermissionDenied(
detail={
"project_ids": [
f"Insufficient access to projects: {', '.join(map(str, unauthorized_project_ids))}"
]
}
)
return project_ids_to_enroll, project_ids_to_unenroll
def _update_enrollment(
self,
request: Request,
organization: Organization,
data_forwarder: DataForwarder,
) -> Response:
"""
Request body: {"project_ids": [1, 2, 3]}
"""
project_ids_to_enroll, project_ids_to_unenroll = self._validate_enrollment_changes(
request, organization, data_forwarder
)
with transaction.atomic(router.db_for_write(DataForwarderProject)):
existing_data_forwarder_projects: set[int] = set(
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, project_id__in=project_ids_to_enroll
).values_list("project_id", flat=True)
)
new_data_forwarder_projects: set[int] = (
project_ids_to_enroll - existing_data_forwarder_projects
)
DataForwarderProject.objects.bulk_create(
[
DataForwarderProject(
data_forwarder=data_forwarder,
project_id=project_id,
is_enabled=True,
)
for project_id in new_data_forwarder_projects
]
)
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, project_id__in=existing_data_forwarder_projects
).update(is_enabled=True)
DataForwarderProject.objects.filter(
data_forwarder=data_forwarder, project_id__in=project_ids_to_unenroll
).update(is_enabled=False)
return Response(
serialize(data_forwarder, request.user),
status=status.HTTP_200_OK,
)
def _update_single_project_configuration(
self,
request: Request,
organization: Organization,
data_forwarder: DataForwarder,
) -> Response:
"""
Request body: {"project_id": 1, "overrides": {...}, "is_enabled": true}
"""
project_id: int = request.data["project_id"]
try:
# Update existing configuration
project_config = DataForwarderProject.objects.get(
data_forwarder=data_forwarder,
project_id=project_id,
)
serializer = DataForwarderProjectSerializer(
project_config,
data={
"data_forwarder_id": data_forwarder.id,
"project": project_id,
"overrides": request.data.get("overrides", {}),
"is_enabled": request.data.get("is_enabled", project_config.is_enabled),
},
context={"organization": organization, "access": request.access},
)
except DataForwarderProject.DoesNotExist:
# Create new configuration
serializer = DataForwarderProjectSerializer(
data={
"data_forwarder_id": data_forwarder.id,
"project": project_id,
"overrides": request.data.get("overrides", {}),
"is_enabled": request.data.get("is_enabled", True),
},
context={"organization": organization, "access": request.access},
)
if serializer.is_valid():
serializer.save()
return Response(
serialize(data_forwarder, request.user),
status=status.HTTP_200_OK,
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@set_referrer_policy("strict-origin-when-cross-origin")
@method_decorator(never_cache)
@extend_schema(
operation_id="Update a Data Forwarding Configuration for an Organization",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=DataForwarderSerializer,
responses={
200: DataForwarderModelSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
)
def put(
self, request: Request, organization: Organization, data_forwarder: DataForwarder
) -> Response:
# Determine operation type based on request body
has_project_ids = "project_ids" in request.data
has_project_id = "project_id" in request.data
if has_project_ids and has_project_id:
raise serializers.ValidationError(
"Cannot specify both 'project_ids' and 'project_id'. "
"Use 'project_ids' for bulk enrollment or 'project_id' with 'overrides' for single project update."
)
# org:write users can perform all operations
# Try to update main config first - if serializer is valid, use that
# Otherwise fall through to project-specific operations
if request.access.has_scope("org:write"):
response = self._update_data_forwarder_config(request, organization, data_forwarder)
if response is not None:
return response
# Project-specific operations
if has_project_ids:
return self._update_enrollment(request, organization, data_forwarder)
elif has_project_id:
return self._update_single_project_configuration(request, organization, data_forwarder)
else:
raise serializers.ValidationError(
"Must specify provider, config, project_ids, etc. for main config update, "
"'project_ids' for bulk enrollment, or 'project_id' for single project update."
)
@extend_schema(
operation_id="Delete a Data Forwarding Configuration for an Organization",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
responses={
204: RESPONSE_NO_CONTENT,
403: RESPONSE_FORBIDDEN,
},
)
def delete(
self, request: Request, organization: Organization, data_forwarder: DataForwarder
) -> Response:
data_forwarder.delete()
return self.respond(status=status.HTTP_204_NO_CONTENT)
| DataForwardingDetailsEndpoint |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 40996,
"end": 41352
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("tr_TR")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in TrTrProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in TrTrProvider.MONTH_NAMES.values()
| TestTrTr |
python | explosion__spaCy | spacy/lang/grc/__init__.py | {
"start": 508,
"end": 620
} | class ____(Language):
lang = "grc"
Defaults = AncientGreekDefaults
__all__ = ["AncientGreek"]
| AncientGreek |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/rendering.py | {
"start": 25975,
"end": 29792
} | class ____(
gym.Wrapper[ObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs
):
"""Randomly obstructs rendering with white noise patches.
If used with ``render_mode="rgb_array"`` and ``AddRenderObservation``, it will
make observations noisy.
The number of patches depends on how many pixels we want to obstruct.
Depending on the size of the patches, the environment may become
partially-observable, turning the MDP into a POMDP.
Example - Obstruct 50% of the pixels with patches of size 50x50 pixels:
>>> env = gym.make("LunarLander-v3", render_mode="rgb_array")
>>> env = ObstructView(env, obstructed_pixels_ratio=0.5, obstruction_width=50)
>>> env = HumanRendering(env)
>>> obs, _ = env.reset(seed=123)
>>> obs, *_ = env.step(env.action_space.sample())
"""
def __init__(
self,
env: gym.Env[ObsType, ActType],
obstructed_pixels_ratio: float,
obstruction_width: int,
is_noise_grayscale: bool = False,
):
"""Wrapper obstructs pixels with white noise patches.
Args:
env: The environment that is being wrapped
obstructed_pixels_ratio: the percentage of pixels obstructed with white noise
obstruction_width: the width of the obstruction patches
is_noise_grayscale: if True, RGB noise is converted to grayscale
"""
if not 0 <= obstructed_pixels_ratio < 1:
raise ValueError(
f"obstructed_pixels_ratio should be in the interval [0,1). Received {obstructed_pixels_ratio}"
)
if obstruction_width < 1:
raise ValueError(
f"obstruction_width should be larger or equal than 1. Received {obstruction_width}"
)
gym.utils.RecordConstructorArgs.__init__(
self,
obstructed_pixels_ratio=obstructed_pixels_ratio,
obstruction_width=obstruction_width,
is_noise_grayscale=is_noise_grayscale,
)
gym.Wrapper.__init__(self, env)
self.obstruction_centers_ratio = obstructed_pixels_ratio / obstruction_width**2
self.obstruction_width = obstruction_width
self.is_noise_grayscale = is_noise_grayscale
def render(self) -> RenderFrame:
"""Compute the render frames as specified by render_mode attribute during initialization of the environment, then add white noise patches."""
render_out = super().render()
render_shape = render_out.shape
n_pixels = render_shape[0] * render_shape[1]
n_obstructions = int(n_pixels * self.obstruction_centers_ratio)
centers = self.np_random.integers(0, n_pixels, n_obstructions)
centers = np.unravel_index(centers, (render_shape[0], render_shape[1]))
mask = np.zeros((render_shape[0], render_shape[1]), dtype=bool)
low = self.obstruction_width // 2
high = self.obstruction_width - low
for x, y in zip(*centers):
mask[
max(x - low, 0) : min(x + high, render_shape[0]),
max(y - low, 0) : min(y + high, render_shape[1]),
] = True
if self.is_noise_grayscale:
noise = (
self.np_random.integers(
(0, 0, 0),
255 * np.array([0.2989, 0.5870, 0.1140]),
size=render_out.shape,
dtype=np.uint8,
)
.sum(-1, keepdims=True)
.repeat(3, -1)
)
else:
noise = self.np_random.integers(
0,
255,
size=render_out.shape,
dtype=np.uint8,
)
return np.where(mask[..., None], noise, render_out)
| ObstructView |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_details.py | {
"start": 71321,
"end": 72228
} | class ____(APITestCase, ABC):
endpoint = "sentry-api-0-project-details"
method = "put"
def setUp(self) -> None:
self.org_slug = self.project.organization.slug
self.proj_slug = self.project.slug
self.login_as(user=self.user)
self._apply_old_date_to_project_and_org()
def _apply_old_date_to_project_and_org(self):
# We have to create the project and organization in the past, since we boost new orgs and projects to 100%
# automatically.
old_date = datetime.now(tz=timezone.utc) - timedelta(
minutes=NEW_MODEL_THRESHOLD_IN_MINUTES + 1
)
# We have to actually update the underneath db models because they are re-fetched, otherwise just the in-memory
# copy is mutated.
self.project.organization.update(date_added=old_date)
self.project.update(date_added=old_date)
| TestProjectDetailsBase |
python | coleifer__peewee | peewee.py | {
"start": 170882,
"end": 171589
} | class ____(Field):
field_type = 'UUID'
def db_value(self, value):
if isinstance(value, basestring) and len(value) == 32:
# Hex string. No transformation is necessary.
return value
elif isinstance(value, bytes) and len(value) == 16:
# Allow raw binary representation.
value = uuid.UUID(bytes=value)
if isinstance(value, uuid.UUID):
return value.hex
try:
return uuid.UUID(value).hex
except:
return value
def python_value(self, value):
if isinstance(value, uuid.UUID):
return value
return uuid.UUID(value) if value is not None else None
| UUIDField |
python | dask__distributed | distributed/http/scheduler/json.py | {
"start": 1746,
"end": 2159
} | class ____(RequestHandler):
@log_errors
def get(self):
r = [url[5:] for url, _, _ in routes if url.endswith(".json")]
self.render(
"json-index.html", routes=r, title="Index of JSON routes", **self.extra
)
routes: list[tuple] = [
(r"json/counts.json", CountsJSON, {}),
(r"json/identity.json", IdentityJSON, {}),
(r"json/index.html", IndexJSON, {}),
]
| IndexJSON |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 117161,
"end": 117678
} | class ____(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
| GunicornServer |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/misc_execution_tests/test_asset_backfill.py | {
"start": 3239,
"end": 114658
} | class ____(NamedTuple):
assets_by_repo_name: Mapping[str, Sequence[dg.AssetsDefinition]]
evaluation_time: datetime.datetime
# when backfilling "some" partitions, the subset of partitions of root assets in the backfill
# to target:
target_root_partition_keys: Optional[Sequence[str]]
last_storage_id_cursor_offset: Optional[int]
def scenario(
assets: Union[Mapping[str, Sequence[dg.AssetsDefinition]], Sequence[dg.AssetsDefinition]],
evaluation_time: Optional[datetime.datetime] = None,
target_root_partition_keys: Optional[Sequence[str]] = None,
last_storage_id_cursor_offset: Optional[int] = None,
) -> AssetBackfillScenario:
if isinstance(assets, list):
assets_by_repo_name = {"repo": assets}
else:
assets_by_repo_name = assets
return AssetBackfillScenario(
assets_by_repo_name=cast("Mapping[str, Sequence[AssetsDefinition]]", assets_by_repo_name),
evaluation_time=evaluation_time if evaluation_time else get_current_datetime(),
target_root_partition_keys=target_root_partition_keys,
last_storage_id_cursor_offset=last_storage_id_cursor_offset,
)
scenarios = {
"one_asset_one_partition": scenario(one_asset_one_partition),
"one_asset_one_partition_cursor_offset": scenario(
one_asset_one_partition, last_storage_id_cursor_offset=100
),
"one_asset_two_partitions": scenario(one_asset_two_partitions),
"two_assets_in_sequence_one_partition": scenario(two_assets_in_sequence_one_partition),
"two_assets_in_sequence_one_partition_cross_repo": scenario(
{
"repo1": [two_assets_in_sequence_one_partition[0]],
"repo2": [two_assets_in_sequence_one_partition[1]],
},
),
"two_assets_in_sequence_one_partition_cross_repo_cursor_offset": scenario(
{
"repo1": [two_assets_in_sequence_one_partition[0]],
"repo2": [two_assets_in_sequence_one_partition[1]],
},
last_storage_id_cursor_offset=100,
),
"two_assets_in_sequence_two_partitions": scenario(two_assets_in_sequence_two_partitions),
"two_assets_in_sequence_two_partitions_cursor_offset": scenario(
two_assets_in_sequence_two_partitions, last_storage_id_cursor_offset=100
),
"two_assets_in_sequence_fan_in_partitions": scenario(two_assets_in_sequence_fan_in_partitions),
"two_assets_in_sequence_fan_out_partitions": scenario(
two_assets_in_sequence_fan_out_partitions
),
"one_asset_self_dependency": scenario(
one_asset_self_dependency, create_datetime(year=2020, month=1, day=7, hour=4)
),
"self_dependant_asset_with_grouped_run_backfill_policy": scenario(
self_dependant_asset_with_grouped_run_backfill_policy,
create_datetime(year=2023, month=1, day=10),
),
"self_dependant_asset_with_single_run_backfill_policy": scenario(
self_dependant_asset_with_single_run_backfill_policy,
create_datetime(year=2023, month=1, day=10),
),
"non_partitioned_after_partitioned": scenario(
non_partitioned_after_partitioned, create_datetime(year=2020, month=1, day=7, hour=4)
),
"partitioned_after_non_partitioned": scenario(
partitioned_after_non_partitioned,
create_datetime(year=2020, month=1, day=7, hour=4),
),
"unpartitioned_after_dynamic_asset": scenario(unpartitioned_after_dynamic_asset),
"two_dynamic_assets": scenario(two_dynamic_assets),
"hourly_to_daily_partitions": scenario(
hourly_to_daily_partitions,
create_datetime(year=2013, month=1, day=7, hour=0),
target_root_partition_keys=[
"2013-01-05-22:00",
"2013-01-05-23:00",
"2013-01-06-00:00",
"2013-01-06-01:00",
],
),
"daily_to_hourly_partitions_non_contiguous": scenario(
daily_to_hourly_partitions,
create_datetime(year=2013, month=1, day=8, hour=0),
target_root_partition_keys=[
"2013-01-05",
"2013-01-07",
],
),
"root_assets_different_partitions": scenario(root_assets_different_partitions_same_downstream),
"hourly_with_nonexistent_downstream_daily_partition": scenario(
hourly_to_daily_partitions,
create_datetime(year=2013, month=1, day=7, hour=10),
target_root_partition_keys=[
"2013-01-07-05:00",
],
),
"multipartitioned_self_dependency": scenario(
multipartitioned_self_dependency, create_datetime(year=2020, month=1, day=7, hour=4)
),
}
@pytest.mark.parametrize(
"scenario_name, partition_keys, expected_target_asset_partitions",
[
(
"two_assets_in_sequence_fan_in_partitions",
["a_1", "a_2"],
[("asset1", "a_1"), ("asset1", "a_2"), ("asset2", "a")],
),
(
"two_assets_in_sequence_fan_out_partitions",
["a"],
[("asset1", "a"), ("asset2", "a_1"), ("asset2", "a_2"), ("asset2", "a_3")],
),
(
"non_partitioned_after_partitioned",
["2020-01-01", "2020-01-02"],
[("asset1", "2020-01-01"), ("asset1", "2020-01-02"), ("asset2", None)],
),
(
"partitioned_after_non_partitioned",
["2020-01-01", "2020-01-02"],
[
("asset1", None),
("asset2", None),
("asset3", "2020-01-01"),
("asset3", "2020-01-02"),
],
),
],
)
def test_from_asset_partitions_target_subset(
scenario_name, partition_keys, expected_target_asset_partitions
):
assets_by_repo_name = scenarios[scenario_name].assets_by_repo_name
asset_graph = get_asset_graph(assets_by_repo_name)
backfill_data = AssetBackfillData.from_asset_partitions(
partition_names=partition_keys,
asset_graph=asset_graph,
asset_selection=list(asset_graph.materializable_asset_keys),
dynamic_partitions_store=MagicMock(),
all_partitions=False,
backfill_start_timestamp=scenarios[scenario_name].evaluation_time.timestamp(),
)
assert backfill_data.target_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(dg.AssetKey(asset_key_str), partition_key)
for asset_key_str, partition_key in expected_target_asset_partitions
},
asset_graph=asset_graph,
)
def _get_asset_graph_view(
instance: DagsterInstance,
asset_graph: BaseAssetGraph,
evaluation_time: Optional[datetime.datetime] = None,
) -> AssetGraphView:
return AssetGraphView(
temporal_context=TemporalContext(
effective_dt=evaluation_time or get_current_datetime(), last_event_id=None
),
instance=instance,
asset_graph=asset_graph,
)
def _get_instance_queryer(
instance: DagsterInstance, asset_graph: BaseAssetGraph, evaluation_time: datetime.datetime
) -> CachingInstanceQueryer:
return _get_asset_graph_view(
instance, asset_graph, evaluation_time
).get_inner_queryer_for_back_compat()
def _launch_runs(
run_requests,
backfill_id,
asset_graph: RemoteWorkspaceAssetGraph,
instance,
assets_by_repo_name,
fail_idxs: Optional[set[int]] = None,
):
for idx, run_request in enumerate(run_requests):
asset_keys = run_request.asset_selection
assert asset_keys is not None
for idx, run_request in enumerate(
# very janky sort key, just make sure that the partition range and the asset keys are involved
sorted(run_requests, key=lambda x: sorted(str(x.asset_selection) + str(x.tags)))
):
asset_keys = run_request.asset_selection
assert asset_keys is not None
assets = assets_by_repo_name[
asset_graph.get_repository_handle(asset_keys[0]).repository_name
]
do_run(
all_assets=assets,
asset_keys=asset_keys,
partition_key=run_request.partition_key,
instance=instance,
failed_asset_keys=asset_keys if idx in (fail_idxs or set()) else [],
tags={**run_request.tags, BACKFILL_ID_TAG: backfill_id},
)
def _single_backfill_iteration(
backfill_id,
backfill_data,
asset_graph: RemoteWorkspaceAssetGraph,
instance,
assets_by_repo_name,
fail_idxs: Optional[set[int]] = None,
) -> AssetBackfillData:
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, backfill_data, asset_graph, instance
)
backfill_data = result.backfill_data
_launch_runs(
result.run_requests,
backfill_id,
asset_graph,
instance,
assets_by_repo_name,
fail_idxs=fail_idxs,
)
return backfill_data.with_run_requests_submitted(
result.run_requests,
_get_asset_graph_view(instance, asset_graph, backfill_data.backfill_start_datetime),
)
def _single_backfill_iteration_create_but_do_not_submit_runs(
backfill_id, backfill_data, asset_graph, instance, assets_by_repo_name
) -> AssetBackfillData:
# Patch the run execution to not actually execute the run, but instead just create it
with patch(
"dagster._core.execution.execute_in_process.ExecuteRunWithPlanIterable",
return_value=MagicMock(),
):
return _single_backfill_iteration(
backfill_id, backfill_data, asset_graph, instance, assets_by_repo_name
)
@pytest.mark.parametrize(
"some_or_all",
[
"all",
"some",
],
)
@pytest.mark.parametrize(
"failures",
[
"no_failures",
"root_failures",
"random_half_failures",
],
)
@pytest.mark.parametrize("scenario", list(scenarios.values()), ids=list(scenarios.keys()))
def test_scenario_to_completion(scenario: AssetBackfillScenario, failures: str, some_or_all: str):
with (
dg.instance_for_test() as instance,
environ(
{"ASSET_BACKFILL_CURSOR_OFFSET": str(scenario.last_storage_id_cursor_offset)}
if scenario.last_storage_id_cursor_offset
else {}
),
):
instance.add_dynamic_partitions("foo", ["a", "b"])
with (
freeze_time(scenario.evaluation_time),
partition_loading_context(scenario.evaluation_time, instance),
):
assets_by_repo_name = scenario.assets_by_repo_name
asset_graph = get_asset_graph(assets_by_repo_name)
if some_or_all == "all":
target_subset = AssetGraphSubset.all(asset_graph)
elif some_or_all == "some":
if scenario.target_root_partition_keys is None:
target_subset = make_random_subset(
asset_graph, instance, scenario.evaluation_time
)
else:
target_subset = make_subset_from_partition_keys(
scenario.target_root_partition_keys,
asset_graph,
instance,
evaluation_time=scenario.evaluation_time,
)
else:
assert False
backfill_data = AssetBackfillData.empty(
target_subset,
scenario.evaluation_time.timestamp(),
dynamic_partitions_store=instance,
)
if failures == "no_failures":
fail_asset_partitions: set[AssetKeyPartitionKey] = set()
elif failures == "root_failures":
fail_asset_partitions = set(
(
backfill_data.target_subset.filter_asset_keys(
asset_graph.root_materializable_asset_keys
)
).iterate_asset_partitions()
)
elif failures == "random_half_failures":
fail_asset_partitions = {
asset_partition
for asset_partition in backfill_data.target_subset.iterate_asset_partitions()
if hash(str(asset_partition.asset_key) + str(asset_partition.partition_key)) % 2
== 0
}
else:
assert False
run_backfill_to_completion(
asset_graph, assets_by_repo_name, backfill_data, fail_asset_partitions, instance
)
def test_self_dependant_asset_with_grouped_run_backfill_policy():
assets_by_repo_name = {"repo": self_dependant_asset_with_grouped_run_backfill_policy}
asset_graph = get_asset_graph(assets_by_repo_name)
asset_def = self_dependant_asset_with_grouped_run_backfill_policy[0]
partitions = [
"2023-01-01",
"2023-01-02",
"2023-01-03",
"2023-01-04",
"2023-01-05",
"2023-01-06",
"2023-01-07",
]
with DagsterInstance.ephemeral() as instance:
backfill_id = "self_dependant_asset_with_grouped_run_backfill_policy"
asset_backfill_data = AssetBackfillData.from_asset_partitions(
asset_graph=asset_graph,
partition_names=partitions,
asset_selection=[asset_def.key],
dynamic_partitions_store=MagicMock(),
all_partitions=False,
backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(asset_def.key, partition)
for partition in [
"2023-01-01",
"2023-01-02",
"2023-01-03",
]
},
asset_graph,
)
assert instance.get_runs_count() == 1
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert instance.get_runs_count() == 2
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(asset_def.key, partition)
for partition in [
"2023-01-01",
"2023-01-02",
"2023-01-03",
"2023-01-04",
"2023-01-05",
"2023-01-06",
]
},
asset_graph,
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert instance.get_runs_count() == 3
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(asset_def.key, partition)
for partition in [
"2023-01-01",
"2023-01-02",
"2023-01-03",
"2023-01-04",
"2023-01-05",
"2023-01-06",
"2023-01-07",
]
},
asset_graph,
)
def test_self_dependant_asset_downstream_of_regular_asset_single_run_backfill_policies():
with environ({"ASSET_BACKFILL_CURSOR_OFFSET": "10000"}):
assets_by_repo_name = {"repo": self_dependant_asset_downstream_of_regular_asset}
asset_graph = get_asset_graph(assets_by_repo_name)
regular_asset_key = AssetKey(["regular_asset"])
self_dependant_asset_key = AssetKey(["self_dependant"])
partitions = [
"2023-01-01",
"2023-01-02",
"2023-01-03",
]
with DagsterInstance.ephemeral() as instance:
backfill_id = "self_dependant_asset_downstream_of_regular_asset"
asset_backfill_data = AssetBackfillData.from_asset_partitions(
asset_graph=asset_graph,
partition_names=partitions,
asset_selection=[regular_asset_key, self_dependant_asset_key],
dynamic_partitions_store=MagicMock(),
all_partitions=False,
backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert (
asset_backfill_data.requested_subset
== AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(regular_asset_key, partition)
for partition in partitions
},
asset_graph,
)
)
assert instance.get_runs_count() == 1
run_requests = []
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
run_requests.extend(result.run_requests)
asset_backfill_data = result.backfill_data.with_run_requests_submitted(
result.run_requests,
_get_asset_graph_view(
instance, asset_graph, asset_backfill_data.backfill_start_datetime
),
)
assert (
asset_backfill_data.requested_subset
== AssetGraphSubset.from_asset_partition_set(
{AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01")}.union(
{
AssetKeyPartitionKey(regular_asset_key, partition)
for partition in partitions
}
),
asset_graph,
)
)
assert instance.get_runs_count() == 1
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
run_requests.extend(result.run_requests)
asset_backfill_data = result.backfill_data.with_run_requests_submitted(
result.run_requests,
_get_asset_graph_view(
instance, asset_graph, asset_backfill_data.backfill_start_datetime
),
)
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
# if nothing new has been materialized, no new runs should launch
assert (
asset_backfill_data.requested_subset
== AssetGraphSubset.from_asset_partition_set(
{AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01")}.union(
{
AssetKeyPartitionKey(regular_asset_key, partition)
for partition in partitions
}
),
asset_graph,
)
)
assert instance.get_runs_count() == 1
_launch_runs(
run_requests,
backfill_id,
asset_graph,
instance,
assets_by_repo_name,
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
# once the upstream actually materializes, the downstream should launch
assert (
asset_backfill_data.requested_subset
== AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01"),
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-02"),
}.union(
{
AssetKeyPartitionKey(regular_asset_key, partition)
for partition in partitions
}
),
asset_graph,
)
)
def test_self_dependant_asset_downstream_of_regular_asset_multiple_run_backfill_policies():
assets_by_repo_name: dict[str, list[AssetsDefinition]] = {
"repo": self_dependant_asset_downstream_of_regular_asset_multiple_run
}
asset_graph = get_asset_graph(assets_by_repo_name)
regular_asset_key = AssetKey(["regular_asset"])
self_dependant_asset_key = AssetKey(["self_dependant"])
partitions = [
"2023-01-01",
"2023-01-02",
"2023-01-03",
]
with DagsterInstance.ephemeral() as instance:
backfill_id = "self_dependant_asset_with_grouped_run_backfill_policy"
asset_backfill_data = AssetBackfillData.from_asset_partitions(
asset_graph=asset_graph,
partition_names=partitions,
asset_selection=[regular_asset_key, self_dependant_asset_key],
dynamic_partitions_store=MagicMock(),
all_partitions=False,
backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{AssetKeyPartitionKey(regular_asset_key, partition) for partition in partitions},
asset_graph,
)
assert instance.get_runs_count() == 3
for i in range(len(partitions)):
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert instance.get_runs_count() == 4 + i
assert (
asset_backfill_data.requested_subset
== AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(regular_asset_key, partition)
for partition in partitions
}.union(
{
AssetKeyPartitionKey(self_dependant_asset_key, partition)
for partition in partitions[: i + 1]
}
),
asset_graph,
)
)
def test_can_submit_additional_runs_without_any_materializations():
assets_by_repo_name: dict[str, list[AssetsDefinition]] = {
"repo": regular_asset_downstream_of_self_dependant_asset
}
asset_graph = get_asset_graph(assets_by_repo_name)
self_dependant_asset_key = AssetKey(["self_dependant"])
regular_asset_key = AssetKey(["regular_asset"])
partitions = [
"2023-01-01",
"2023-01-02",
]
with DagsterInstance.ephemeral() as instance:
backfill_id = "regular_asset_downstream_of_self_dependant_asset"
asset_backfill_data = AssetBackfillData.from_asset_partitions(
asset_graph=asset_graph,
partition_names=partitions,
asset_selection=[regular_asset_key, self_dependant_asset_key],
dynamic_partitions_store=MagicMock(),
all_partitions=False,
backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01")}, asset_graph
)
assert instance.get_runs_count() == 1
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
run_requests = list(result.run_requests)
asset_backfill_data = result.backfill_data.with_run_requests_submitted(
result.run_requests,
_get_asset_graph_view(
instance, asset_graph, asset_backfill_data.backfill_start_datetime
),
)
# doesn't materialize the downstream asset yet because its still in the middle of
# materializing the next partition of the upstream asset
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01"),
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-02"),
},
asset_graph,
)
# but on the next iteration, the eligible downstream asset is requested now that the upstream asset
# is no longer in the middle of being materialized
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
assert len(result.run_requests) == 1
run_requests.extend(result.run_requests)
asset_backfill_data = result.backfill_data.with_run_requests_submitted(
result.run_requests,
_get_asset_graph_view(
instance, asset_graph, asset_backfill_data.backfill_start_datetime
),
)
assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
{
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-01"),
AssetKeyPartitionKey(self_dependant_asset_key, "2023-01-02"),
AssetKeyPartitionKey(regular_asset_key, "2023-01-01"),
},
asset_graph,
)
# but then stabilizes until more upstreams come in
result = execute_asset_backfill_iteration_consume_generator(
backfill_id, asset_backfill_data, asset_graph, instance
)
asset_backfill_data = result.backfill_data
assert not result.run_requests
_launch_runs(
run_requests,
backfill_id,
asset_graph,
instance,
assets_by_repo_name,
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert asset_backfill_data.requested_subset == asset_backfill_data.target_subset
def test_matching_partitions_with_different_subsets():
assets_by_repo_name = {"repo": matching_partitions_with_different_subsets}
asset_graph = get_asset_graph(assets_by_repo_name)
# target a subset that results in different subsets being excluded from parent
# and child (the parts of parent that are downstream of grandparent get filtered out,
# and the parts of child that are downstream of other_parent get filtered out)
# targeting:
# grandparent 2023-01-01
# parent: 2023-01-01 to 2023-01-09
# other_parent: 2023-01-09
# child: 2023-01-01 to 2020-01-09
target_asset_graph_subset = AssetGraphSubset(
partitions_subsets_by_asset_key={
AssetKey(["grandparent"]): asset_graph.get(
AssetKey(["grandparent"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 1),
end=create_datetime(2023, 1, 2),
)
),
AssetKey(["parent"]): asset_graph.get(
AssetKey(["parent"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 1),
end=create_datetime(2023, 1, 10),
)
),
AssetKey(["child"]): asset_graph.get(
AssetKey(["child"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 1),
end=create_datetime(2023, 1, 10),
)
),
AssetKey(["other_parent"]): asset_graph.get(
AssetKey(["other_parent"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 9),
end=create_datetime(2023, 1, 10),
)
),
},
non_partitioned_asset_keys=set(),
)
with DagsterInstance.ephemeral() as instance:
backfill_id = "matching_partitions_with_different_requested_subsets"
asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
asset_graph_subset=target_asset_graph_subset,
dynamic_partitions_store=instance,
backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
# request on first iteration:
# grandparent 2023-01-01
# other_parent: 2023-01-09
assert asset_backfill_data.requested_subset == AssetGraphSubset(
non_partitioned_asset_keys=set(),
partitions_subsets_by_asset_key={
AssetKey(["grandparent"]): asset_graph.get(
AssetKey(["grandparent"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 1),
end=create_datetime(2023, 1, 2),
)
),
AssetKey(["other_parent"]): asset_graph.get(
AssetKey(["other_parent"])
).partitions_def.get_partition_subset_in_time_window( # type: ignore
TimeWindow(
start=create_datetime(2023, 1, 9),
end=create_datetime(2023, 1, 10),
),
),
},
)
asset_backfill_data = _single_backfill_iteration(
backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
)
assert asset_backfill_data.requested_subset == target_asset_graph_subset
def test_matching_partitions_with_different_subsets_failure():
    """Backfill where parent/child target the same time window but get grouped
    into different run batches; failing the grandparent's run keeps its
    downstream partitions out of the requested subset, and at completion the
    requested and failed-and-downstream subsets together cover the target.
    """
    assets_by_repo_name = {"repo": matching_partitions_with_different_subsets}
    asset_graph = get_asset_graph(assets_by_repo_name)

    # target a subset that results in different subsets being excluded from parent
    # and child (the parts of parent that are downstream of grandparent get filtered out,
    # and the parts of child that are downstream of other_parent get filtered out)
    target_asset_graph_subset = AssetGraphSubset(
        partitions_subsets_by_asset_key={
            AssetKey(["grandparent"]): asset_graph.get(
                AssetKey(["grandparent"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 2),
                )
            ),
            AssetKey(["parent"]): asset_graph.get(
                AssetKey(["parent"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 10),
                )
            ),
            AssetKey(["child"]): asset_graph.get(
                AssetKey(["child"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 10),
                )
            ),
            AssetKey(["other_parent"]): asset_graph.get(
                AssetKey(["other_parent"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 9),
                    end=create_datetime(2023, 1, 10),
                )
            ),
        },
        non_partitioned_asset_keys=set(),
    )

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "matching_partitions_with_different_requested_subsets"
        asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
            asset_graph_subset=target_asset_graph_subset,
            dynamic_partitions_store=instance,
            backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
        )

        asset_graph_view = _get_asset_graph_view(
            instance, asset_graph, asset_backfill_data.backfill_start_datetime
        )

        result = execute_asset_backfill_iteration_consume_generator(
            backfill_id, asset_backfill_data, asset_graph, instance
        )

        unlaunched_run_requests = list(result.run_requests)
        assert len(unlaunched_run_requests) == 2

        # sort the run requests to that the grandparent one is first
        unlaunched_run_requests.sort(key=lambda x: sorted(str(x.asset_selection)), reverse=True)

        asset_backfill_data = result.backfill_data.with_run_requests_submitted(
            unlaunched_run_requests,
            asset_graph_view,
        )

        assert asset_backfill_data.requested_subset == AssetGraphSubset(
            non_partitioned_asset_keys=set(),
            partitions_subsets_by_asset_key={
                AssetKey(["grandparent"]): asset_graph.get(
                    AssetKey(["grandparent"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 1),
                        end=create_datetime(2023, 1, 2),
                    )
                ),
                AssetKey(["other_parent"]): asset_graph.get(
                    AssetKey(["other_parent"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 9),
                        end=create_datetime(2023, 1, 10),
                    ),
                ),
            },
        )

        # fail the grandparent run request, leave the other_parent run request un-materialized
        _launch_runs(
            unlaunched_run_requests[0:1],
            backfill_id,
            asset_graph,
            instance,
            assets_by_repo_name,
            fail_idxs={0},
        )

        unlaunched_run_requests = unlaunched_run_requests[1:]

        # Next iteration requests the remainder of child that is now eligible, even though
        # other_parent has not materialized yet (since other_parent is not being materialized this tick)
        result = execute_asset_backfill_iteration_consume_generator(
            backfill_id, asset_backfill_data, asset_graph, instance
        )
        asset_backfill_data = result.backfill_data.with_run_requests_submitted(
            result.run_requests,
            asset_graph_view,
        )
        assert asset_backfill_data.requested_subset == AssetGraphSubset(
            non_partitioned_asset_keys=set(),
            partitions_subsets_by_asset_key={
                AssetKey(["grandparent"]): asset_graph.get(
                    AssetKey(["grandparent"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 1),
                        end=create_datetime(2023, 1, 2),
                    )
                ),
                # note: `parent` is requested starting from 2023-01-02 because the
                # 2023-01-01 slice is downstream of the failed grandparent run
                AssetKey(["parent"]): asset_graph.get(
                    AssetKey(["parent"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 2),
                        end=create_datetime(2023, 1, 10),
                    )
                ),
                AssetKey(["other_parent"]): asset_graph.get(
                    AssetKey(["other_parent"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 9),
                        end=create_datetime(2023, 1, 10),
                    ),
                ),
            },
        )

        unlaunched_run_requests.extend(list(result.run_requests))

        _launch_runs(
            unlaunched_run_requests,
            backfill_id,
            asset_graph,
            instance,
            assets_by_repo_name,
        )

        # do last iteration
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # not all things were requested because some upstreams failed
        assert asset_backfill_data.requested_subset != target_asset_graph_subset

        # but everything was either requested or failed
        assert (
            asset_backfill_data.requested_subset | asset_backfill_data.failed_and_downstream_subset
        ) == target_asset_graph_subset
def test_child_with_two_parents_with_identical_partitions_same_subsets():
    """When both parents and the child target the identical partition range,
    the whole graph is grouped into one batch and requested in a single
    backfill iteration.
    """
    assets_by_repo_name = {"repo": child_with_two_parents_with_identical_partitions}
    asset_graph = get_asset_graph(assets_by_repo_name)

    # target the same subset in both parents and child, so everything is grouped together
    target_asset_graph_subset = AssetGraphSubset(
        partitions_subsets_by_asset_key={
            AssetKey(["parent_a"]): asset_graph.get(
                AssetKey(["parent_a"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 3),
                )
            ),
            AssetKey(["parent_b"]): asset_graph.get(
                AssetKey(["parent_b"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 3),
                )
            ),
            AssetKey(["child"]): asset_graph.get(
                AssetKey(["child"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 3),
                )
            ),
        },
        non_partitioned_asset_keys=set(),
    )

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "child_with_two_parents_with_identical_partitions_same_subsets"
        asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
            asset_graph_subset=target_asset_graph_subset,
            dynamic_partitions_store=instance,
            backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
        )

        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # requests everything in the first iteration
        assert asset_backfill_data.requested_subset == target_asset_graph_subset
def test_child_with_two_parents_with_identical_partitions_different_subsets():
    """When one parent targets a wider range than the other, the parents must
    run in the first iteration and the child only on the second iteration.
    """
    assets_by_repo_name = {"repo": child_with_two_parents_with_identical_partitions}
    asset_graph = get_asset_graph(assets_by_repo_name)

    # target the same subset in one parent and one child, but a different subset in another
    # parent - so the parents need to run before the child does
    target_asset_graph_subset = AssetGraphSubset(
        partitions_subsets_by_asset_key={
            AssetKey(["parent_a"]): asset_graph.get(
                AssetKey(["parent_a"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 4),
                )
            ),
            AssetKey(["parent_b"]): asset_graph.get(
                AssetKey(["parent_b"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 3),
                )
            ),
            AssetKey(["child"]): asset_graph.get(
                AssetKey(["child"])
            ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                TimeWindow(
                    start=create_datetime(2023, 1, 1),
                    end=create_datetime(2023, 1, 4),
                )
            ),
        },
        non_partitioned_asset_keys=set(),
    )

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "child_with_two_parents_with_identical_partitions_different_subsets"
        asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
            asset_graph_subset=target_asset_graph_subset,
            dynamic_partitions_store=instance,
            backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
        )

        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # doesn't try to materialize child yet
        assert asset_backfill_data.requested_subset == AssetGraphSubset(
            non_partitioned_asset_keys=set(),
            partitions_subsets_by_asset_key={
                AssetKey(["parent_a"]): asset_graph.get(
                    AssetKey(["parent_a"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 1),
                        end=create_datetime(2023, 1, 4),
                    )
                ),
                AssetKey(["parent_b"]): asset_graph.get(
                    AssetKey(["parent_b"])
                ).partitions_def.get_partition_subset_in_time_window(  # type: ignore
                    TimeWindow(
                        start=create_datetime(2023, 1, 1),
                        end=create_datetime(2023, 1, 3),
                    )
                ),
            },
        )

        # materializes child on the next iteration
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        assert asset_backfill_data.requested_subset == target_asset_graph_subset
def test_self_dependant_asset_with_single_run_backfill_policy():
    """A self-dependent asset with a single-run backfill policy is requested
    for the entire partition range in one run, and a second iteration does
    not launch any additional runs.
    """
    assets_by_repo_name = {"repo": self_dependant_asset_with_single_run_backfill_policy}
    asset_graph = get_asset_graph(assets_by_repo_name)

    asset_def = self_dependant_asset_with_single_run_backfill_policy[0]

    partitions = [
        "2023-01-01",
        "2023-01-02",
        "2023-01-03",
        "2023-01-04",
        "2023-01-05",
        "2023-01-06",
        "2023-01-07",
    ]

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "self_dependant_asset_with_single_run_backfill_policy"
        asset_backfill_data = AssetBackfillData.from_asset_partitions(
            asset_graph=asset_graph,
            partition_names=partitions,
            asset_selection=[asset_def.key],
            dynamic_partitions_store=MagicMock(),
            all_partitions=False,
            backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
        )

        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # Reuse `partitions` instead of repeating the literal list, so the
        # expected subset cannot drift from the targeted partitions.
        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {AssetKeyPartitionKey(asset_def.key, partition) for partition in partitions},
            asset_graph,
        )

        assert instance.get_runs_count() == 1

        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # single-run policy: no additional run on the second iteration
        assert instance.get_runs_count() == 1
def test_self_dependant_asset_with_no_backfill_policy():
    """A self-dependent asset with no backfill policy is materialized one
    partition per iteration, in order, each in its own run.
    """
    assets_by_repo_name = {"repo": self_dependant_asset_with_no_backfill_policy}
    asset_graph = get_asset_graph(assets_by_repo_name)

    asset_def = self_dependant_asset_with_no_backfill_policy[0]

    partitions = [
        "2023-01-01",
        "2023-01-02",
        "2023-01-03",
    ]

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "self_dependant_asset_with_no_backfill_policy"
        asset_backfill_data = AssetBackfillData.from_asset_partitions(
            asset_graph=asset_graph,
            partition_names=partitions,
            asset_selection=[asset_def.key],
            dynamic_partitions_store=MagicMock(),
            all_partitions=False,
            backfill_start_timestamp=create_datetime(2023, 1, 12, 0, 0, 0).timestamp(),
        )

        # first iteration: only the earliest partition is eligible
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {AssetKeyPartitionKey(asset_def.key, "2023-01-01")},
            asset_graph,
        )

        assert instance.get_runs_count() == 1

        # second iteration: the next partition becomes eligible
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {
                AssetKeyPartitionKey(asset_def.key, "2023-01-01"),
                AssetKeyPartitionKey(asset_def.key, "2023-01-02"),
            },
            asset_graph,
        )

        assert instance.get_runs_count() == 2

        # third iteration: the final partition is requested
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {
                AssetKeyPartitionKey(asset_def.key, "2023-01-01"),
                AssetKeyPartitionKey(asset_def.key, "2023-01-02"),
                AssetKeyPartitionKey(asset_def.key, "2023-01-03"),
            },
            asset_graph,
        )

        assert instance.get_runs_count() == 3
def test_materializations_outside_of_backfill():
    """A materialization that happens before the backfill starts does not
    prevent the backfill from running to completion.
    """
    repo_assets = {"repo": one_asset_one_partition}
    graph = get_asset_graph(repo_assets)
    instance = DagsterInstance.ephemeral()

    target_asset = one_asset_one_partition[0]
    partitions_def = cast("dg.PartitionsDefinition", target_asset.partitions_def)
    first_partition = partitions_def.get_partition_keys()[0]

    # Materialize the only partition outside of any backfill first.
    do_run(
        all_assets=one_asset_one_partition,
        asset_keys=[target_asset.key],
        partition_key=first_partition,
        instance=instance,
        tags={},
    )

    run_backfill_to_completion(
        instance=instance,
        asset_graph=graph,
        assets_by_repo_name=repo_assets,
        backfill_data=make_backfill_data("all", graph, instance, None),  # pyright: ignore[reportArgumentType]
        fail_asset_partitions=set(),
    )
def test_materialization_outside_of_backfill_range_during_backfill():
    """A materialization of a targeted asset for a partition outside the
    backfill's range does not count toward the backfill; the in-range
    partition is still requested once its parent materializes.
    """

    @dg.asset()
    def upstream():
        pass

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
    )
    def downstream(upstream):
        pass

    assets_by_repo_name = {"repo": [upstream, downstream]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    with DagsterInstance.ephemeral() as instance:
        backfill_id = "dummy_backfill_id"
        asset_backfill_data = AssetBackfillData.from_asset_partitions(
            asset_graph=asset_graph,
            partition_names=["2023-01-01"],
            asset_selection=[downstream.key, upstream.key],
            dynamic_partitions_store=MagicMock(),
            all_partitions=False,
            backfill_start_timestamp=create_datetime(2023, 1, 9, 0, 0, 0).timestamp(),
        )
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

        # only the unpartitioned root is requested on the first iteration
        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {AssetKeyPartitionKey(dg.AssetKey("upstream"), None)}, asset_graph
        )

        # Downstream asset creates a new materialization 'from the future' (outside
        # of the backfill range) and upstream asset creates a new materialization as well
        # (the downstream materialization 'from the future' should be ignored)
        do_run(
            all_assets=[upstream, downstream],
            asset_keys=[upstream.key],
            partition_key=None,
            instance=instance,
        )
        do_run(
            all_assets=[upstream, downstream],
            asset_keys=[downstream.key],
            partition_key="2023-01-12",
            instance=instance,
        )

        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )
        assert asset_backfill_data.requested_subset == AssetGraphSubset.from_asset_partition_set(
            {
                AssetKeyPartitionKey(dg.AssetKey("upstream"), None),
                AssetKeyPartitionKey(dg.AssetKey("downstream"), "2023-01-01"),
            },
            asset_graph,
        )
def test_do_not_rerequest_while_existing_run_in_progress():
    """A partition with an in-progress (not yet materialized) backfill run is
    not re-requested on a subsequent iteration.
    """

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
    )
    def upstream():
        pass

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
    )
    def downstream(upstream):
        pass

    assets_by_repo_name = {"repo": [upstream, downstream]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    instance = DagsterInstance.ephemeral()

    backfill_id = "dummy_backfill_id"
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["2023-01-01"],
        asset_selection=[downstream.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 1, 9, 0, 0, 0).timestamp(),
    )

    # materialize the upstream parent outside the backfill so downstream is eligible
    do_run(
        all_assets=[upstream],
        asset_keys=[upstream.key],
        partition_key="2023-01-01",
        instance=instance,
    )

    asset_backfill_data = _single_backfill_iteration_create_but_do_not_submit_runs(
        backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )

    assert (
        AssetKeyPartitionKey(downstream.key, partition_key="2023-01-01")
        in asset_backfill_data.requested_subset
    )

    # Run for 2023-01-01 exists and is in progress, but has not materialized
    backfill_runs = instance.get_runs(dg.RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))
    assert len(backfill_runs) == 1
    assert backfill_runs[0].tags.get(PARTITION_NAME_TAG) == "2023-01-01"
    assert backfill_runs[0].status == DagsterRunStatus.NOT_STARTED

    do_run(
        all_assets=[upstream],
        asset_keys=[upstream.key],
        partition_key="2023-01-01",
        instance=instance,
    )

    _single_backfill_iteration_create_but_do_not_submit_runs(
        backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )

    # Confirm that no additional runs for 2023-01-02 are kicked off
    assert len(instance.get_runs(dg.RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))) == 1
def make_backfill_data(
    some_or_all: str,
    asset_graph: RemoteWorkspaceAssetGraph,
    instance: DagsterInstance,
    current_time: datetime.datetime,
) -> AssetBackfillData:
    """Build an empty AssetBackfillData targeting either every partition of
    every asset (``"all"``) or a pseudo-random subset (``"some"``).

    Args:
        some_or_all: Either ``"all"`` or ``"some"``.
        asset_graph: The graph whose assets are targeted.
        instance: Used as the dynamic-partitions store.
        current_time: Evaluation time; callers may pass None to use the
            current timestamp.

    Raises:
        ValueError: If ``some_or_all`` is neither ``"all"`` nor ``"some"``.
    """
    if some_or_all == "all":
        with partition_loading_context(current_time, instance):
            target_subset = AssetGraphSubset.all(asset_graph)
    elif some_or_all == "some":
        target_subset = make_random_subset(asset_graph, instance, current_time)
    else:
        # Raise instead of `assert False`: assertions are stripped under
        # `python -O`, which would let an invalid argument fall through.
        raise ValueError(f"Unexpected value for some_or_all: {some_or_all!r}")

    return AssetBackfillData.empty(
        target_subset,
        current_time.timestamp() if current_time else get_current_timestamp(),
        dynamic_partitions_store=instance,
    )
def make_random_subset(
    asset_graph: RemoteWorkspaceAssetGraph,
    instance: DagsterInstance,
    evaluation_time: datetime.datetime,
) -> AssetGraphSubset:
    """Select the second half of each partitioned root asset's partitions
    (and every other unpartitioned root), then expand downstream via BFS.
    """
    selected_roots: set[AssetKeyPartitionKey] = set()
    for idx, root_key in enumerate(sorted(asset_graph.root_materializable_asset_keys)):
        partitions_def = asset_graph.get(root_key).partitions_def
        if partitions_def is None:
            # include every other unpartitioned root
            if idx % 2 == 0:
                selected_roots.add(AssetKeyPartitionKey(root_key, None))
            continue
        all_partition_keys = list(
            partitions_def.get_partition_keys(
                dynamic_partitions_store=instance, current_time=evaluation_time
            )
        )
        midpoint = len(all_partition_keys) // 2
        selected_roots.update(
            AssetKeyPartitionKey(root_key, pk) for pk in all_partition_keys[midpoint:]
        )

    asset_graph_view = _get_asset_graph_view(instance, asset_graph, evaluation_time=evaluation_time)
    subset, _ = bfs_filter_asset_graph_view(
        asset_graph_view=asset_graph_view,
        # pass everything through; BFS is used only to collect the downstream closure
        condition_fn=lambda candidate_asset_graph_subset, _: (
            AssetGraphViewBfsFilterConditionResult(
                passed_asset_graph_subset=candidate_asset_graph_subset,
                excluded_asset_graph_subsets_and_reasons=[],
            )
        ),
        initial_asset_graph_subset=AssetGraphSubset.from_asset_partition_set(
            selected_roots, asset_graph
        ),
        include_full_execution_set=True,
    )
    return subset
def make_subset_from_partition_keys(
    partition_keys: Sequence[str],
    asset_graph: RemoteWorkspaceAssetGraph,
    instance: DagsterInstance,
    evaluation_time: datetime.datetime,
) -> AssetGraphSubset:
    """Target the given partition keys on every partitioned root asset (and
    all unpartitioned roots), expanded downstream via BFS.
    """
    root_asset_partitions: set[AssetKeyPartitionKey] = set()
    # The enumerate index was unused; iterate the sorted keys directly.
    for root_asset_key in sorted(asset_graph.root_materializable_asset_keys):
        if asset_graph.get(root_asset_key).is_partitioned:
            root_asset_partitions.update(
                AssetKeyPartitionKey(root_asset_key, partition_key)
                for partition_key in partition_keys
            )
        else:
            root_asset_partitions.add(AssetKeyPartitionKey(root_asset_key, None))

    asset_graph_view = _get_asset_graph_view(instance, asset_graph, evaluation_time=evaluation_time)
    return bfs_filter_asset_graph_view(
        asset_graph_view=asset_graph_view,
        # pass-through condition: BFS is used only to collect the downstream closure
        condition_fn=lambda candidate_asset_graph_subset, _: (
            AssetGraphViewBfsFilterConditionResult(
                passed_asset_graph_subset=candidate_asset_graph_subset,
                excluded_asset_graph_subsets_and_reasons=[],
            )
        ),
        initial_asset_graph_subset=AssetGraphSubset.from_asset_partition_set(
            root_asset_partitions, asset_graph
        ),
        include_full_execution_set=True,
    )[0]
def get_asset_graph(
    assets_by_repo_name: Mapping[str, Sequence[dg.AssetsDefinition]],
) -> RemoteWorkspaceAssetGraph:
    """Build a RemoteWorkspaceAssetGraph for the given repo-name -> assets
    mapping, patching the builtin partition-mapping registry so the partition
    mappings inferred between these assets survive remote-representation
    round-tripping.
    """
    assets_defs_by_key = {
        key: assets_def
        for assets in assets_by_repo_name.values()
        for assets_def in assets
        for key in assets_def.keys
    }
    with patch(
        "dagster._core.remote_representation.external_data.get_builtin_partition_mapping_types"
    ) as get_builtin_partition_mapping_types:
        # report every inferred partition-mapping class as "builtin" so it is
        # serialized rather than dropped
        get_builtin_partition_mapping_types.return_value = tuple(
            assets_def.infer_partition_mapping(
                next(iter(assets_def.keys)),
                dep_key,
                assets_defs_by_key[dep_key].specs_by_key[dep_key].partitions_def,
            ).__class__
            for assets in assets_by_repo_name.values()
            for assets_def in assets
            for dep_key in assets_def.dependency_keys
        )
        return remote_asset_graph_from_assets_by_repo_name(assets_by_repo_name)
def execute_asset_backfill_iteration_consume_generator(
    backfill_id: str,
    asset_backfill_data: AssetBackfillData,
    asset_graph: RemoteWorkspaceAssetGraph,
    instance: DagsterInstance,
) -> AssetBackfillIterationResult:
    """Run a single backfill iteration and return its result, verifying that
    dynamic partitions are fetched at most once during the iteration.
    """
    counter = Counter()
    traced_counter.set(counter)
    # disable the cursor delay so the iteration completes immediately in tests
    with environ({"ASSET_BACKFILL_CURSOR_DELAY_TIME": "0"}):
        result = execute_asset_backfill_iteration_inner(
            backfill_id=backfill_id,
            asset_backfill_data=asset_backfill_data,
            asset_graph_view=_get_asset_graph_view(
                instance, asset_graph, asset_backfill_data.backfill_start_datetime
            ),
            backfill_start_timestamp=asset_backfill_data.backfill_start_timestamp,
            logger=logging.getLogger("fake_logger"),
            run_config=None,
        )
        assert counter.counts().get("DagsterInstance.get_dynamic_partitions", 0) <= 1
        return result
    # NOTE: the trailing `assert False` that followed the `with` block was
    # unreachable (the `return` above always executes) and has been removed.
def run_backfill_to_completion(
    asset_graph: RemoteWorkspaceAssetGraph,
    assets_by_repo_name: Mapping[str, Sequence[dg.AssetsDefinition]],
    backfill_data: AssetBackfillData,
    fail_asset_partitions: Iterable[AssetKeyPartitionKey],
    instance: DagsterInstance,
) -> tuple[AssetBackfillData, AbstractSet[AssetKeyPartitionKey], AbstractSet[AssetKeyPartitionKey]]:
    """Iterate the backfill until it completes, executing each run request
    (failing those in ``fail_asset_partitions``), and verify invariants:
    each iteration is idempotent once its requests are submitted, no asset
    partition is materialized before its targeted parents, and each asset
    partition is requested at most once.

    Returns:
        (final backfill data, all requested asset partitions, the
        failed-and-downstream asset partitions).
    """
    iteration_count = 0
    instance = instance or DagsterInstance.ephemeral()
    backfill_id = "backfillid_x"

    # assert each asset partition only targeted once
    requested_asset_partitions: set[AssetKeyPartitionKey] = set()

    asset_graph_view = _get_asset_graph_view(instance, asset_graph)

    # compute the downstream closure of the partitions that are set up to fail
    fail_and_downstream_asset_graph_subset, _ = bfs_filter_asset_graph_view(
        asset_graph_view=asset_graph_view,
        condition_fn=lambda candidate_asset_graph_subset, _: AssetGraphViewBfsFilterConditionResult(
            passed_asset_graph_subset=candidate_asset_graph_subset,
            excluded_asset_graph_subsets_and_reasons=[],
        ),
        initial_asset_graph_subset=AssetGraphSubset.from_asset_partition_set(
            set(fail_asset_partitions), asset_graph
        ),
        include_full_execution_set=True,
    )

    fail_and_downstream_asset_partitions = set(
        fail_and_downstream_asset_graph_subset.iterate_asset_partitions()
    )

    while not backfill_is_complete(
        backfill_id=backfill_id,
        backfill_data=backfill_data,
        instance=instance,
        logger=logging.getLogger("fake_logger"),
    ):
        iteration_count += 1
        result1 = execute_asset_backfill_iteration_consume_generator(
            backfill_id=backfill_id,
            asset_backfill_data=backfill_data,
            asset_graph=asset_graph,
            instance=instance,
        )

        assert result1.backfill_data != backfill_data

        backfill_data_with_submitted_runs = result1.backfill_data.with_run_requests_submitted(
            result1.run_requests,
            _get_asset_graph_view(
                instance, asset_graph, evaluation_time=backfill_data.backfill_start_datetime
            ),
        )

        # once everything that was requested is added to the requested subset, nothing should change if the iteration repeats
        result2 = execute_asset_backfill_iteration_consume_generator(
            backfill_id=backfill_id,
            asset_backfill_data=backfill_data_with_submitted_runs,
            asset_graph=asset_graph,
            instance=instance,
        )
        assert result2.backfill_data == backfill_data_with_submitted_runs
        assert result2.run_requests == []

        backfill_data = result1.backfill_data

        # verify ordering: nothing materialized before its targeted parents
        for asset_partition in backfill_data.materialized_subset.iterate_asset_partitions():
            with partition_loading_context(backfill_data.backfill_start_datetime, instance):
                parent_partitions_result = asset_graph.get_parents_partitions(*asset_partition)
            for parent_asset_partition in parent_partitions_result.parent_partitions:
                if (
                    parent_asset_partition in backfill_data.target_subset
                    and parent_asset_partition not in backfill_data.materialized_subset
                ):
                    assert False, (
                        f"{asset_partition} was materialized before its parent"
                        f" {parent_asset_partition},"
                    )

        # execute each run request, failing the runs whose partitions overlap
        # fail_asset_partitions
        for run_request in result1.run_requests:
            asset_keys = run_request.asset_selection
            assert asset_keys is not None

            requested_asset_partitions.update(
                _requested_asset_partitions_in_run_request(run_request, asset_graph)
            )

            # all assets in one run request must live in the same repository
            assert all(
                asset_graph.get_repository_handle(asset_keys[0])
                == asset_graph.get_repository_handle(asset_key)
                for asset_key in asset_keys
            )

            assets = assets_by_repo_name[
                asset_graph.get_repository_handle(asset_keys[0]).repository_name
            ]

            asset_key_partition_keys = set()
            for asset_key in asset_keys:
                if run_request.partition_key_range:
                    asset_key_partition_keys = asset_key_partition_keys.union(
                        asset_graph_view.get_entity_subset_in_range(
                            asset_key, run_request.partition_key_range
                        ).expensively_compute_asset_partitions()
                    )
                else:
                    asset_key_partition_keys.add(
                        AssetKeyPartitionKey(asset_key, run_request.partition_key)
                    )

            failed_asset_keys = list(
                {
                    matching_fail_asset_partition.asset_key
                    for matching_fail_asset_partition in (
                        set(fail_asset_partitions) & asset_key_partition_keys
                    )
                }
            )

            do_run(
                all_assets=assets,
                asset_keys=asset_keys,
                partition_key=run_request.partition_key,
                instance=instance,
                failed_asset_keys=failed_asset_keys,
                tags={**run_request.tags, BACKFILL_ID_TAG: backfill_id},
            )

        assert iteration_count <= len(requested_asset_partitions) + 1

    return backfill_data, requested_asset_partitions, fail_and_downstream_asset_partitions
def _requested_asset_partitions_in_run_request(
    run_request: RunRequest, asset_graph: BaseAssetGraph
) -> set[AssetKeyPartitionKey]:
    """Expand a run request into the set of asset partitions it targets,
    handling both ranged (chunked) and per-partition run requests.
    """
    asset_keys = run_request.asset_selection
    assert asset_keys is not None

    requested_asset_partitions = set()

    partition_range_start = run_request.tags.get(ASSET_PARTITION_RANGE_START_TAG)
    partition_range_end = run_request.tags.get(ASSET_PARTITION_RANGE_END_TAG)
    if partition_range_start and partition_range_end and run_request.partition_key is None:
        # backfill was a chunked backfill
        partition_range = dg.PartitionKeyRange(
            start=partition_range_start,
            end=partition_range_end,
        )
        asset_partitions = []
        for asset_key in asset_keys:
            asset_partitions.extend(
                asset_graph.get_partitions_in_range(
                    asset_key=asset_key, partition_key_range=partition_range
                )
            )
        # NOTE(review): `requested_asset_partitions` is still empty at this
        # point, so this intersection is always empty and the assertion can
        # never fire in this branch — confirm whether a within-list duplicate
        # check was intended.
        duplicate_asset_partitions = set(asset_partitions) & requested_asset_partitions
        assert len(duplicate_asset_partitions) == 0, (
            f" {duplicate_asset_partitions} requested twice. Requested:"
            f" {requested_asset_partitions}."
        )
        requested_asset_partitions.update(asset_partitions)
    else:
        # backfill was a partition by partition backfill
        for asset_key in asset_keys:
            asset_partition = AssetKeyPartitionKey(asset_key, run_request.partition_key)
            assert asset_partition not in requested_asset_partitions, (
                f"{asset_partition} requested twice. Requested: {requested_asset_partitions}."
            )
            requested_asset_partitions.add(asset_partition)

    return requested_asset_partitions
def remote_asset_graph_from_assets_by_repo_name(
    assets_by_repo_name: Mapping[str, Sequence[dg.AssetsDefinition]],
) -> RemoteWorkspaceAssetGraph:
    """Build a mock-workspace asset graph with one repository per entry of
    ``assets_by_repo_name``.
    """

    def _build_repo(name, repo_assets):
        # The helper's scope binds `repo_assets`, so each repository closes
        # over its own asset list.
        @dg.repository(name=name)
        def _repo():
            return repo_assets

        return _repo

    repositories = [
        _build_repo(repo_name, assets) for repo_name, assets in assets_by_repo_name.items()
    ]
    return mock_workspace_from_repos(repositories).asset_graph
@pytest.mark.parametrize(
    "static_serialization",
    [
        (
            r'{"requested_runs_for_target_roots": false, "serialized_target_subset":'
            r' {"partitions_subsets_by_asset_key": {"static_asset": "{\"version\": 1, \"subset\":'
            r' [\"b\", \"d\", \"c\", \"a\", \"e\", \"f\"]}"}, "non_partitioned_asset_keys": []},'
            r' "latest_storage_id": null, "serialized_requested_subset":'
            r' {"partitions_subsets_by_asset_key": {}, "non_partitioned_asset_keys": []},'
            r' "serialized_materialized_subset": {"partitions_subsets_by_asset_key": {},'
            r' "non_partitioned_asset_keys": []}, "serialized_failed_subset":'
            r' {"partitions_subsets_by_asset_key": {}, "non_partitioned_asset_keys": []}}'
        ),
        (
            r'{"requested_runs_for_target_roots": false, "serialized_target_subset":'
            r' {"partitions_subsets_by_asset_key": {"static_asset": "{\"version\": 1, \"subset\":'
            r' [\"f\", \"b\", \"e\", \"c\", \"d\", \"a\"]}"},'
            r' "serializable_partitions_ids_by_asset_key": {"static_asset":'
            r' "7c2047f8b02e90a69136c1a657bd99ad80b433a2"}, "subset_types_by_asset_key":'
            r' {"static_asset": "DEFAULT"}, "non_partitioned_asset_keys": []}, "latest_storage_id":'
            r' null, "serialized_requested_subset": {"partitions_subsets_by_asset_key": {},'
            r' "serializable_partitions_ids_by_asset_key": {}, "subset_types_by_asset_key": {},'
            r' "non_partitioned_asset_keys": []}, "serialized_materialized_subset":'
            r' {"partitions_subsets_by_asset_key": {},'
            r' "serializable_partitions_ids_by_asset_key": {},'
            r' "subset_types_by_asset_key": {}, "non_partitioned_asset_keys": []},'
            r' "serialized_failed_subset": {"partitions_subsets_by_asset_key": {},'
            r' "serializable_partitions_ids_by_asset_key": {}, "subset_types_by_asset_key": {},'
            r' "non_partitioned_asset_keys": []}}'
        ),
    ],
)
@pytest.mark.parametrize(
    "time_window_serialization",
    [
        (
            r'{"requested_runs_for_target_roots": false, "serialized_target_subset":'
            r' {"partitions_subsets_by_asset_key": {"daily_asset": "{\"version\": 1,'
            r" \"time_windows\":"
            r' [[1659484800.0, 1659744000.0]], \"num_partitions\": 3}"},'
            r' "non_partitioned_asset_keys":'
            r' []}, "latest_storage_id": null, "serialized_requested_subset":'
            r' {"partitions_subsets_by_asset_key": {}, "non_partitioned_asset_keys": []},'
            r' "serialized_materialized_subset": {"partitions_subsets_by_asset_key": {},'
            r' "non_partitioned_asset_keys": []}, "serialized_failed_subset":'
            r' {"partitions_subsets_by_asset_key": {}, "non_partitioned_asset_keys": []}}'
        ),
        (
            r'{"requested_runs_for_target_roots": true, "serialized_target_subset":'
            r' {"partitions_subsets_by_asset_key": {"daily_asset": "{\"version\": 1,'
            r' \"time_windows\": [[1571356800.0, 1571529600.0]], \"num_partitions\": 2}"},'
            r' "serializable_partitions_def_ids_by_asset_key": {"daily_asset":'
            r' "1d3558e8825a28611c33c1cfe60984c0c5dcf147"},'
            r' "partitions_def_class_names_by_asset_key": {"daily_asset":'
            r' "TimeWindowPartitionsDefinition"}, "non_partitioned_asset_keys": []},'
            r' "latest_storage_id": 235, "serialized_requested_subset":'
            r' {"partitions_subsets_by_asset_key": {"daily_asset": "{\"version\": 1,'
            r' \"time_windows\": [[1571356800.0, 1571529600.0]], \"num_partitions\": 2}"},'
            r' "serializable_partitions_def_ids_by_asset_key": {"daily_asset":'
            r' "1d3558e8825a28611c33c1cfe60984c0c5dcf147"},'
            r' "partitions_def_class_names_by_asset_key": {"daily_asset":'
            r' "TimeWindowPartitionsDefinition"}, "non_partitioned_asset_keys": []},'
            r' "serialized_materialized_subset": {"partitions_subsets_by_asset_key": {},'
            r' "serializable_partitions_def_ids_by_asset_key": {},'
            r' "partitions_def_class_names_by_asset_key": {}, "non_partitioned_asset_keys": []},'
            r' "serialized_failed_subset": {"partitions_subsets_by_asset_key": {},'
            r' "serializable_partitions_def_ids_by_asset_key": {},'
            r' "partitions_def_class_names_by_asset_key": {}, "non_partitioned_asset_keys": []}}'
        ),
    ],
)
def test_serialization(static_serialization, time_window_serialization):
    """A serialized AssetBackfillData is valid only against an asset graph
    whose assets have compatible partitions definitions: swapping the static
    and time-window definitions invalidates both fixtures, adding a partition
    key keeps the static fixture valid, and renaming an asset invalidates the
    time-window fixture.
    """
    time_window_partitions = dg.DailyPartitionsDefinition(start_date="2015-05-05")
    keys = ["a", "b", "c", "d", "e", "f"]
    static_partitions = dg.StaticPartitionsDefinition(keys)

    def make_asset_graph1():
        @dg.asset(partitions_def=time_window_partitions)
        def daily_asset(): ...

        @dg.asset(partitions_def=static_partitions)
        def static_asset(): ...

        return remote_asset_graph_from_assets_by_repo_name({"repo": [daily_asset, static_asset]})

    asset_graph1 = make_asset_graph1()
    assert AssetBackfillData.is_valid_serialization(time_window_serialization, asset_graph1) is True
    assert AssetBackfillData.is_valid_serialization(static_serialization, asset_graph1) is True

    def make_asset_graph2():
        # partitions defs swapped relative to graph 1 -> both fixtures invalid
        @dg.asset(partitions_def=static_partitions)
        def daily_asset(): ...

        @dg.asset(partitions_def=time_window_partitions)
        def static_asset(): ...

        return remote_asset_graph_from_assets_by_repo_name({"repo": [daily_asset, static_asset]})

    asset_graph2 = make_asset_graph2()
    assert (
        AssetBackfillData.is_valid_serialization(time_window_serialization, asset_graph2) is False
    )
    assert AssetBackfillData.is_valid_serialization(static_serialization, asset_graph2) is False

    def make_asset_graph3():
        # extra partition key "x" is a compatible extension of the static def
        @dg.asset(partitions_def=dg.StaticPartitionsDefinition(keys + ["x"]))
        def daily_asset(): ...

        @dg.asset(partitions_def=static_partitions)
        def static_asset(): ...

        return remote_asset_graph_from_assets_by_repo_name({"repo": [daily_asset, static_asset]})

    asset_graph3 = make_asset_graph3()
    assert AssetBackfillData.is_valid_serialization(static_serialization, asset_graph3) is True

    def make_asset_graph4():
        # "daily_asset" renamed -> the time-window fixture no longer matches
        @dg.asset(partitions_def=static_partitions)
        def daily_asset_renamed():
            return 1

        @dg.asset(partitions_def=time_window_partitions)
        def static_asset(): ...

        return remote_asset_graph_from_assets_by_repo_name(
            {"repo": [daily_asset_renamed, static_asset]}
        )

    asset_graph4 = make_asset_graph4()
    assert (
        AssetBackfillData.is_valid_serialization(time_window_serialization, asset_graph4) is False
    )
def test_asset_backfill_status_counts():
    """Backfill an unpartitioned -> daily -> weekly asset chain, fail the daily
    partition, and check the per-asset status counts reported by
    ``get_backfill_status_per_asset_key``.
    """
    @dg.asset
    def unpartitioned_upstream_of_partitioned():
        return 1

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"))
    def upstream_daily_partitioned_asset(unpartitioned_upstream_of_partitioned):
        return unpartitioned_upstream_of_partitioned

    @dg.asset(partitions_def=dg.WeeklyPartitionsDefinition("2023-01-01"))
    def downstream_weekly_partitioned_asset(
        upstream_daily_partitioned_asset,
    ):
        return upstream_daily_partitioned_asset + 1

    assets_by_repo_name = {
        "repo": [
            unpartitioned_upstream_of_partitioned,
            upstream_daily_partitioned_asset,
            downstream_weekly_partitioned_asset,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)
    instance = DagsterInstance.ephemeral()

    backfill_data = AssetBackfillData.from_asset_partitions(
        partition_names=["2023-01-09"],
        asset_graph=asset_graph,
        asset_selection=[
            unpartitioned_upstream_of_partitioned.key,
            upstream_daily_partitioned_asset.key,
            downstream_weekly_partitioned_asset.key,
        ],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=get_current_timestamp(),
    )

    # Force the daily partition to fail; its weekly downstream is then failed too.
    (
        completed_backfill_data,
        requested_asset_partitions,
        fail_and_downstream_asset_partitions,
    ) = run_backfill_to_completion(
        instance=instance,
        asset_graph=asset_graph,
        assets_by_repo_name=assets_by_repo_name,
        backfill_data=backfill_data,
        fail_asset_partitions=[
            AssetKeyPartitionKey(
                asset_key=upstream_daily_partitioned_asset.key, partition_key="2023-01-09"
            )
        ],
    )

    counts = completed_backfill_data.get_backfill_status_per_asset_key(asset_graph)

    # Unpartitioned root succeeds; both partitioned assets report 1 failed of 1 targeted.
    assert counts[0].asset_key == unpartitioned_upstream_of_partitioned.key
    assert counts[0].backfill_status == AssetBackfillStatus.MATERIALIZED  # pyright: ignore[reportAttributeAccessIssue]

    assert counts[1].asset_key == upstream_daily_partitioned_asset.key
    assert counts[1].partitions_counts_by_status[AssetBackfillStatus.MATERIALIZED] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[1].partitions_counts_by_status[AssetBackfillStatus.FAILED] == 1  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[1].partitions_counts_by_status[AssetBackfillStatus.IN_PROGRESS] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[1].num_targeted_partitions == 1  # pyright: ignore[reportAttributeAccessIssue]

    assert counts[2].asset_key == downstream_weekly_partitioned_asset.key
    assert counts[2].partitions_counts_by_status[AssetBackfillStatus.MATERIALIZED] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[2].partitions_counts_by_status[AssetBackfillStatus.FAILED] == 1  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[2].partitions_counts_by_status[AssetBackfillStatus.IN_PROGRESS] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[2].num_targeted_partitions == 1  # pyright: ignore[reportAttributeAccessIssue]
def test_asset_backfill_status_counts_with_reexecution():
    """A partition that failed within a backfill flips to MATERIALIZED after a
    successful re-execution tagged with the same backfill id.

    Both asset definitions share the key "upstream": the failing version is in
    the repo used for the backfill; the succeeding version is materialized
    manually afterwards with ``BACKFILL_ID_TAG`` set.
    """
    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"), key="upstream")
    def upstream_fail():
        raise Exception("noo")

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"), key="upstream")
    def upstream_success():
        pass

    assets_by_repo_name = {
        "repo": [
            upstream_fail,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)
    instance = DagsterInstance.ephemeral()

    backfill_data = AssetBackfillData.from_asset_partitions(
        partition_names=["2023-01-01"],
        asset_graph=asset_graph,
        asset_selection=[
            upstream_fail.key,
        ],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=get_current_timestamp(),
    )

    # First iteration requests the run; second observes its failure.
    backfill_data = _single_backfill_iteration(
        "fake_id", backfill_data, asset_graph, instance, assets_by_repo_name
    )
    backfill_data = _single_backfill_iteration(
        "fake_id", backfill_data, asset_graph, instance, assets_by_repo_name
    )

    counts = backfill_data.get_backfill_status_per_asset_key(asset_graph)
    assert counts[0].asset_key == upstream_fail.key
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.MATERIALIZED] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.FAILED] == 1  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.IN_PROGRESS] == 0  # pyright: ignore[reportAttributeAccessIssue]

    # Re-execute successfully under the same backfill id, outside the daemon loop.
    dg.materialize(
        [upstream_success],
        instance=instance,
        partition_key="2023-01-01",
        tags={BACKFILL_ID_TAG: "fake_id"},
    )

    backfill_data = _single_backfill_iteration(
        "fake_id", backfill_data, asset_graph, instance, assets_by_repo_name
    )
    counts = backfill_data.get_backfill_status_per_asset_key(asset_graph)
    assert counts[0].asset_key == upstream_fail.key
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.MATERIALIZED] == 1  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.FAILED] == 0  # pyright: ignore[reportAttributeAccessIssue]
    assert counts[0].partitions_counts_by_status[AssetBackfillStatus.IN_PROGRESS] == 0  # pyright: ignore[reportAttributeAccessIssue]
def test_asset_backfill_selects_only_existent_partitions():
    """Targeting an hourly partition whose corresponding daily partition does not
    yet exist (backfill starts at 01:01 on the same day) should target only the
    hourly asset, leaving the daily downstream empty.
    """
    @dg.asset(partitions_def=dg.HourlyPartitionsDefinition("2023-01-01-00:00"))
    def upstream_hourly_partitioned_asset():
        return 1

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"))
    def downstream_daily_partitioned_asset(
        upstream_hourly_partitioned_asset,
    ):
        return upstream_hourly_partitioned_asset + 1

    assets_by_repo_name = {
        "repo": [
            upstream_hourly_partitioned_asset,
            downstream_daily_partitioned_asset,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    backfill_data = AssetBackfillData.from_asset_partitions(
        partition_names=["2023-01-09-00:00"],
        asset_graph=asset_graph,
        asset_selection=[
            upstream_hourly_partitioned_asset.key,
            downstream_daily_partitioned_asset.key,
        ],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 1, 9, 1, 1, 0).timestamp(),
    )

    target_subset = backfill_data.target_subset
    assert target_subset.get_partitions_subset(
        upstream_hourly_partitioned_asset.key, asset_graph
    ).get_partition_keys() == ["2023-01-09-00:00"]
    # The 2023-01-09 daily partition has not closed yet, so nothing is targeted.
    assert (
        len(
            target_subset.get_partitions_subset(downstream_daily_partitioned_asset.key, asset_graph)
        )
        == 0
    )
def test_asset_backfill_throw_error_on_invalid_upstreams():
    """Backfilling a partition whose upstream partition does not exist (May asset
    depending on a June-starting asset) raises ``DagsterInvariantViolationError``.
    """
    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-06-01"))
    def june_asset():
        return 1

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-05-01"))
    def may_asset(
        june_asset,
    ):
        return june_asset + 1

    assets_by_repo_name = {
        "repo": [
            june_asset,
            may_asset,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    # 2023-05-10 maps to a june_asset partition that is outside its partitions def.
    backfill_data = AssetBackfillData.from_asset_partitions(
        partition_names=["2023-05-10"],
        asset_graph=asset_graph,
        asset_selection=[
            may_asset.key,
        ],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 5, 15, 0, 0, 0).timestamp(),
    )

    instance = DagsterInstance.ephemeral()
    with pytest.raises(
        dg.DagsterInvariantViolationError, match="depends on non-existent partitions"
    ):
        run_backfill_to_completion(asset_graph, assets_by_repo_name, backfill_data, [], instance)
def test_asset_backfill_cancellation():
    """After one backfill iteration materializes the upstream hourly partition,
    the canceling-iteration data reports all requested partitions as
    materialized-or-failed, with only the upstream partition materialized and the
    never-requested daily downstream empty.
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.HourlyPartitionsDefinition("2023-01-01-00:00"))
    def upstream_hourly_partitioned_asset():
        return 1

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"))
    def downstream_daily_partitioned_asset(
        upstream_hourly_partitioned_asset,
    ):
        return upstream_hourly_partitioned_asset + 1

    assets_by_repo_name = {
        "repo": [
            upstream_hourly_partitioned_asset,
            downstream_daily_partitioned_asset,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    backfill_id = "dummy_backfill_id"
    backfill_start_datetime = create_datetime(2023, 1, 9, 1, 0, 0)
    asset_selection = [
        upstream_hourly_partitioned_asset.key,
        downstream_daily_partitioned_asset.key,
    ]
    targeted_partitions = ["2023-01-09-00:00"]
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=targeted_partitions,
        asset_selection=asset_selection,
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=backfill_start_datetime.timestamp(),
    )
    _single_backfill_iteration(
        backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )
    # Only the upstream hourly partition has been launched so far.
    assert len(instance.get_runs()) == 1

    # NOTE(review): removed a dead `canceling_backfill_data = None` assignment
    # that was immediately overwritten by the call below.
    canceling_backfill_data = get_canceling_asset_backfill_iteration_data(
        backfill_id,
        asset_backfill_data,
        _get_asset_graph_view(
            instance,
            asset_graph,
            backfill_start_datetime,
        ),
        backfill_start_datetime.timestamp(),
    )
    assert isinstance(canceling_backfill_data, AssetBackfillData)
    assert (
        canceling_backfill_data.all_requested_partitions_marked_as_materialized_or_failed() is True
    )
    assert (
        canceling_backfill_data.materialized_subset.get_partitions_subset(
            upstream_hourly_partitioned_asset.key, asset_graph
        ).get_partition_keys()
        == targeted_partitions
    )
    # Downstream was never requested, so nothing materialized for it.
    assert (
        canceling_backfill_data.materialized_subset.get_partitions_subset(
            downstream_daily_partitioned_asset.key, asset_graph
        ).get_partition_keys()
        == []
    )
def test_asset_backfill_cancels_without_fetching_downstreams_of_failed_partitions():
    """When the upstream hourly partition fails, both it and its daily downstream
    end up in ``failed_and_downstream_subset``, and the canceling-iteration data
    preserves that subset.
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.HourlyPartitionsDefinition("2023-01-01-00:00"))
    def upstream_hourly_partitioned_asset():
        raise Exception("noo")

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-01-01"))
    def downstream_daily_partitioned_asset(
        upstream_hourly_partitioned_asset,
    ):
        return upstream_hourly_partitioned_asset + 1

    assets_by_repo_name = {
        "repo": [
            upstream_hourly_partitioned_asset,
            downstream_daily_partitioned_asset,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    backfill_id = "dummy_backfill_id"
    backfill_start_datetime = create_datetime(2023, 1, 10, 0, 0, 0)
    asset_selection = [
        upstream_hourly_partitioned_asset.key,
        downstream_daily_partitioned_asset.key,
    ]
    targeted_partitions = ["2023-01-09-00:00"]
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=targeted_partitions,
        asset_selection=asset_selection,
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=backfill_start_datetime.timestamp(),
    )

    for _ in range(2):
        # One iteration to submit a run targeting the partition
        # Second iteration to update the asset backfill data
        asset_backfill_data = _single_backfill_iteration(
            backfill_id, asset_backfill_data, asset_graph, instance, assets_by_repo_name
        )

    assert (
        AssetKeyPartitionKey(upstream_hourly_partitioned_asset.key, "2023-01-09-00:00")
        in asset_backfill_data.failed_and_downstream_subset
    )
    assert (
        AssetKeyPartitionKey(downstream_daily_partitioned_asset.key, "2023-01-09")
        in asset_backfill_data.failed_and_downstream_subset
    )

    # NOTE(review): removed a dead `canceling_backfill_data = None` assignment
    # that was immediately overwritten by the call below.
    canceling_backfill_data = get_canceling_asset_backfill_iteration_data(
        backfill_id,
        asset_backfill_data,
        _get_asset_graph_view(instance, asset_graph, backfill_start_datetime),
        backfill_start_datetime.timestamp(),
    )
    assert isinstance(canceling_backfill_data, AssetBackfillData)
    assert (
        AssetKeyPartitionKey(upstream_hourly_partitioned_asset.key, "2023-01-09-00:00")
        in canceling_backfill_data.failed_and_downstream_subset
    )
    assert (
        AssetKeyPartitionKey(downstream_daily_partitioned_asset.key, "2023-01-09")
        in canceling_backfill_data.failed_and_downstream_subset
    )
def test_asset_backfill_target_asset_and_same_partitioning_grandchild():
    """Selecting a root asset and its same-partitioned grandchild (skipping the
    child) targets all partitions of both, and a single iteration requests the
    entire target subset.
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"), deps=[foo])
    def foo_child():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"), deps=[foo_child])
    def foo_grandchild():
        pass

    assets_by_repo_name = {
        "repo": [
            foo,
            foo_child,
            foo_grandchild,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    # foo_child is deliberately excluded from the selection.
    asset_selection = [
        foo.key,
        foo_grandchild.key,
    ]
    all_partitions = [f"2023-10-0{x}" for x in range(1, 5)]
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=None,
        asset_selection=asset_selection,
        dynamic_partitions_store=MagicMock(),
        all_partitions=True,
        backfill_start_timestamp=create_datetime(2023, 10, 5, 0, 0, 0).timestamp(),
    )

    assert set(asset_backfill_data.target_subset.iterate_asset_partitions()) == {
        AssetKeyPartitionKey(asset_key, partition_key)
        for asset_key in [foo.key, foo_grandchild.key]
        for partition_key in all_partitions
    }

    asset_backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )
    assert asset_backfill_data.requested_subset == asset_backfill_data.target_subset
def test_asset_backfill_target_asset_and_differently_partitioned_grandchild():
    """Selecting a daily root and its weekly-partitioned grandchild targets all
    existent partitions of each (one complete week), and a single iteration
    requests the entire target subset.
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"), deps={foo})
    def foo_child():
        pass

    @dg.asset(partitions_def=dg.WeeklyPartitionsDefinition("2023-10-01"), deps={foo_child})
    def foo_grandchild():
        pass

    assets_by_repo_name = {
        "repo": [
            foo,
            foo_child,
            foo_grandchild,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    # foo_child is deliberately excluded from the selection.
    asset_selection = [
        foo.key,
        foo_grandchild.key,
    ]
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=None,
        asset_selection=asset_selection,
        dynamic_partitions_store=MagicMock(),
        all_partitions=True,
        backfill_start_timestamp=create_datetime(2023, 10, 8, 0, 0, 0).timestamp(),
    )

    # One weekly partition exists by 2023-10-08, plus seven daily partitions of foo.
    expected_targeted_partitions = {
        AssetKeyPartitionKey(foo_grandchild.key, "2023-10-01"),
        *{
            AssetKeyPartitionKey(asset_key, partition_key)
            for asset_key in [foo.key]
            for partition_key in [f"2023-10-0{x}" for x in range(1, 8)]
        },
    }

    assert (
        set(asset_backfill_data.target_subset.iterate_asset_partitions())
        == expected_targeted_partitions
    )

    asset_backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )
    assert asset_backfill_data.requested_subset == asset_backfill_data.target_subset
def test_asset_backfill_nonexistent_parent_partitions():
    """With ``allow_nonexistent_upstream_partitions=True`` on the partition
    mapping, the child's partitions that predate the parent's start date are
    still targeted and materialize to completion.
    """
    instance = DagsterInstance.ephemeral()

    # Parent starts on 10-05; the child starts earlier, on 10-01.
    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-05"))
    def foo():
        pass

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-10-01"),
        ins={
            "foo": dg.AssetIn(
                key=foo.key,
                partition_mapping=dg.TimeWindowPartitionMapping(
                    allow_nonexistent_upstream_partitions=True
                ),
                dagster_type=Nothing,
            )
        },
    )
    def foo_child():
        pass

    assets_by_repo_name = {
        "repo": [
            foo,
            foo_child,
        ]
    }
    asset_graph = get_asset_graph(assets_by_repo_name)

    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=None,
        asset_selection=[foo.key, foo_child.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=True,
        backfill_start_timestamp=create_datetime(2023, 10, 8, 0, 0, 0).timestamp(),
    )

    backfill_data, _, _ = run_backfill_to_completion(
        asset_graph, assets_by_repo_name, asset_backfill_data, [], instance
    )

    assert set(backfill_data.target_subset.get_partitions_subset(foo.key).get_partition_keys()) == {
        "2023-10-05",
        "2023-10-06",
        "2023-10-07",
    }
    # Child partitions before the parent's start date are included anyway.
    assert set(
        backfill_data.target_subset.get_partitions_subset(foo_child.key).get_partition_keys()
    ) == {
        "2023-10-01",
        "2023-10-02",
        "2023-10-03",
        "2023-10-04",
        "2023-10-05",
        "2023-10-06",
        "2023-10-07",
    }
    assert backfill_data.target_subset == backfill_data.materialized_subset
def test_connected_assets_disconnected_partitions():
    """Per-asset partition ranges that do not overlap along the dependency chain:
    the target *root* subset contains the foo partitions plus the foo_grandchild
    partitions whose upstream range was not targeted (10-10..10-13).
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo_child(foo):
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo_grandchild(foo_child):
        pass

    assets_by_repo_name = {"repo": [foo, foo_child, foo_grandchild]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    backfill_start_datetime = create_datetime(2023, 10, 30, 0, 0, 0)
    instance_queryer = _get_instance_queryer(instance, asset_graph, backfill_start_datetime)

    # Each asset targets its own (non-overlapping, for foo_grandchild) range.
    asset_backfill_data = AssetBackfillData.from_partitions_by_assets(
        asset_graph,
        instance_queryer,
        backfill_start_datetime.timestamp(),
        [
            PartitionsByAssetSelector(
                asset_key=foo.key,
                partitions=PartitionsSelector([PartitionRangeSelector("2023-10-01", "2023-10-05")]),
            ),
            PartitionsByAssetSelector(
                asset_key=foo_child.key,
                partitions=PartitionsSelector([PartitionRangeSelector("2023-10-01", "2023-10-03")]),
            ),
            PartitionsByAssetSelector(
                asset_key=foo_grandchild.key,
                partitions=PartitionsSelector([PartitionRangeSelector("2023-10-10", "2023-10-13")]),
            ),
        ],
    )

    target_root_subset = asset_backfill_data.get_target_root_asset_graph_subset(
        _get_asset_graph_view(instance, asset_graph, backfill_start_datetime)
    )
    assert set(target_root_subset.iterate_asset_partitions()) == {
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo"]), partition_key="2023-10-05"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo"]), partition_key="2023-10-03"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo"]), partition_key="2023-10-04"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo"]), partition_key="2023-10-02"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo"]), partition_key="2023-10-01"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo_grandchild"]), partition_key="2023-10-11"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo_grandchild"]), partition_key="2023-10-13"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo_grandchild"]), partition_key="2023-10-12"),
        AssetKeyPartitionKey(asset_key=dg.AssetKey(["foo_grandchild"]), partition_key="2023-10-10"),
    }
def test_partition_outside_backfill_materialized():
    """Tests the case where the PartitionsDefinition has a new partition since the
    backfill started, and that partition is materialized outside of the backfill:
    the externally materialized partition must not appear in the backfill's
    materialized subset.
    """
    instance = DagsterInstance.ephemeral()

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"), deps={foo})
    def foo_child():
        pass

    # The two assets live in different repos.
    assets_by_repo_name = {"repo1": [foo], "repo2": [foo_child]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["2023-10-01", "2023-10-02"],
        asset_selection=[foo.key, foo_child.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 10, 3, 0, 0, 0).timestamp(),
    )

    backfill_data, _, _ = run_backfill_to_completion(
        asset_graph, assets_by_repo_name, asset_backfill_data, [], instance
    )
    _single_backfill_iteration(
        backfill_id="apple",
        backfill_data=backfill_data,
        asset_graph=asset_graph,
        instance=instance,
        assets_by_repo_name=assets_by_repo_name,
    )

    # Materialize a partition that was never part of the backfill's target.
    dg.materialize(assets=[foo], partition_key="2023-10-03", instance=instance)

    result_backfill_data = _single_backfill_iteration(
        backfill_id="apple",
        backfill_data=backfill_data,
        asset_graph=asset_graph,
        instance=instance,
        assets_by_repo_name=assets_by_repo_name,
    )

    materialized_subset = result_backfill_data.materialized_subset
    assert result_backfill_data.target_subset == materialized_subset
    assert (
        "2023-10-03" not in materialized_subset.get_partitions_subset(foo.key).get_partition_keys()
    )
    assert (
        "2023-10-03"
        not in materialized_subset.get_partitions_subset(foo_child.key).get_partition_keys()
    )
def test_asset_backfill_unpartitioned_downstream_of_partitioned():
    """Backfill a parent/child pair where the child depends on the parent via
    ``LastPartitionMapping``; both assets' target subsets cover the requested
    range and the backfill runs to completion.

    NOTE(review): despite the test name, both assets here share the same daily
    partitions def — the "unpartitioned downstream" in the name does not match
    the code; confirm intent.
    """
    instance = DagsterInstance.ephemeral()

    foo_partitions_def = dg.DailyPartitionsDefinition("2023-10-01")

    @dg.asset(partitions_def=foo_partitions_def)
    def foo():
        pass

    @dg.asset(
        partitions_def=foo_partitions_def,
        ins={
            "foo": dg.AssetIn(
                key=foo.key, partition_mapping=dg.LastPartitionMapping(), dagster_type=Nothing
            )
        },
    )
    def foo_child():
        pass

    assets_by_repo_name = {"repo": [foo, foo_child]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    partition_key_range = dg.PartitionKeyRange(start="2023-10-01", end="2023-10-07")
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=foo_partitions_def.get_partition_keys_in_range(partition_key_range),
        asset_selection=[foo.key, foo_child.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 10, 8, 0, 0, 0).timestamp(),
    )

    assert asset_backfill_data.target_subset.partitions_subsets_by_asset_key == {
        foo.key: foo_partitions_def.empty_subset().with_partition_key_range(
            foo_partitions_def, partition_key_range
        ),
        foo_child.key: foo_partitions_def.empty_subset().with_partition_key_range(
            foo_partitions_def, partition_key_range
        ),
    }

    run_backfill_to_completion(asset_graph, assets_by_repo_name, asset_backfill_data, [], instance)
def test_asset_backfill_serialization_deserialization():
    """Round-tripping AssetBackfillData through serialize/deserialize yields an
    equal object, for a mixed partitioned/unpartitioned selection."""
    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
    )
    def upstream():
        pass

    @dg.asset
    def middle():
        pass

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
    )
    def downstream(upstream):
        pass

    assets_by_repo_name = {"repo": [upstream, downstream, middle]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["2023-01-01", "2023-01-02", "2023-01-05"],
        asset_selection=[upstream.key, middle.key, downstream.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 1, 9, 0, 0, 0).timestamp(),
    )

    assert (
        dg.deserialize_value(dg.serialize_value(asset_backfill_data), AssetBackfillData)
        == asset_backfill_data
    )
def test_asset_backfill_unpartitioned_root_turned_to_partitioned():
    """A backfill created while the root asset was unpartitioned still resolves a
    target root partitions subset after the root becomes daily-partitioned."""
    @dg.asset
    def first():
        return 1

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2024-01-01"),
        ins={"first": dg.AssetIn(key=dg.AssetKey("first"))},
    )
    def second(first):
        return 1

    # Same key as `first`, but now daily-partitioned.
    @dg.asset(key=dg.AssetKey("first"), partitions_def=dg.DailyPartitionsDefinition("2024-01-01"))
    def first_partitioned():
        return 1

    repo_with_unpartitioned_root = {"repo": [first, second]}
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=get_asset_graph(repo_with_unpartitioned_root),
        partition_names=["2024-01-01"],
        asset_selection=[first.key, second.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2024, 1, 9, 0, 0, 0).timestamp(),
    )

    repo_with_partitioned_root = {"repo": [first_partitioned, second]}
    assert asset_backfill_data.get_target_root_partitions_subset(
        get_asset_graph(repo_with_partitioned_root)
    ).get_partition_keys() == ["2024-01-01"]  # pyright: ignore[reportOptionalMemberAccess]
def test_asset_backfill_start_date_changed():
    """Backfill data created against a 2024-01-01-starting partitions def remains
    valid and runnable after the asset's start date moves earlier (2023-01-01)."""
    instance = DagsterInstance.ephemeral()

    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2024-01-01"),
    )
    def first():
        return 1

    # Same asset name, but with an earlier partitions-def start date.
    @dg.asset(
        partitions_def=dg.DailyPartitionsDefinition("2023-01-01"),
        name="first",
    )
    def new_first():
        return 1

    old_repo = {"repo": [first]}
    new_repo = {"repo": [new_first]}

    start_time = create_datetime(2024, 1, 9, 0, 0, 0)
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=get_asset_graph(old_repo),
        partition_names=["2024-01-01"],
        asset_selection=[first.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=start_time.timestamp(),
    )

    new_asset_graph = get_asset_graph(new_repo)
    # Should not raise: the old data is still valid against the new graph.
    _check_asset_backfill_data_validity(
        asset_backfill_data,
        new_asset_graph,
        _get_instance_queryer(instance, new_asset_graph, start_time),
    )

    asset_backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, new_asset_graph, instance, new_repo
    )
    assert list(asset_backfill_data.requested_subset.iterate_asset_partitions()) == list(
        asset_backfill_data.target_subset.iterate_asset_partitions()
    )
def test_multi_asset_internal_deps_asset_backfill():
    """A multi-asset with internal deps (a -> b, a -> c) has all three outputs
    requested for the targeted partition in a single iteration."""
    @dg.multi_asset(
        outs={"a": dg.AssetOut(key="a"), "b": dg.AssetOut(key="b"), "c": dg.AssetOut(key="c")},
        internal_asset_deps={"c": {dg.AssetKey("a")}, "b": {dg.AssetKey("a")}, "a": set()},
        partitions_def=dg.StaticPartitionsDefinition(["1", "2", "3"]),
    )
    def my_multi_asset():
        pass

    instance = DagsterInstance.ephemeral()
    repo_with_unpartitioned_root = {"repo": [my_multi_asset]}
    asset_graph = get_asset_graph(repo_with_unpartitioned_root)

    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["1"],
        asset_selection=[dg.AssetKey("a"), dg.AssetKey("b"), dg.AssetKey("c")],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2024, 1, 9, 0, 0, 0).timestamp(),
    )
    backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, repo_with_unpartitioned_root
    )

    assert AssetKeyPartitionKey(dg.AssetKey("a"), "1") in backfill_data.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("b"), "1") in backfill_data.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("c"), "1") in backfill_data.requested_subset
def test_multi_asset_internal_deps_different_partitions_asset_backfill() -> None:
    """A subsettable multi-asset whose internal dep crosses partitions defs
    (asset1 partitions a/b map onto asset2 partition 1): asset1's partitions are
    requested in iteration 1, asset2's only in iteration 2.
    """
    @dg.multi_asset(
        specs=[
            dg.AssetSpec(
                "asset1", partitions_def=dg.StaticPartitionsDefinition(["a", "b"]), skippable=True
            ),
            dg.AssetSpec(
                "asset2",
                partitions_def=dg.StaticPartitionsDefinition(["1"]),
                deps=[
                    dg.AssetDep(
                        "asset1",
                        partition_mapping=dg.StaticPartitionMapping({"a": {"1"}, "b": {"1"}}),
                    )
                ],
                skippable=True,
            ),
        ],
        can_subset=True,
    )
    def my_multi_asset(context):
        for asset_key in context.selected_asset_keys:
            yield dg.MaterializeResult(asset_key=asset_key)

    instance = DagsterInstance.ephemeral()
    repo_dict = {"repo": [my_multi_asset]}
    asset_graph = get_asset_graph(repo_dict)

    current_time = create_datetime(2024, 1, 9, 0, 0, 0)
    with partition_loading_context(current_time, instance):
        asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
            asset_graph_subset=AssetGraphSubset.all(asset_graph),
            backfill_start_timestamp=current_time.timestamp(),
            dynamic_partitions_store=MagicMock(),
        )

    # Iteration 1: only asset1's partitions can be requested.
    backfill_data_after_iter1 = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, repo_dict
    )
    after_iter1_requested_subset = backfill_data_after_iter1.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("asset1"), "a") in after_iter1_requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("asset1"), "b") in after_iter1_requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("asset2"), "1") not in after_iter1_requested_subset

    # Iteration 2: asset2 becomes requestable once its upstream partitions ran.
    backfill_data_after_iter2 = _single_backfill_iteration(
        "fake_id", backfill_data_after_iter1, asset_graph, instance, repo_dict
    )
    after_iter2_requested_subset = backfill_data_after_iter2.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("asset2"), "1") in after_iter2_requested_subset
def test_multi_asset_internal_and_external_deps_asset_backfill() -> None:
    """A multi-asset with both an internal dep (a -> b/c) and an external dep
    (upstream -> c) still has all three outputs requested for the targeted
    partition in one iteration.
    """
    pd = dg.StaticPartitionsDefinition(["1", "2", "3"])

    @dg.asset(partitions_def=pd)
    def upstream():
        pass

    @dg.multi_asset(
        deps={upstream},
        outs={"a": dg.AssetOut(key="a"), "b": dg.AssetOut(key="b"), "c": dg.AssetOut(key="c")},
        internal_asset_deps={
            "c": {dg.AssetKey("a"), dg.AssetKey("upstream")},
            "b": {dg.AssetKey("a")},
            "a": set(),
        },
        partitions_def=pd,
    )
    def my_multi_asset():
        pass

    instance = DagsterInstance.ephemeral()
    repo_with_unpartitioned_root = {"repo": [my_multi_asset, upstream]}
    asset_graph = get_asset_graph(repo_with_unpartitioned_root)

    # `upstream` is not part of the selection.
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["1"],
        asset_selection=[dg.AssetKey("a"), dg.AssetKey("b"), dg.AssetKey("c")],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2024, 1, 9, 0, 0, 0).timestamp(),
    )
    backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, repo_with_unpartitioned_root
    )

    assert AssetKeyPartitionKey(dg.AssetKey("a"), "1") in backfill_data.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("b"), "1") in backfill_data.requested_subset
    assert AssetKeyPartitionKey(dg.AssetKey("c"), "1") in backfill_data.requested_subset
def test_run_request_partition_order():
    """Run requests are emitted in sorted partition order regardless of the order
    partition names were passed to the backfill."""
    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"))
    def foo():
        pass

    @dg.asset(partitions_def=dg.DailyPartitionsDefinition("2023-10-01"), deps={foo})
    def foo_child():
        pass

    assets_by_repo_name = {"repo1": [foo], "repo2": [foo_child]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    # Partition names deliberately out of order.
    asset_backfill_data = AssetBackfillData.from_asset_partitions(
        asset_graph=asset_graph,
        partition_names=["2023-10-02", "2023-10-01", "2023-10-03"],
        asset_selection=[foo.key, foo_child.key],
        dynamic_partitions_store=MagicMock(),
        all_partitions=False,
        backfill_start_timestamp=create_datetime(2023, 10, 4, 0, 0, 0).timestamp(),
    )

    result = execute_asset_backfill_iteration_consume_generator(
        "apple", asset_backfill_data, asset_graph, DagsterInstance.ephemeral()
    )

    assert [run_request.partition_key for run_request in result.run_requests] == [
        "2023-10-01",
        "2023-10-02",
        "2023-10-03",
    ]
def test_asset_backfill_multiple_partition_ranges():
    """A target subset built from two disjoint partition-key ranges expands to the
    expected individual partitions and is fully requested in one iteration."""
    instance = DagsterInstance.ephemeral()

    partitions_def = dg.DailyPartitionsDefinition("2023-10-01")

    @dg.asset(partitions_def=partitions_def)
    def foo():
        pass

    @dg.asset(partitions_def=partitions_def, deps=[foo])
    def foo_child():
        pass

    assets_by_repo_name = {"repo": [foo, foo_child]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    # Two disjoint ranges: 11-01..11-03 and 11-06..11-07.
    target_partitions_subset = (
        partitions_def.empty_subset()
        .with_partition_key_range(partitions_def, dg.PartitionKeyRange("2023-11-01", "2023-11-03"))
        .with_partition_key_range(partitions_def, dg.PartitionKeyRange("2023-11-06", "2023-11-07"))
    )
    asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
        asset_graph_subset=AssetGraphSubset(
            partitions_subsets_by_asset_key={
                foo.key: target_partitions_subset,
                foo_child.key: target_partitions_subset,
            }
        ),
        dynamic_partitions_store=MagicMock(),
        backfill_start_timestamp=create_datetime(2023, 12, 5, 0, 0, 0).timestamp(),
    )
    assert set(asset_backfill_data.target_subset.iterate_asset_partitions()) == {
        AssetKeyPartitionKey(foo.key, "2023-11-01"),
        AssetKeyPartitionKey(foo.key, "2023-11-02"),
        AssetKeyPartitionKey(foo.key, "2023-11-03"),
        AssetKeyPartitionKey(foo.key, "2023-11-06"),
        AssetKeyPartitionKey(foo.key, "2023-11-07"),
        AssetKeyPartitionKey(foo_child.key, "2023-11-01"),
        AssetKeyPartitionKey(foo_child.key, "2023-11-02"),
        AssetKeyPartitionKey(foo_child.key, "2023-11-03"),
        AssetKeyPartitionKey(foo_child.key, "2023-11-06"),
        AssetKeyPartitionKey(foo_child.key, "2023-11-07"),
    }

    asset_backfill_data = _single_backfill_iteration(
        "fake_id", asset_backfill_data, asset_graph, instance, assets_by_repo_name
    )
    assert asset_backfill_data.requested_subset == asset_backfill_data.target_subset
def test_asset_backfill_with_asset_check():
    """With a single-run backfill policy, one run request covers the whole target
    range and includes the asset's check key."""
    instance = DagsterInstance.ephemeral()
    partitions_def = dg.DailyPartitionsDefinition("2023-10-01")

    @dg.asset(partitions_def=partitions_def, backfill_policy=BackfillPolicy.single_run())
    def foo():
        pass

    @dg.asset_check(asset=foo)
    def foo_check():
        return dg.AssetCheckResult(passed=True)

    assets_by_repo_name = {"repo": [foo, foo_check]}
    asset_graph = get_asset_graph(assets_by_repo_name)

    target_partitions_subset = partitions_def.empty_subset().with_partition_key_range(
        partitions_def, dg.PartitionKeyRange("2023-11-01", "2023-11-03")
    )
    asset_backfill_data = AssetBackfillData.from_asset_graph_subset(
        asset_graph_subset=AssetGraphSubset(
            partitions_subsets_by_asset_key={foo.key: target_partitions_subset}
        ),
        dynamic_partitions_store=MagicMock(),
        backfill_start_timestamp=create_datetime(2023, 12, 5, 0, 0, 0).timestamp(),
    )
    assert set(asset_backfill_data.target_subset.iterate_asset_partitions()) == {
        AssetKeyPartitionKey(foo.key, "2023-11-01"),
        AssetKeyPartitionKey(foo.key, "2023-11-02"),
        AssetKeyPartitionKey(foo.key, "2023-11-03"),
    }

    result = execute_asset_backfill_iteration_consume_generator(
        backfill_id="fake_id",
        asset_backfill_data=asset_backfill_data,
        asset_graph=asset_graph,
        instance=instance,
    )
    # single_run backfill policy -> one run for the entire range, checks included.
    assert len(result.run_requests) == 1
    run_request = result.run_requests[0]
    assert run_request.asset_selection == [foo.key]
    assert run_request.asset_check_keys == [foo_check.check_key]
| AssetBackfillScenario |
python | walkccc__LeetCode | solutions/2606. Find the Substring With Maximum Cost/2606.py | {
"start": 0,
"end": 379
} | class ____:
def maximumCostSubstring(self, s: str, chars: str, vals: list[int]) -> int:
ans = 0
cost = 0
costs = [i for i in range(1, 27)] # costs[i] := the cost of 'a' + i
for c, val in zip(chars, vals):
costs[ord(c) - ord('a')] = val
for c in s:
cost = max(0, cost + costs[ord(c) - ord('a')])
ans = max(ans, cost)
return ans
| Solution |
python | plotly__plotly.py | plotly/graph_objs/bar/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9933
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar.marker.colorbar"
_path_str = "bar.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | ray-project__ray | rllib/models/tests/test_catalog.py | {
"start": 1006,
"end": 1118
} | class ____(TFModelV2):
def _build_layers(self, *args):
return tf.constant([[0] * 5]), None
| CustomModel |
python | huggingface__transformers | tests/quantization/quark_integration/test_quark.py | {
"start": 5915,
"end": 5976
} | class ____(QuarkTest):
device_map = "auto"
| QuarkTestDeviceMap |
python | doocs__leetcode | lcci/17.20.Continuous Median/Solution.py | {
"start": 0,
"end": 594
} | class ____:
def __init__(self):
self.minq = []
self.maxq = []
def addNum(self, num: int) -> None:
heappush(self.minq, -heappushpop(self.maxq, -num))
if len(self.minq) - len(self.maxq) > 1:
heappush(self.maxq, -heappop(self.minq))
def findMedian(self) -> float:
if len(self.minq) == len(self.maxq):
return (self.minq[0] - self.maxq[0]) / 2
return self.minq[0]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| MedianFinder |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 14295,
"end": 15651
} | class ____(logging.Filter):
"""Converts a log record to one Sphinx expects
* Make a instance of SphinxLogRecord
* docname to path if location given
* append warning type/subtype to message if :confval:`show_warning_types` is ``True``
"""
LogRecordClass: type[logging.LogRecord]
def __init__(self, app: Sphinx) -> None:
self._app = app
super().__init__()
def filter(self, record: SphinxWarningLogRecord) -> bool: # type: ignore[override]
if isinstance(record, logging.LogRecord):
# force subclassing to handle location
record.__class__ = self.LogRecordClass # type: ignore[assignment]
location = getattr(record, 'location', None)
if isinstance(location, tuple):
docname, lineno = location
if docname:
if lineno:
record.location = f'{self._app.env.doc2path(docname)}:{lineno}'
else:
record.location = f'{self._app.env.doc2path(docname)}'
else:
record.location = None
elif isinstance(location, nodes.Node):
record.location = get_node_location(location)
elif location and ':' not in location:
record.location = f'{self._app.env.doc2path(location)}'
return True
| SphinxLogRecordTranslator |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 569,
"end": 699
} | class ____(Exception):
headline = "Airflow version is incompatible to support Metaflow `foreach`s."
| ForeachIncompatibleException |
python | spack__spack | lib/spack/spack/package_prefs.py | {
"start": 532,
"end": 9188
} | class ____:
"""Defines the sort order for a set of specs.
Spack's package preference implementation uses PackagePrefs to
define sort order. The PackagePrefs class looks at Spack's
packages.yaml configuration and, when called on a spec, returns a key
that can be used to sort that spec in order of the user's
preferences.
You can use it like this::
# key function sorts CompilerSpecs for `mpich` in order of preference
kf = PackagePrefs("mpich", "compiler")
compiler_list.sort(key=kf)
Or like this::
# key function to sort VersionLists for OpenMPI in order of preference.
kf = PackagePrefs("openmpi", "version")
version_list.sort(key=kf)
Optionally, you can sort in order of preferred virtual dependency
providers. To do that, provide ``"providers"`` and a third argument
denoting the virtual package (e.g., ``mpi``)::
kf = PackagePrefs("trilinos", "providers", "mpi")
provider_spec_list.sort(key=kf)
"""
def __init__(self, pkgname, component, vpkg=None, all=True):
self.pkgname = pkgname
self.component = component
self.vpkg = vpkg
self.all = all
self._spec_order = None
def __call__(self, spec):
"""Return a key object (an index) that can be used to sort spec.
Sort is done in package order. We don't cache the result of
this function as Python's sort functions already ensure that the
key function is called at most once per sorted element.
"""
if self._spec_order is None:
self._spec_order = self._specs_for_pkg(
self.pkgname, self.component, self.vpkg, self.all
)
spec_order = self._spec_order
# integer is the index of the first spec in order that satisfies
# spec, or it's a number larger than any position in the order.
match_index = next(
(i for i, s in enumerate(spec_order) if spec.intersects(s)), len(spec_order)
)
if match_index < len(spec_order) and spec_order[match_index] == spec:
# If this is called with multiple specs that all satisfy the same
# minimum index in spec_order, the one which matches that element
# of spec_order exactly is considered slightly better. Note
# that because this decreases the value by less than 1, it is not
# better than a match which occurs at an earlier index.
match_index -= 0.5
return match_index
@classmethod
def order_for_package(cls, pkgname, component, vpkg=None, all=True):
"""Given a package name, sort component (e.g, version, compiler, ...),
and an optional vpkg, return the list from the packages config.
"""
pkglist = [pkgname]
if all:
pkglist.append("all")
packages = spack.config.CONFIG.get_config("packages")
for pkg in pkglist:
pkg_entry = packages.get(pkg)
if not pkg_entry:
continue
order = pkg_entry.get(component)
if not order:
continue
# vpkg is one more level
if vpkg is not None:
order = order.get(vpkg)
if order:
ret = [str(s).strip() for s in order]
if component == "target":
ret = ["target=%s" % tname for tname in ret]
return ret
return []
@classmethod
def _specs_for_pkg(cls, pkgname, component, vpkg=None, all=True):
"""Given a sort order specified by the pkgname/component/second_key,
return a list of CompilerSpecs, VersionLists, or Specs for
that sorting list.
"""
pkglist = cls.order_for_package(pkgname, component, vpkg, all)
spec_type = _spec_type(component)
return [spec_type(s) for s in pkglist]
@classmethod
def has_preferred_providers(cls, pkgname, vpkg):
"""Whether specific package has a preferred vpkg providers."""
return bool(cls.order_for_package(pkgname, "providers", vpkg, False))
@classmethod
def has_preferred_targets(cls, pkg_name):
"""Whether specific package has a preferred vpkg providers."""
return bool(cls.order_for_package(pkg_name, "target"))
@classmethod
def preferred_variants(cls, pkg_name):
"""Return a VariantMap of preferred variants/values for a spec."""
packages = spack.config.CONFIG.get_config("packages")
for pkg_cls in (pkg_name, "all"):
variants = packages.get(pkg_cls, {}).get("variants", "")
if variants:
break
# allow variants to be list or string
if not isinstance(variants, str):
variants = " ".join(variants)
# Only return variants that are actually supported by the package
pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name)
spec = spack.spec.Spec(f"{pkg_name} {variants}")
return {
name: variant
for name, variant in spec.variants.items()
if name in pkg_cls.variant_names()
}
def is_spec_buildable(spec):
"""Return true if the spec is configured as buildable"""
allpkgs = spack.config.get("packages")
all_buildable = allpkgs.get("all", {}).get("buildable", True)
so_far = all_buildable # the default "so far"
def _package(s):
pkg_cls = spack.repo.PATH.get_pkg_class(s.name)
return pkg_cls(s)
# check whether any providers for this package override the default
if any(
_package(spec).provides(name) and entry.get("buildable", so_far) != so_far
for name, entry in allpkgs.items()
):
so_far = not so_far
spec_buildable = allpkgs.get(spec.name, {}).get("buildable", so_far)
return spec_buildable
def get_package_dir_permissions(spec):
"""Return the permissions configured for the spec.
Include the GID bit if group permissions are on. This makes the group
attribute sticky for the directory. Package-specific settings take
precedent over settings for ``all``"""
perms = get_package_permissions(spec)
if perms & stat.S_IRWXG and spack.config.get("config:allow_sgid", True):
perms |= stat.S_ISGID
if spec.concrete and "/afs/" in spec.prefix:
warnings.warn(
"Directory {0} seems to be located on AFS. If you"
" encounter errors, try disabling the allow_sgid option"
" using: spack config add 'config:allow_sgid:false'".format(spec.prefix)
)
return perms
def get_package_permissions(spec):
"""Return the permissions configured for the spec.
Package-specific settings take precedence over settings for ``all``"""
# Get read permissions level
for name in (spec.name, "all"):
try:
readable = spack.config.get("packages:%s:permissions:read" % name, "")
if readable:
break
except AttributeError:
readable = "world"
# Get write permissions level
for name in (spec.name, "all"):
try:
writable = spack.config.get("packages:%s:permissions:write" % name, "")
if writable:
break
except AttributeError:
writable = "user"
perms = stat.S_IRWXU
if readable in ("world", "group"): # world includes group
perms |= stat.S_IRGRP | stat.S_IXGRP
if readable == "world":
perms |= stat.S_IROTH | stat.S_IXOTH
if writable in ("world", "group"):
if readable == "user":
raise ConfigError(
"Writable permissions may not be more"
+ " permissive than readable permissions.\n"
+ " Violating package is %s" % spec.name
)
perms |= stat.S_IWGRP
if writable == "world":
if readable != "world":
raise ConfigError(
"Writable permissions may not be more"
+ " permissive than readable permissions.\n"
+ " Violating package is %s" % spec.name
)
perms |= stat.S_IWOTH
return perms
def get_package_group(spec):
"""Return the unix group associated with the spec.
Package-specific settings take precedence over settings for ``all``"""
for name in (spec.name, "all"):
try:
group = spack.config.get("packages:%s:permissions:group" % name, "")
if group:
break
except AttributeError:
group = ""
return group
| PackagePrefs |
python | apache__airflow | airflow-core/src/airflow/models/xcom.py | {
"start": 1920,
"end": 14856
} | class ____(TaskInstanceDependencies):
"""XCom model class. Contains table and some utilities."""
__tablename__ = "xcom"
dag_run_id: Mapped[int] = mapped_column(Integer(), nullable=False, primary_key=True)
task_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), nullable=False, primary_key=True)
map_index: Mapped[int] = mapped_column(
Integer, primary_key=True, nullable=False, server_default=text("-1")
)
key: Mapped[str] = mapped_column(String(512, **COLLATION_ARGS), nullable=False, primary_key=True)
# Denormalized for easier lookup.
dag_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
run_id: Mapped[str] = mapped_column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
value: Mapped[Any] = mapped_column(JSON().with_variant(postgresql.JSONB, "postgresql"), nullable=True)
timestamp: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)
__table_args__ = (
# Ideally we should create a unique index over (key, dag_id, task_id, run_id),
# but it goes over MySQL's index length limit. So we instead index 'key'
# separately, and enforce uniqueness with DagRun.id instead.
Index("idx_xcom_key", key),
Index("idx_xcom_task_instance", dag_id, task_id, run_id, map_index),
PrimaryKeyConstraint("dag_run_id", "task_id", "map_index", "key", name="xcom_pkey"),
ForeignKeyConstraint(
[dag_id, task_id, run_id, map_index],
[
"task_instance.dag_id",
"task_instance.task_id",
"task_instance.run_id",
"task_instance.map_index",
],
name="xcom_task_instance_fkey",
ondelete="CASCADE",
),
)
dag_run = relationship(
"DagRun",
primaryjoin="XComModel.dag_run_id == foreign(DagRun.id)",
uselist=False,
lazy="joined",
passive_deletes="all",
)
logical_date = association_proxy("dag_run", "logical_date")
task = relationship(
"TaskInstance",
viewonly=True,
lazy="noload",
)
@classmethod
@provide_session
def clear(
cls,
*,
dag_id: str,
task_id: str,
run_id: str,
map_index: int | None = None,
session: Session = NEW_SESSION,
) -> None:
"""
Clear all XCom data from the database for the given task instance.
.. note:: This **will not** purge any data from a custom XCom backend.
:param dag_id: ID of DAG to clear the XCom for.
:param task_id: ID of task to clear the XCom for.
:param run_id: ID of DAG run to clear the XCom for.
:param map_index: If given, only clear XCom from this particular mapped
task. The default ``None`` clears *all* XComs from the task.
:param session: Database session. If not given, a new session will be
created for this function.
"""
# Given the historic order of this function (logical_date was first argument) to add a new optional
# param we need to add default values for everything :(
if dag_id is None:
raise TypeError("clear() missing required argument: dag_id")
if task_id is None:
raise TypeError("clear() missing required argument: task_id")
if not run_id:
raise ValueError(f"run_id must be passed. Passed run_id={run_id}")
query = select(cls).where(cls.dag_id == dag_id, cls.task_id == task_id, cls.run_id == run_id)
if map_index is not None:
query = query.where(cls.map_index == map_index)
for xcom in session.scalars(query):
# print(f"Clearing XCOM {xcom} with value {xcom.value}")
session.delete(xcom)
session.commit()
@classmethod
@provide_session
def set(
cls,
key: str,
value: Any,
*,
dag_id: str,
task_id: str,
run_id: str,
map_index: int = -1,
serialize: bool = True,
session: Session = NEW_SESSION,
) -> None:
"""
Store an XCom value.
:param key: Key to store the XCom.
:param value: XCom value to store.
:param dag_id: DAG ID.
:param task_id: Task ID.
:param run_id: DAG run ID for the task.
:param map_index: Optional map index to assign XCom for a mapped task.
:param serialize: Optional parameter to specify if value should be serialized or not.
The default is ``True``.
:param session: Database session. If not given, a new session will be
created for this function.
"""
from airflow.models.dagrun import DagRun
if not key:
raise ValueError(f"XCom key must be a non-empty string. Received: {key!r}")
if not run_id:
raise ValueError(f"run_id must be passed. Passed run_id={run_id}")
dag_run_id = session.scalar(select(DagRun.id).where(DagRun.dag_id == dag_id, DagRun.run_id == run_id))
if dag_run_id is None:
raise ValueError(f"DAG run not found on DAG {dag_id!r} with ID {run_id!r}")
# Seamlessly resolve LazySelectSequence to a list. This intends to work
# as a "lazy list" to avoid pulling a ton of XComs unnecessarily, but if
# it's pushed into XCom, the user should be aware of the performance
# implications, and this avoids leaking the implementation detail.
if isinstance(value, LazySelectSequence):
warning_message = (
"Coercing mapped lazy proxy %s from task %s (DAG %s, run %s) "
"to list, which may degrade performance. Review resource "
"requirements for this operation, and call list() to suppress "
"this message. See Dynamic Task Mapping documentation for "
"more information about lazy proxy objects."
)
log.warning(
warning_message,
"return value" if key == XCOM_RETURN_KEY else f"value {key}",
task_id,
dag_id,
run_id,
)
value = list(value)
if serialize:
value = cls.serialize_value(
value=value,
key=key,
task_id=task_id,
dag_id=dag_id,
run_id=run_id,
map_index=map_index,
)
# Remove duplicate XComs and insert a new one.
session.execute(
delete(cls).where(
cls.key == key,
cls.run_id == run_id,
cls.task_id == task_id,
cls.dag_id == dag_id,
cls.map_index == map_index,
)
)
new = cls(
dag_run_id=dag_run_id,
key=key,
value=value,
run_id=run_id,
task_id=task_id,
dag_id=dag_id,
map_index=map_index,
)
session.add(new)
session.flush()
@classmethod
def get_many(
cls,
*,
run_id: str,
key: str | None = None,
task_ids: str | Iterable[str] | None = None,
dag_ids: str | Iterable[str] | None = None,
map_indexes: int | Iterable[int] | None = None,
include_prior_dates: bool = False,
limit: int | None = None,
) -> Select[tuple[XComModel]]:
"""
Composes a query to get one or more XCom entries.
This function returns an SQLAlchemy query of full XCom objects. If you
just want one stored value, use :meth:`get_one` instead.
:param run_id: DAG run ID for the task.
:param key: A key for the XComs. If provided, only XComs with matching
keys will be returned. Pass *None* (default) to remove the filter.
:param task_ids: Only XComs from task with matching IDs will be pulled.
Pass *None* (default) to remove the filter.
:param dag_ids: Only pulls XComs from specified DAGs. Pass *None*
(default) to remove the filter.
:param map_indexes: Only XComs from matching map indexes will be pulled.
Pass *None* (default) to remove the filter.
:param include_prior_dates: If *False* (default), only XComs from the
specified DAG run are returned. If *True*, all matching XComs are
returned regardless of the run it belongs to.
:param session: Database session. If not given, a new session will be
created for this function.
:param limit: Limiting returning XComs
"""
from airflow.models.dagrun import DagRun
if key is not None and not key:
raise ValueError(f"XCom key must be a non-empty string. Received: {key!r}")
if not run_id:
raise ValueError(f"run_id must be passed. Passed run_id={run_id}")
query = select(cls).join(XComModel.dag_run)
if key:
query = query.where(XComModel.key == key)
if is_container(task_ids):
query = query.where(cls.task_id.in_(task_ids))
elif task_ids is not None:
query = query.where(cls.task_id == task_ids)
if is_container(dag_ids):
query = query.where(cls.dag_id.in_(dag_ids))
elif dag_ids is not None:
query = query.where(cls.dag_id == dag_ids)
if isinstance(map_indexes, range) and map_indexes.step == 1:
query = query.where(cls.map_index >= map_indexes.start, cls.map_index < map_indexes.stop)
elif is_container(map_indexes):
query = query.where(cls.map_index.in_(map_indexes))
elif map_indexes is not None:
query = query.where(cls.map_index == map_indexes)
if include_prior_dates:
dr = (
select(
func.coalesce(DagRun.logical_date, DagRun.run_after).label("logical_date_or_run_after")
)
.where(DagRun.run_id == run_id)
.subquery()
)
query = query.where(
func.coalesce(DagRun.logical_date, DagRun.run_after) <= dr.c.logical_date_or_run_after
)
else:
query = query.where(cls.run_id == run_id)
query = query.order_by(DagRun.logical_date.desc(), cls.timestamp.desc())
if limit:
return query.limit(limit)
return query
@staticmethod
def serialize_value(
value: Any,
*,
key: str | None = None,
task_id: str | None = None,
dag_id: str | None = None,
run_id: str | None = None,
map_index: int | None = None,
) -> str:
"""Serialize XCom value to JSON str."""
try:
return json.dumps(value, cls=XComEncoder)
except (ValueError, TypeError):
raise ValueError("XCom value must be JSON serializable")
@staticmethod
def deserialize_value(result: Any) -> Any:
"""
Deserialize XCom value from a database result.
If deserialization fails, the raw value is returned, which must still be a valid Python JSON-compatible
type (e.g., ``dict``, ``list``, ``str``, ``int``, ``float``, or ``bool``).
XCom values are stored as JSON in the database, and SQLAlchemy automatically handles
serialization (``json.dumps``) and deserialization (``json.loads``). However, we
use a custom encoder for serialization (``serialize_value``) and deserialization to handle special
cases, such as encoding tuples via the Airflow Serialization module. These must be decoded
using ``XComDecoder`` to restore original types.
Some XCom values, such as those set via the Task Execution API, bypass ``serialize_value``
and are stored directly in JSON format. Since these values are already deserialized
by SQLAlchemy, they are returned as-is.
**Example: Handling a tuple**:
.. code-block:: python
original_value = (1, 2, 3)
serialized_value = XComModel.serialize_value(original_value)
print(serialized_value)
# '{"__classname__": "builtins.tuple", "__version__": 1, "__data__": [1, 2, 3]}'
This serialized value is stored in the database. When deserialized, the value is restored to the original tuple.
:param result: The XCom database row or object containing a ``value`` attribute.
:return: The deserialized Python object.
"""
if result.value is None:
return None
try:
return json.loads(result.value, cls=XComDecoder)
except (ValueError, TypeError):
# Already deserialized (e.g., set via Task Execution API)
return result.value
| XComModel |
python | weaviate__weaviate-python-client | weaviate/collections/queries/bm25/query/async_.py | {
"start": 290,
"end": 427
} | class ____(
Generic[Properties, References],
_BM25QueryExecutor[ConnectionAsync, Properties, References],
):
pass
| _BM25QueryAsync |
python | tensorflow__tensorflow | tensorflow/python/keras/callbacks_v1.py | {
"start": 1522,
"end": 19759
} | class ____(callbacks.TensorBoard):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
embeddings_layer_names: a list of names of layers to keep eye on. If None
or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved.
[Here are details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn more
about embeddings [in this guide](
https://www.tensorflow.org/programmers_guide/embedding).
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
# Don't call super's init since it is an eager-only version.
callbacks.Callback.__init__(self)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
UserWarning('Weight and gradient histograms not supported for eager'
'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# True when the profiler was successfully started by this callback.
# We track the status here to make sure callbacks do not interfere with
# each other. The callback will only stop the profiler it started.
self._profiler_started = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer_v2(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
summary_ops_v2.graph(K.get_graph())
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tensorflow.python.keras.engine import training_utils_v1 # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils_v1.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
'TensorBoard integration is complete."')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Args:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.record_if(True):
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_batch_begin(self, batch, logs=None):
if self._total_batches_seen == self._profile_batch - 1:
self._start_profiler()
def on_train_batch_end(self, batch, logs=None):
return self.on_batch_end(batch, logs)
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
self._stop_profiler()
def on_train_begin(self, logs=None):
pass
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
# the `embeddings_data` explicitly. This design allows to pass
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
self._stop_profiler()
self.writer.close()
def _start_profiler(self):
"""Starts the profiler if currently inactive."""
if self._profiler_started:
return
try:
profiler.start(logdir=self.log_dir)
self._profiler_started = True
except errors.AlreadyExistsError as e:
# Profiler errors should not be fatal.
logging.error('Failed to start profiler: %s', e.message)
def _stop_profiler(self):
"""Stops the profiler if currently active."""
if not self._profiler_started:
return
try:
profiler.stop()
except errors.UnavailableError as e:
# Profiler errors should not be fatal.
logging.error('Failed to stop profiler: %s', e.message)
finally:
self._profiler_started = False
| TensorBoard |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 132316,
"end": 150436
} | class ____(DialectKWArgs, SelectBase, Generative):
"""Base class for SELECT statements where additional elements can be
added.
This serves as the base for :class:`_expression.Select` and
:class:`_expression.CompoundSelect`
where elements such as ORDER BY, GROUP BY can be added and column
rendering can be controlled. Compare to
:class:`_expression.TextualSelect`, which,
while it subclasses :class:`_expression.SelectBase`
and is also a SELECT construct,
represents a fixed textual string which cannot be altered at this level,
only wrapped as a subquery.
"""
_order_by_clauses: Tuple[ColumnElement[Any], ...] = ()
_group_by_clauses: Tuple[ColumnElement[Any], ...] = ()
_limit_clause: Optional[ColumnElement[Any]] = None
_offset_clause: Optional[ColumnElement[Any]] = None
_fetch_clause: Optional[ColumnElement[Any]] = None
_fetch_clause_options: Optional[Dict[str, bool]] = None
_for_update_arg: Optional[ForUpdateArg] = None
def __init__(self, _label_style: SelectLabelStyle = LABEL_STYLE_DEFAULT):
self._label_style = _label_style
@_generative
def with_for_update(
self,
*,
nowait: bool = False,
read: bool = False,
of: Optional[_ForUpdateOfArgument] = None,
skip_locked: bool = False,
key_share: bool = False,
) -> Self:
"""Specify a ``FOR UPDATE`` clause for this
:class:`_expression.GenerativeSelect`.
E.g.::
stmt = select(table).with_for_update(nowait=True)
On a database like PostgreSQL or Oracle Database, the above would
render a statement like:
.. sourcecode:: sql
SELECT table.a, table.b FROM table FOR UPDATE NOWAIT
on other backends, the ``nowait`` option is ignored and instead
would produce:
.. sourcecode:: sql
SELECT table.a, table.b FROM table FOR UPDATE
When called with no arguments, the statement will render with
the suffix ``FOR UPDATE``. Additional arguments can then be
provided which allow for common database-specific
variants.
:param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle
Database and PostgreSQL dialects.
:param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL,
``FOR SHARE`` on PostgreSQL. On PostgreSQL, when combined with
``nowait``, will render ``FOR SHARE NOWAIT``.
:param of: SQL expression or list of SQL expression elements,
(typically :class:`_schema.Column` objects or a compatible expression,
for some backends may also be a table expression) which will render
into a ``FOR UPDATE OF`` clause; supported by PostgreSQL, Oracle
Database, some MySQL versions and possibly others. May render as a
table or as a column depending on backend.
:param skip_locked: boolean, will render ``FOR UPDATE SKIP LOCKED`` on
Oracle Database and PostgreSQL dialects or ``FOR SHARE SKIP LOCKED``
if ``read=True`` is also specified.
:param key_share: boolean, will render ``FOR NO KEY UPDATE``,
or if combined with ``read=True`` will render ``FOR KEY SHARE``,
on the PostgreSQL dialect.
"""
self._for_update_arg = ForUpdateArg(
nowait=nowait,
read=read,
of=of,
skip_locked=skip_locked,
key_share=key_share,
)
return self
def get_label_style(self) -> SelectLabelStyle:
"""
Retrieve the current label style.
.. versionadded:: 1.4
"""
return self._label_style
def set_label_style(self, style: SelectLabelStyle) -> Self:
"""Return a new selectable with the specified label style.
There are three "label styles" available,
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_DISAMBIGUATE_ONLY`,
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_TABLENAME_PLUS_COL`, and
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_NONE`. The default style is
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_DISAMBIGUATE_ONLY`.
In modern SQLAlchemy, there is not generally a need to change the
labeling style, as per-expression labels are more effectively used by
making use of the :meth:`_sql.ColumnElement.label` method. In past
versions, :data:`_sql.LABEL_STYLE_TABLENAME_PLUS_COL` was used to
disambiguate same-named columns from different tables, aliases, or
subqueries; the newer :data:`_sql.LABEL_STYLE_DISAMBIGUATE_ONLY` now
applies labels only to names that conflict with an existing name so
that the impact of this labeling is minimal.
The rationale for disambiguation is mostly so that all column
expressions are available from a given :attr:`_sql.FromClause.c`
collection when a subquery is created.
.. versionadded:: 1.4 - the
:meth:`_sql.GenerativeSelect.set_label_style` method replaces the
previous combination of ``.apply_labels()``, ``.with_labels()`` and
``use_labels=True`` methods and/or parameters.
.. seealso::
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_DISAMBIGUATE_ONLY`
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_TABLENAME_PLUS_COL`
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_NONE`
:attr:`_sql.SelectLabelStyle.LABEL_STYLE_DEFAULT`
"""
if self._label_style is not style:
self = self._generate()
self._label_style = style
return self
@property
def _group_by_clause(self) -> ClauseList:
"""ClauseList access to group_by_clauses for legacy dialects"""
return ClauseList._construct_raw(
operators.comma_op, self._group_by_clauses
)
@property
def _order_by_clause(self) -> ClauseList:
"""ClauseList access to order_by_clauses for legacy dialects"""
return ClauseList._construct_raw(
operators.comma_op, self._order_by_clauses
)
def _offset_or_limit_clause(
self,
element: _LimitOffsetType,
name: Optional[str] = None,
type_: Optional[_TypeEngineArgument[int]] = None,
) -> ColumnElement[Any]:
"""Convert the given value to an "offset or limit" clause.
This handles incoming integers and converts to an expression; if
an expression is already given, it is passed through.
"""
return coercions.expect(
roles.LimitOffsetRole, element, name=name, type_=type_
)
@overload
def _offset_or_limit_clause_asint(
self, clause: ColumnElement[Any], attrname: str
) -> NoReturn: ...
@overload
def _offset_or_limit_clause_asint(
self, clause: Optional[_OffsetLimitParam], attrname: str
) -> Optional[int]: ...
def _offset_or_limit_clause_asint(
self, clause: Optional[ColumnElement[Any]], attrname: str
) -> Union[NoReturn, Optional[int]]:
"""Convert the "offset or limit" clause of a select construct to an
integer.
This is only possible if the value is stored as a simple bound
parameter. Otherwise, a compilation error is raised.
"""
if clause is None:
return None
try:
value = clause._limit_offset_value
except AttributeError as err:
raise exc.CompileError(
"This SELECT structure does not use a simple "
"integer value for %s" % attrname
) from err
else:
return util.asint(value)
@property
def _limit(self) -> Optional[int]:
"""Get an integer value for the limit. This should only be used
by code that cannot support a limit as a BindParameter or
other custom clause as it will throw an exception if the limit
isn't currently set to an integer.
"""
return self._offset_or_limit_clause_asint(self._limit_clause, "limit")
def _simple_int_clause(self, clause: ClauseElement) -> bool:
"""True if the clause is a simple integer, False
if it is not present or is a SQL expression.
"""
return isinstance(clause, _OffsetLimitParam)
@property
def _offset(self) -> Optional[int]:
"""Get an integer value for the offset. This should only be used
by code that cannot support an offset as a BindParameter or
other custom clause as it will throw an exception if the
offset isn't currently set to an integer.
"""
return self._offset_or_limit_clause_asint(
self._offset_clause, "offset"
)
@property
def _has_row_limiting_clause(self) -> bool:
return (
self._limit_clause is not None
or self._offset_clause is not None
or self._fetch_clause is not None
)
@_generative
def limit(self, limit: _LimitOffsetType) -> Self:
"""Return a new selectable with the given LIMIT criterion
applied.
This is a numerical value which usually renders as a ``LIMIT``
expression in the resulting select. Backends that don't
support ``LIMIT`` will attempt to provide similar
functionality.
.. note::
The :meth:`_sql.GenerativeSelect.limit` method will replace
any clause applied with :meth:`_sql.GenerativeSelect.fetch`.
:param limit: an integer LIMIT parameter, or a SQL expression
that provides an integer result. Pass ``None`` to reset it.
.. seealso::
:meth:`_sql.GenerativeSelect.fetch`
:meth:`_sql.GenerativeSelect.offset`
"""
self._fetch_clause = self._fetch_clause_options = None
self._limit_clause = self._offset_or_limit_clause(limit)
return self
@_generative
def fetch(
self,
count: _LimitOffsetType,
with_ties: bool = False,
percent: bool = False,
**dialect_kw: Any,
) -> Self:
r"""Return a new selectable with the given FETCH FIRST criterion
applied.
This is a numeric value which usually renders as ``FETCH {FIRST | NEXT}
[ count ] {ROW | ROWS} {ONLY | WITH TIES}`` expression in the resulting
select. This functionality is is currently implemented for Oracle
Database, PostgreSQL, MSSQL.
Use :meth:`_sql.GenerativeSelect.offset` to specify the offset.
.. note::
The :meth:`_sql.GenerativeSelect.fetch` method will replace
any clause applied with :meth:`_sql.GenerativeSelect.limit`.
.. versionadded:: 1.4
:param count: an integer COUNT parameter, or a SQL expression
that provides an integer result. When ``percent=True`` this will
represent the percentage of rows to return, not the absolute value.
Pass ``None`` to reset it.
:param with_ties: When ``True``, the WITH TIES option is used
to return any additional rows that tie for the last place in the
result set according to the ``ORDER BY`` clause. The
``ORDER BY`` may be mandatory in this case. Defaults to ``False``
:param percent: When ``True``, ``count`` represents the percentage
of the total number of selected rows to return. Defaults to ``False``
:param \**dialect_kw: Additional dialect-specific keyword arguments
may be accepted by dialects.
.. versionadded:: 2.0.41
.. seealso::
:meth:`_sql.GenerativeSelect.limit`
:meth:`_sql.GenerativeSelect.offset`
"""
self._validate_dialect_kwargs(dialect_kw)
self._limit_clause = None
if count is None:
self._fetch_clause = self._fetch_clause_options = None
else:
self._fetch_clause = self._offset_or_limit_clause(count)
self._fetch_clause_options = {
"with_ties": with_ties,
"percent": percent,
}
return self
@_generative
def offset(self, offset: _LimitOffsetType) -> Self:
"""Return a new selectable with the given OFFSET criterion
applied.
This is a numeric value which usually renders as an ``OFFSET``
expression in the resulting select. Backends that don't
support ``OFFSET`` will attempt to provide similar
functionality.
:param offset: an integer OFFSET parameter, or a SQL expression
that provides an integer result. Pass ``None`` to reset it.
.. seealso::
:meth:`_sql.GenerativeSelect.limit`
:meth:`_sql.GenerativeSelect.fetch`
"""
self._offset_clause = self._offset_or_limit_clause(offset)
return self
@_generative
@util.preload_module("sqlalchemy.sql.util")
def slice(
self,
start: int,
stop: int,
) -> Self:
"""Apply LIMIT / OFFSET to this statement based on a slice.
The start and stop indices behave like the argument to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
stmt = select(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. note::
The :meth:`_sql.GenerativeSelect.slice` method will replace
any clause applied with :meth:`_sql.GenerativeSelect.fetch`.
.. versionadded:: 1.4 Added the :meth:`_sql.GenerativeSelect.slice`
method generalized from the ORM.
.. seealso::
:meth:`_sql.GenerativeSelect.limit`
:meth:`_sql.GenerativeSelect.offset`
:meth:`_sql.GenerativeSelect.fetch`
"""
sql_util = util.preloaded.sql_util
self._fetch_clause = self._fetch_clause_options = None
self._limit_clause, self._offset_clause = sql_util._make_slice(
self._limit_clause, self._offset_clause, start, stop
)
return self
@_generative
def order_by(
self,
__first: Union[
Literal[None, _NoArg.NO_ARG],
_ColumnExpressionOrStrLabelArgument[Any],
] = _NoArg.NO_ARG,
/,
*clauses: _ColumnExpressionOrStrLabelArgument[Any],
) -> Self:
r"""Return a new selectable with the given list of ORDER BY
criteria applied.
e.g.::
stmt = select(table).order_by(table.c.id, table.c.name)
Calling this method multiple times is equivalent to calling it once
with all the clauses concatenated. All existing ORDER BY criteria may
be cancelled by passing ``None`` by itself. New ORDER BY criteria may
then be added by invoking :meth:`_orm.Query.order_by` again, e.g.::
# will erase all ORDER BY and ORDER BY new_col alone
stmt = stmt.order_by(None).order_by(new_col)
:param \*clauses: a series of :class:`_expression.ColumnElement`
constructs which will be used to generate an ORDER BY clause.
Alternatively, an individual entry may also be the string name of a
label located elsewhere in the columns clause of the statement which
will be matched and rendered in a backend-specific way based on
context; see :ref:`tutorial_order_by_label` for background on string
label matching in ORDER BY and GROUP BY expressions.
.. seealso::
:ref:`tutorial_order_by` - in the :ref:`unified_tutorial`
:ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
"""
if not clauses and __first is None:
self._order_by_clauses = ()
elif __first is not _NoArg.NO_ARG:
self._order_by_clauses += tuple(
coercions.expect(
roles.OrderByRole, clause, apply_propagate_attrs=self
)
for clause in (__first,) + clauses
)
return self
@_generative
def group_by(
self,
__first: Union[
Literal[None, _NoArg.NO_ARG],
_ColumnExpressionOrStrLabelArgument[Any],
] = _NoArg.NO_ARG,
/,
*clauses: _ColumnExpressionOrStrLabelArgument[Any],
) -> Self:
r"""Return a new selectable with the given list of GROUP BY
criterion applied.
All existing GROUP BY settings can be suppressed by passing ``None``.
e.g.::
stmt = select(table.c.name, func.max(table.c.stat)).group_by(table.c.name)
:param \*clauses: a series of :class:`_expression.ColumnElement`
constructs which will be used to generate an GROUP BY clause.
Alternatively, an individual entry may also be the string name of a
label located elsewhere in the columns clause of the statement which
will be matched and rendered in a backend-specific way based on
context; see :ref:`tutorial_order_by_label` for background on string
label matching in ORDER BY and GROUP BY expressions.
.. seealso::
:ref:`tutorial_group_by_w_aggregates` - in the
:ref:`unified_tutorial`
:ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
""" # noqa: E501
if not clauses and __first is None:
self._group_by_clauses = ()
elif __first is not _NoArg.NO_ARG:
self._group_by_clauses += tuple(
coercions.expect(
roles.GroupByRole, clause, apply_propagate_attrs=self
)
for clause in (__first,) + clauses
)
return self
@CompileState.plugin_for("default", "compound_select")
| GenerativeSelect |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/resource.py | {
"start": 2295,
"end": 4337
} | class ____(ResourceDefinition, ABC):
@property
@abstractmethod
def nested_partial_resources(self) -> Mapping[str, "CoercibleToResource"]: ...
@property
@abstractmethod
def nested_resources(self) -> Mapping[str, Any]: ...
@property
@abstractmethod
def configurable_resource_cls(self) -> type: ...
def get_resource_requirements(self, source_key: str) -> Iterator["ResourceRequirement"]: # pyright: ignore[reportIncompatibleMethodOverride]
for attr_name, partial_resource in self.nested_partial_resources.items():
yield PartialResourceDependencyRequirement(
class_name=self.configurable_resource_cls.__name__,
attr_name=attr_name,
partial_resource=partial_resource,
)
for inner_key, resource in self.nested_resources.items():
if is_coercible_to_resource(resource):
yield from coerce_to_resource(resource).get_resource_requirements(inner_key)
yield from super().get_resource_requirements(source_key)
def get_required_resource_keys(
self, resource_defs: Mapping[str, ResourceDefinition]
) -> AbstractSet[str]:
resolved_keys = set(self.required_resource_keys)
for attr_name, partial_resource in self.nested_partial_resources.items():
if is_coercible_to_resource(partial_resource):
resolved_keys.add(
_resolve_partial_resource_to_key(attr_name, partial_resource, resource_defs)
)
else:
check.failed(
f"Unexpected nested partial resource of type {type(partial_resource)} is not coercible to resource"
)
for resource in self.nested_resources.values():
if is_coercible_to_resource(resource):
resolved_keys.update(
coerce_to_resource(resource).get_required_resource_keys(resource_defs)
)
return resolved_keys
| NestedResourcesResourceDefinition |
python | kamyu104__LeetCode-Solutions | Python/maximize-sum-of-weights-after-edge-removals.py | {
"start": 75,
"end": 2590
} | class ____(object):
def maximizeSumOfWeights(self, edges, k):
"""
:type edges: List[List[int]]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target):
i = left
while i <= right:
if compare(nums[i], target):
nums[i], nums[left] = nums[left], nums[i]
left += 1
i += 1
elif compare(target, nums[i]):
nums[i], nums[right] = nums[right], nums[i]
right -= 1
else:
i += 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx])
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def iter_dfs():
cnt = [[0]*2 for _ in xrange(len(adj))]
stk = [(1, 0, -1)]
while stk:
step, u, p = stk.pop()
if step == 1:
stk.append((2, u, p))
for v, w in reversed(adj[u]):
if v == p:
continue
stk.append((1, v, u))
elif step == 2:
curr = 0
diff = []
for v, w in adj[u]:
if v == p:
continue
curr += cnt[v][0]
diff.append(max((cnt[v][1]+w)-cnt[v][0], 0))
if k-1 < len(diff):
nth_element(diff, k-1, lambda a, b: a > b)
cnt[u][0] = curr+sum(diff[i] for i in xrange(min(k, len(diff))))
cnt[u][1] = curr+sum(diff[i] for i in xrange(min(k-1, len(diff))))
return cnt[0][0]
adj = [[] for _ in xrange(len(edges)+1)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
return iter_dfs()
# Time: O(n)
# Space: O(n)
import random
# dfs, quick select
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 65530,
"end": 69635
} | class ____(GoogleCloudBaseOperator):
r"""
Get an entry by target resource name.
This method allows clients to use the resource name from the source Google Cloud service
to get the Data Catalog Entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogLookupEntryOperator`
:param linked_resource: The full name of the Google Cloud resource the Data Catalog entry
represents. See: https://cloud.google.com/apis/design/resource\_names#full\_resource\_name. Full
names are case-sensitive.
:param sql_resource: The SQL name of the entry. SQL names are case-sensitive.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"linked_resource",
"sql_resource",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
linked_resource: str | None = None,
sql_resource: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.linked_resource = linked_resource
self.sql_resource = sql_resource
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.lookup_entry(
linked_resource=self.linked_resource,
sql_resource=self.sql_resource,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id, location_id, entry_group_id, entry_id = result.name.split("/")[1::2]
DataCatalogEntryLink.persist(
context=context,
entry_id=entry_id,
entry_group_id=entry_group_id,
location_id=location_id,
project_id=project_id,
)
return Entry.to_dict(result)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogUpdateAspectTypeOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogLookupEntryOperator |
python | getsentry__sentry | tests/sentry/web/frontend/test_auth_organization_login.py | {
"start": 44561,
"end": 51957
} | class ____(AuthProviderTestCase):
def setUp(self) -> None:
self.owner = self.create_user()
self.organization = self.create_organization(name="foo", owner=self.owner)
self.user = self.create_user("bar@example.com", is_managed=False, password="")
self.auth_provider_inst = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
self.path = reverse("sentry-auth-organization", args=[self.organization.slug])
self.auth_sso_path = reverse("sentry-auth-sso")
UserEmail.objects.filter(user=self.user, email="bar@example.com").update(is_verified=False)
@mock.patch("sentry.auth.idpmigration.MessageBuilder")
def test_flow_verify_and_link_without_password_sends_email(self, email: mock.MagicMock) -> None:
assert not self.user.has_usable_password()
self.create_member(organization=self.organization, user_id=self.user.id)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
resp = self.client.post(self.auth_sso_path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-account.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == self.user
_, message = email.call_args
context = message["context"]
assert context["user"] == self.user
assert context["email"] == self.user.email
assert context["organization"] == self.organization.name
email.return_value.send_async.assert_called_with([self.user.email])
path = reverse("sentry-idp-email-verification", args=[context["verification_key"]])
resp = self.client.get(path)
assert resp.templates[0].name == "sentry/idp_account_verified.html"
assert resp.status_code == 200
path = reverse("sentry-auth-organization", args=[self.organization.slug])
resp = self.client.post(path, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=self.auth_provider_inst)
assert self.user == auth_identity.user
@mock.patch("sentry.auth.idpmigration.MessageBuilder")
def test_flow_verify_without_org_membership(self, email: mock.MagicMock) -> None:
assert not self.user.has_usable_password()
with assume_test_silo_mode(SiloMode.REGION):
assert not OrganizationMember.objects.filter(
organization=self.organization, user_id=self.user.id
).exists()
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
resp = self.client.post(self.auth_sso_path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-account.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == self.user
_, message = email.call_args
context = message["context"]
assert context["organization"] == self.organization.name
path = reverse("sentry-idp-email-verification", args=[context["verification_key"]])
resp = self.client.get(path)
assert resp.templates[0].name == "sentry/idp_account_verified.html"
assert resp.status_code == 200
path = reverse("sentry-auth-organization", args=[self.organization.slug])
resp = self.client.post(path, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=self.auth_provider_inst)
assert self.user == auth_identity.user
# Check that OrganizationMember was created as a side effect
with assume_test_silo_mode(SiloMode.REGION):
assert OrganizationMember.objects.filter(
organization=self.organization, user_id=self.user.id
).exists()
@mock.patch("sentry.auth.idpmigration.MessageBuilder")
def test_flow_verify_and_link_without_password_login_success(
self, email: mock.MagicMock
) -> None:
assert not self.user.has_usable_password()
self.create_member(organization=self.organization, user_id=self.user.id)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
resp = self.client.post(self.auth_sso_path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-account.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == self.user
path = reverse(
"sentry-idp-email-verification",
args=[email.call_args.kwargs["context"]["verification_key"]],
)
resp = self.client.get(path)
assert resp.templates[0].name == "sentry/idp_account_verified.html"
assert resp.status_code == 200
path = reverse("sentry-auth-organization", args=[self.organization.slug])
resp = self.client.post(path, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-login"), 302),
("/organizations/foo/issues/", 302),
]
auth_identity = AuthIdentity.objects.get(auth_provider=self.auth_provider_inst)
assert self.user == auth_identity.user
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
assert getattr(member.flags, "sso:linked")
assert not getattr(member.flags, "sso:invalid")
assert not getattr(member.flags, "member-limit:restricted")
@mock.patch("sentry.auth.idpmigration.MessageBuilder")
def test_flow_verify_and_link_without_password_need_2fa(self, email: mock.MagicMock) -> None:
assert not self.user.has_usable_password()
self.create_member(organization=self.organization, user_id=self.user.id)
TotpInterface().enroll(self.user)
resp = self.client.post(self.path, {"init": True})
assert resp.status_code == 200
assert PLACEHOLDER_TEMPLATE in resp.content.decode("utf-8")
resp = self.client.post(self.auth_sso_path, {"email": "bar@example.com"})
self.assertTemplateUsed(resp, "sentry/auth-confirm-account.html")
assert resp.status_code == 200
assert resp.context["existing_user"] == self.user
path = reverse(
"sentry-idp-email-verification",
args=[email.call_args.kwargs["context"]["verification_key"]],
)
resp = self.client.get(path)
assert resp.templates[0].name == "sentry/idp_account_verified.html"
assert resp.status_code == 200
path = reverse("sentry-auth-organization", args=[self.organization.slug])
resp = self.client.post(path, follow=True)
assert resp.redirect_chain == [
(reverse("sentry-2fa-dialog"), 302),
]
@control_silo_test
| OrganizationAuthLoginNoPasswordTest |
python | PyCQA__pylint | pylint/checkers/spelling.py | {
"start": 4020,
"end": 5999
} | class ____(Chunker): # type: ignore[misc]
"""This chunker allows splitting words like 'before/after' into 'before' and
'after'.
"""
_text: str
def next(self) -> tuple[str, int]:
while True:
if not self._text:
raise StopIteration()
if "/" not in self._text:
text = self._text
self._offset = 0
self._text = ""
return text, 0
pre_text, post_text = self._text.split("/", 1)
self._text = post_text
self._offset = 0
if not (
pre_text
and post_text
and pre_text[-1].isalpha()
and post_text[0].isalpha()
):
self._text = ""
self._offset = 0
return f"{pre_text}/{post_text}", 0
return pre_text, 0
def _next(self) -> tuple[str, Literal[0]]:
while True:
if "/" not in self._text:
return self._text, 0
pre_text, post_text = self._text.split("/", 1)
if not (pre_text and post_text):
break
if not (pre_text[-1].isalpha() and post_text[0].isalpha()):
raise StopIteration()
self._text = pre_text + " " + post_text
raise StopIteration()
CODE_FLANKED_IN_BACKTICK_REGEX = re.compile(r"(\s|^)(`{1,2})([^`]+)(\2)([^`]|$)")
def _strip_code_flanked_in_backticks(line: str) -> str:
"""Alter line so code flanked in back-ticks is ignored.
Pyenchant automatically strips back-ticks when parsing tokens,
so this cannot be done at the individual filter level.
"""
def replace_code_but_leave_surrounding_characters(match_obj: re.Match[str]) -> str:
return match_obj.group(1) + match_obj.group(5)
return CODE_FLANKED_IN_BACKTICK_REGEX.sub(
replace_code_but_leave_surrounding_characters, line
)
| ForwardSlashChunker |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/colorbar/title/_font.py | {
"start": 233,
"end": 9969
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour.colorbar.title"
_path_str = "histogram2dcontour.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 27127,
"end": 27599
} | class ____(FeedSlot):
"""Instrumented FeedSlot subclass for keeping track of calls to
start_exporting and finish_exporting."""
def start_exporting(self):
self.update_listener("start")
super().start_exporting()
def finish_exporting(self):
self.update_listener("finish")
super().finish_exporting()
@classmethod
def subscribe__listener(cls, listener):
cls.update_listener = listener.update
| InstrumentedFeedSlot |
python | python-excel__xlwt | xlwt/Cell.py | {
"start": 546,
"end": 924
} | class ____(object):
__slots__ = ["rowx", "colx", "xf_idx"]
def __init__(self, rowx, colx, xf_idx):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
def get_biff_data(self):
# return BIFFRecords.BlankRecord(self.rowx, self.colx, self.xf_idx).get()
return pack('<5H', 0x0201, 6, self.rowx, self.colx, self.xf_idx)
| BlankCell |
python | huggingface__transformers | src/transformers/models/efficientnet/modeling_efficientnet.py | {
"start": 17997,
"end": 20188
} | class ____(EfficientNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.efficientnet = EfficientNetModel(config)
# Classifier head
self.dropout = nn.Dropout(p=config.dropout_rate)
self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
__all__ = ["EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel"]
| EfficientNetForImageClassification |
python | spyder-ide__spyder | setup.py | {
"start": 5003,
"end": 14326
} | class ____(install_data):
def run(self):
install_data.run(self)
if sys.platform.startswith('linux'):
try:
subprocess.call(['update-desktop-database'])
except:
print("ERROR: unable to update desktop database",
file=sys.stderr)
CMDCLASS = {'install_data': CustomInstallData}
# =============================================================================
# Main scripts
# =============================================================================
# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows
# platforms due to a bug in pip installation process
# See spyder-ide/spyder#1158.
SCRIPTS = ['%s_win_post_install.py' % NAME]
SCRIPTS.append('spyder')
if os.name == 'nt':
SCRIPTS += ['spyder.bat']
# =============================================================================
# Files added to the package
# =============================================================================
EXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',
'.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom',
'.ico', '.gif', '.mp3', '.ogg', '.sfd', '.bat', '.sh']
# =============================================================================
# Use Readme for long description
# =============================================================================
with io.open('README.md', encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
#==============================================================================
# Setup arguments
#==============================================================================
setup_args = dict(
name=NAME,
version=__version__,
description='The Scientific Python Development Environment',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
download_url=__website_url__ + "#fh5co-download",
author="The Spyder Project Contributors",
author_email="spyder.python@gmail.com",
url=__website_url__,
license='MIT',
keywords='PyQt5 editor console widgets IDE science data analysis IPython',
platforms=["Windows", "Linux", "Mac OS-X"],
packages=get_packages(),
package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST)},
scripts=[osp.join('scripts', fname) for fname in SCRIPTS],
data_files=get_data_files(),
python_requires='>=3.9',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.13',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Widget Sets',
],
cmdclass=CMDCLASS,
)
# Qt bindings requirements
qt_requirements = {
'pyqt5': [
'pyqt5>=5.15,<5.16',
'pyqtwebengine>=5.15,<5.16',
'qtconsole>=5.7.0,<5.8.0',
],
'pyqt6': [
'pyqt6>=6.5,<7',
'pyqt6-webengine>=6.5,<7',
'qtconsole>=5.7.0,<5.8.0',
],
'pyside6': [
'pyside6>=6.5,<7',
'qtconsole>=5.7.0,<5.8.0',
],
'conda-forge': [
'qtconsole>=5.7.0,<5.8.0',
]
}
# Get the proper requirements for the selected Qt binding
install_requires = get_qt_requirements(qt_requirements, default='pyqt5')
install_requires += [
'aiohttp>=3.11.2',
'applaunchservices>=0.3.0;platform_system=="Darwin"',
'asyncssh>=2.14.0,<3.0.0',
'atomicwrites>=1.2.0',
'bcrypt>=4.3.0',
'chardet>=2.0.0',
'cloudpickle>=0.5.0',
'cookiecutter>=1.6.0',
'diff-match-patch>=20181111',
# While this is only required for python <3.10, it is safe enough to
# install in all cases and helps the tests to pass.
'importlib-metadata>=4.6.0',
'intervaltree>=3.0.2',
# Note that on IPython 9.0.0 Python 3.10 support was dropped
'ipython>=8.15.0,<10.0.0,!=8.17.1,!=9.1.0,!=9.2.0,!=9.3.0,!=9.4.0',
'ipython_pygments_lexers>=1.0',
'jedi>=0.17.2,<0.20.0',
'jellyfish>=0.7',
'lxml>=4.9.0',
'jsonschema>=3.2.0',
'keyring>=17.0.0',
'nbconvert>=4.0',
'numpydoc>=0.6.0',
'packaging>=20.0',
'parso>=0.7.0,<0.9.0',
'pexpect>=4.4.0',
'pickleshare>=0.4',
'psutil>=5.3',
'pygithub>=2.3.0',
'pygments>=2.0',
'pylint>=3.1,<4',
'pylint-venv>=3.0.2',
'pyls-spyder>=0.4.0',
'python-lsp-black>=2.0.0,<3.0.0',
'python-lsp-ruff>=2.3.0,<3.0.0',
'python-lsp-server[all]>=1.13.0,<1.14.0',
'pyuca>=1.2',
'pyxdg>=0.26;platform_system=="Linux"',
'pyzmq>=24.0.0',
'qdarkstyle>=3.2.0,<3.3.0',
'qstylizer>=0.2.2',
'qtawesome>=1.4.0,<1.5.0',
'qtpy>=2.4.0',
'rtree>=0.9.7',
'sphinx>=7.2.0',
'spyder-kernels>=3.1.0b1,<3.2.0a2',
'superqt>=0.6.2,<1.0.0',
'textdistance>=4.2.0',
'three-merge>=0.1.1',
'watchdog>=0.10.3',
'yarl>=1.9.4',
]
# Loosen constraints to ensure dev versions still work
if 'dev' in __version__:
reqs_to_loosen = {
'python-lsp-server[all]',
'qtconsole',
'qtconsole-base',
'spyder-kernels',
}
install_requires = [req for req in install_requires
if req.split(">")[0] not in reqs_to_loosen]
install_requires.append('python-lsp-server[all]>=1.13.0,<1.15.0')
install_requires.append('qtconsole>=5.7.0,<5.9.0')
extras_require = {
'test': [
'coverage',
'cython',
'flaky',
'matplotlib',
'pandas',
'pillow',
'pytest<8.0',
'pytest-cov',
'pytest-lazy-fixture',
'pytest-mock',
'pytest-order',
'pytest-qt',
'pytest-timeout',
'pywin32;platform_system=="Windows"',
'pyyaml',
'scipy',
'sympy',
],
}
spyder_plugins_entry_points = [
'appearance = spyder.plugins.appearance.plugin:Appearance',
'application = spyder.plugins.application.plugin:Application',
'completions = spyder.plugins.completion.plugin:CompletionPlugin',
'debugger = spyder.plugins.debugger.plugin:Debugger',
'editor = spyder.plugins.editor.plugin:Editor',
'explorer = spyder.plugins.explorer.plugin:Explorer',
'external_terminal = spyder.plugins.externalterminal.plugin:ExternalTerminal',
'find_in_files = spyder.plugins.findinfiles.plugin:FindInFiles',
'help = spyder.plugins.help.plugin:Help',
'historylog = spyder.plugins.history.plugin:HistoryLog',
'internal_console = spyder.plugins.console.plugin:Console',
'ipython_console = spyder.plugins.ipythonconsole.plugin:IPythonConsole',
'layout = spyder.plugins.layout.plugin:Layout',
'main_interpreter = spyder.plugins.maininterpreter.plugin:MainInterpreter',
'mainmenu = spyder.plugins.mainmenu.plugin:MainMenu',
'onlinehelp = spyder.plugins.onlinehelp.plugin:OnlineHelp',
'outline_explorer = spyder.plugins.outlineexplorer.plugin:OutlineExplorer',
'plots = spyder.plugins.plots.plugin:Plots',
'preferences = spyder.plugins.preferences.plugin:Preferences',
'profiler = spyder.plugins.profiler.plugin:Profiler',
'project_explorer = spyder.plugins.projects.plugin:Projects',
'pylint = spyder.plugins.pylint.plugin:Pylint',
'pythonpath_manager = spyder.plugins.pythonpath.plugin:PythonpathManager',
'remoteclient = spyder.plugins.remoteclient.plugin:RemoteClient',
'run = spyder.plugins.run.plugin:Run',
'shortcuts = spyder.plugins.shortcuts.plugin:Shortcuts',
'statusbar = spyder.plugins.statusbar.plugin:StatusBar',
'switcher = spyder.plugins.switcher.plugin:Switcher',
'toolbar = spyder.plugins.toolbar.plugin:Toolbar',
'tours = spyder.plugins.tours.plugin:Tours',
'update_manager = spyder.plugins.updatemanager.plugin:UpdateManager',
'variable_explorer = spyder.plugins.variableexplorer.plugin:VariableExplorer',
'workingdir = spyder.plugins.workingdirectory.plugin:WorkingDirectory',
]
spyder_completions_entry_points = [
('fallback = spyder.plugins.completion.providers.fallback.provider:'
'FallbackProvider'),
('snippets = spyder.plugins.completion.providers.snippets.provider:'
'SnippetsProvider'),
('lsp = spyder.plugins.completion.providers.languageserver.provider:'
'LanguageServerProvider'),
]
setup_args['install_requires'] = install_requires
setup_args['extras_require'] = extras_require
setup_args['entry_points'] = {
'gui_scripts': [
'spyder = spyder.app.start:main'
],
'spyder.plugins': spyder_plugins_entry_points,
'spyder.completions': spyder_completions_entry_points
}
setup_args.pop('scripts', None)
# =============================================================================
# Main setup
# =============================================================================
setup(**setup_args)
| CustomInstallData |
python | pypa__pip | tests/lib/server.py | {
"start": 619,
"end": 5829
} | class ____(WSGIRequestHandler):
def make_environ(self) -> dict[str, Any]:
environ = super().make_environ()
# From pallets/werkzeug#1469, will probably be in release after
# 0.16.0.
try:
# binary_form=False gives nicer information, but wouldn't be
# compatible with what Nginx or Apache could return.
peer_cert = self.connection.getpeercert(binary_form=True)
if peer_cert is not None:
# Nginx and Apache use PEM format.
environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(
peer_cert,
)
except ValueError:
# SSL handshake hasn't finished.
self.server.log("error", "Cannot fetch SSL peer certificate info")
except AttributeError:
# Not using TLS, the socket will not have getpeercert().
pass
return environ
def _mock_wsgi_adapter(
mock: Callable[["WSGIEnvironment", "StartResponse"], "WSGIApplication"],
) -> "WSGIApplication":
"""Uses a mock to record function arguments and provide
the actual function that should respond.
"""
def adapter(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
try:
responder = mock(environ, start_response)
except StopIteration:
raise RuntimeError("Ran out of mocked responses.")
return responder(environ, start_response)
return adapter
def make_mock_server(**kwargs: Any) -> _MockServer:
"""Creates a mock HTTP(S) server listening on a random port on localhost.
The `mock` property of the returned server provides and records all WSGI
interactions, so one approach to testing could be
server = make_mock_server()
server.mock.side_effects = [
page1,
page2,
]
with server_running(server):
# ... use server...
...
assert server.mock.call_count > 0
call_args_list = server.mock.call_args_list
# `environ` is a dictionary defined as per PEP 3333 with the associated
# contents. Additional properties may be added by werkzeug.
environ, _ = call_args_list[0].args
assert environ["PATH_INFO"].startswith("/hello/simple")
Note that the server interactions take place in a different thread, so you
do not want to touch the server.mock within the `server_running` block.
Note also for pip interactions that "localhost" is a "secure origin", so
be careful using this for failure tests of `--trusted-host`.
"""
kwargs.setdefault("request_handler", _RequestHandler)
mock = Mock()
app = _mock_wsgi_adapter(mock)
server = _make_server("localhost", 0, app=app, **kwargs)
server.mock = mock
return server
@contextmanager
def server_running(server: BaseWSGIServer) -> Iterator[None]:
"""Context manager for running the provided server in a separate thread."""
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
with blocked_signals():
thread.start()
try:
yield
finally:
server.shutdown()
thread.join()
# Helper functions for making responses in a declarative way.
def text_html_response(text: str) -> "WSGIApplication":
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
start_response(
"200 OK",
[
("Content-Type", "text/html; charset=UTF-8"),
],
)
return [text.encode("utf-8")]
return responder
def html5_page(text: str) -> str:
return (
dedent(
"""
<!DOCTYPE html>
<html>
<body>
{}
</body>
</html>
"""
)
.strip()
.format(text)
)
def package_page(spec: dict[str, str]) -> "WSGIApplication":
def link(name: str, value: str) -> str:
return f'<a href="{value}">{name}</a>'
links = "".join(link(*kv) for kv in spec.items())
return text_html_response(html5_page(links))
def file_response(path: pathlib.Path) -> "WSGIApplication":
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
start_response(
"200 OK",
[
("Content-Type", "application/octet-stream"),
("Content-Length", str(path.stat().st_size)),
],
)
return [path.read_bytes()]
return responder
def authorization_response(path: pathlib.Path) -> "WSGIApplication":
correct_auth = "Basic " + b64encode(b"USERNAME:PASSWORD").decode("ascii")
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
if environ.get("HTTP_AUTHORIZATION") != correct_auth:
start_response("401 Unauthorized", [("WWW-Authenticate", "Basic")])
return ()
start_response(
"200 OK",
[
("Content-Type", "application/octet-stream"),
("Content-Length", str(path.stat().st_size)),
],
)
return [path.read_bytes()]
return responder
| _RequestHandler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.